From e2b31b58e8777faa51561342d72a5a30127fa4b6 Mon Sep 17 00:00:00 2001
From: Paul Zuchowski <31706010+PaulZ-98@users.noreply.github.com>
Date: Thu, 15 Aug 2019 10:27:13 -0400
Subject: [PATCH 01/68] Make txg_wait_synced conditional in zfsvfs_teardown

The call to txg_wait_synced in zfsvfs_teardown should be made
conditional on the objset having dirty data. This can prevent
unnecessary txg_wait_synced() calls during some unmount operations.

Reviewed-by: Matt Ahrens
Reviewed-by: Brian Behlendorf
Signed-off-by: Paul Zuchowski
Closes #9115
---
 module/zfs/zfs_vfsops.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index 8d728adeae94..af82c7bc4800 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -1778,8 +1778,17 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 	 * Evict cached data. We must write out any dirty data before
 	 * disowning the dataset.
 	 */
-	if (!zfs_is_readonly(zfsvfs))
+	objset_t *os = zfsvfs->z_os;
+	boolean_t os_dirty = B_FALSE;
+	for (int t = 0; t < TXG_SIZE; t++) {
+		if (dmu_objset_is_dirty(os, t)) {
+			os_dirty = B_TRUE;
+			break;
+		}
+	}
+	if (!zfs_is_readonly(zfsvfs) && os_dirty) {
 		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
+	}
 	dmu_objset_evict_dbufs(zfsvfs->z_os);
 
 	return (0);

From 0e37a0f4f3bc4feb62a966a7c0dd64544172395f Mon Sep 17 00:00:00 2001
From: Serapheim Dimitropoulos
Date: Thu, 15 Aug 2019 07:44:57 -0700
Subject: [PATCH 02/68] Assert that a dnode's bonuslen never exceeds its recorded size

This patch introduces an assertion that can catch pitfalls in
development where there is a mismatch between the size of reads and
writes between a *_phys structure and its respective in-core structure
when bonus buffers are used.

This debugging aid should be complementary to the verification done by
ztest in ztest_verify_dnode_bt().

A side effect of this patch is that we now clear out any extra bytes
past a bonus buffer's new size when the buffer is shrinking.

Reviewed-by: Matt Ahrens
Reviewed-by: Brian Behlendorf
Reviewed-by: Tom Caputi
Signed-off-by: Serapheim Dimitropoulos
Closes #8348
---
 module/zfs/dbuf.c  | 44 ++++++++++++++++++++++++++++++++++++++++++++
 module/zfs/dnode.c |  8 ++++++++
 2 files changed, 52 insertions(+)

diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 0518205f9906..f8f96c142e9f 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -3931,6 +3931,46 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 	zio_nowait(zio);
 }
 
+#ifdef ZFS_DEBUG
+/*
+ * Verify that the size of the data in our bonus buffer does not exceed
+ * its recorded size.
+ *
+ * The purpose of this verification is to catch any cases in development
+ * where the size of a phys structure (e.g. space_map_phys_t) grows and,
+ * due to incorrect feature management, older pools expect to read more
+ * data even though they didn't actually write it to begin with.
+ *
+ * For example, this would catch an error in the feature logic where we
+ * open an older pool and we expect to write the space map histogram of
+ * a space map with size SPACE_MAP_SIZE_V0.
+ */
+static void
+dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
+{
+	dnode_t *dn = DB_DNODE(dr->dr_dbuf);
+
+	/*
+	 * Encrypted bonus buffers can have data past their bonuslen.
+	 * Skip the verification of these blocks.
+ */ + if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)) + return; + + uint16_t bonuslen = dn->dn_phys->dn_bonuslen; + uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); + ASSERT3U(bonuslen, <=, maxbonuslen); + + arc_buf_t *datap = dr->dt.dl.dr_data; + char *datap_end = ((char *)datap) + bonuslen; + char *datap_max = ((char *)datap) + maxbonuslen; + + /* ensure that everything is zero after our data */ + for (; datap_end < datap_max; datap_end++) + ASSERT(*datap_end == 0); +} +#endif + /* * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is * critical the we not allow the compiler to inline this function in to @@ -4007,6 +4047,10 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) DN_MAX_BONUS_LEN(dn->dn_phys)); DB_DNODE_EXIT(db); +#ifdef ZFS_DEBUG + dbuf_sync_leaf_verify_bonus_dnode(dr); +#endif + if (*datap != db->db.db_data) { int slots = DB_DNODE(db)->dn_num_slots; int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c index c9ff43fa6331..ef62d394a919 100644 --- a/module/zfs/dnode.c +++ b/module/zfs/dnode.c @@ -390,6 +390,14 @@ dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx) rw_enter(&dn->dn_struct_rwlock, RW_WRITER); ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - (dn->dn_nblkptr-1) * sizeof (blkptr_t)); + + if (newsize < dn->dn_bonuslen) { + /* clear any data after the end of the new size */ + size_t diff = dn->dn_bonuslen - newsize; + char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize; + bzero(data_end, diff); + } + dn->dn_bonuslen = newsize; if (newsize == 0) dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN; From c8bbf7c00b3e1d949f928fd3aace234e38906a12 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Thu, 15 Aug 2019 17:36:24 -0600 Subject: [PATCH 03/68] Improve write performance by using dmu_read_by_dnode() In zfs_log_write(), we can use dmu_read_by_dnode() rather than dmu_read() thus avoiding unnecessary dnode_hold() calls. We get a 2-5% performance gain for large sequential_writes tests, >=128K writes to files with recordsize=8K. Testing done on Ubuntu 18.04 with 4.15 kernel, 8vCPUs and SSD storage on VMware ESX. Reviewed-by: Brian Behlendorf Signed-off-by: Tony Nguyen Closes #9156 --- module/zfs/zfs_log.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/module/zfs/zfs_log.c b/module/zfs/zfs_log.c index 622ce08acd27..41b663b65fb8 100644 --- a/module/zfs/zfs_log.c +++ b/module/zfs/zfs_log.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -510,6 +511,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype, znode_t *zp, offset_t off, ssize_t resid, int ioflag, zil_callback_t callback, void *callback_data) { + dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl); uint32_t blocksize = zp->z_blksz; itx_wr_state_t write_state; uintptr_t fsync_cnt; @@ -556,13 +558,16 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype, itx = zil_itx_create(txtype, sizeof (*lr) + (wr_state == WR_COPIED ? 
 		    len : 0));
 		lr = (lr_write_t *)&itx->itx_lr;
-		if (wr_state == WR_COPIED && dmu_read(ZTOZSB(zp)->z_os,
-		    zp->z_id, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
+
+		DB_DNODE_ENTER(db);
+		if (wr_state == WR_COPIED && dmu_read_by_dnode(DB_DNODE(db),
+		    off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
 			zil_itx_destroy(itx);
 			itx = zil_itx_create(txtype, sizeof (*lr));
 			lr = (lr_write_t *)&itx->itx_lr;
 			wr_state = WR_NEED_COPY;
 		}
+		DB_DNODE_EXIT(db);
 
 		itx->itx_wr_state = wr_state;
 		lr->lr_foid = zp->z_id;

From 0f8ff49eb69311dca3ab799e18d1a0ab389befac Mon Sep 17 00:00:00 2001
From: Serapheim Dimitropoulos
Date: Thu, 15 Aug 2019 16:53:53 -0700
Subject: [PATCH 04/68] dmu_tx_wait() hang likely due to cv_signal() in dsl_pool_dirty_delta()

Even though the bug's writeup (Github issue #9136) is very detailed, we
still don't know exactly how we got to that state, thus I wasn't able
to reproduce the bug. That said, we can make an educated guess by
combining the information in the filed issue with the code.

From the fact that `dp_dirty_total` was 0 (which is less than
`zfs_dirty_data_max`) we know that there was one thread that set it to
0 and then signaled one of the waiters of `dp_spaceavail_cv` [see
`dsl_pool_dirty_delta()` which is also the only place that
`dp_dirty_total` is changed]. Thus, the only logical explanation for
the bug being hit is that the waiter that was just awakened didn't go
through `dsl_pool_dirty_data()`. Given that this function is only
called by `dsl_pool_dirty_space()` or `dsl_pool_undirty_space()` I can
only think of two possible ways of the above scenario happening:

[1] The waiter didn't call into either of the two functions - which I
    find highly unlikely (i.e. why wait on `dp_spaceavail_cv` to begin
    with?).

[2] The waiter did call one of the above functions but passed 0 as the
    space/delta to be dirtied (or undirtied) and then the callee
    returned immediately (e.g. both `dsl_pool_dirty_space()` and
    `dsl_pool_undirty_space()` return immediately when space is 0).

In any case and no matter how we got there, the easy fix would be to
just broadcast to all waiters whenever `dp_dirty_total` hits 0. That
said, and given that we've never hit this before, it would make sense
to think more on why the above situation occurred.

Attempting to mimic what Prakash was doing in the filed issue, I
created a dataset with `sync=always` and started doing contiguous
writes in a file within that dataset. I observed with DTrace that even
though we update the pool's dirty data accounting when we dirty stuff,
the accounting wouldn't be decremented incrementally as we were done
with the ZIOs of those writes (the reason being that
`dbuf_write_physdone()` isn't called as we go through the override code
paths, and thus `dsl_pool_undirty_space()` is never called). As a
result we'd have to wait until we get to `dsl_pool_sync()` where we
zero out all dirty data accounting for the pool and the current TXG's
metadata.

In addition, as Matt noted and I later verified, the same issue would
arise when using dedup.

In both cases (sync & dedup) we shouldn't have to wait until
`dsl_pool_sync()` zeros out the accounting data. According to the
comment in that part of the code, the reasons why we do the zeroing
have nothing to do with what we observe:

````
	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(), also
	 * rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
````

Ideally what we want to do is to undirty in the accounting exactly what
we dirty (I use the word ideally as we can still have rounding errors).
This would make the behavior of the system clearer and more
predictable.

Another interesting issue that I observed with DTrace was that we
wouldn't update any of the pool's dirty data accounting whenever we
would dirty and/or undirty MOS data. In addition, every time we would
change the size of a dbuf through `dbuf_new_size()` we wouldn't update
the accounted space dirtied in the appropriate dirty record, so when
ZIOs are done we would undirty less than we dirtied from the pool's
accounting point of view.

For the first two issues observed (sync & dedup), this patch ensures
that we still update the pool's accounting when we undirty data,
regardless of the write being physical or not.

For changes in the MOS, we first make sure to zero out the pool's dirty
data accounting in `dsl_pool_sync()` after we've synced the MOS. Then
we can go ahead and enable the update of the pool's dirty data
accounting whenever we change MOS data.

Another fix is that we now update the accounting explicitly for
rounding errors in `dbuf_write_done()`.

Finally, `dbuf_new_size()` now updates the accounted space of the
appropriate dirty record correctly.

The problem is that we still don't know how the bug came up in the
filed issue. That said, the issues fixed seem to be very relevant, so
instead of going with the broadcasting solution right away, I decided
to leave this patch as is.

Reviewed-by: Brian Behlendorf
Reviewed-by: Prakash Surya
Signed-off-by: Serapheim Dimitropoulos
External-issue: DLPX-47285
Closes #9137
---
 module/zfs/dbuf.c       | 34 +++++++++++++++++++++++++++++-----
 module/zfs/dmu.c        |  3 +++
 module/zfs/dmu_objset.c | 17 +++++++++++++----
 module/zfs/dsl_pool.c   | 24 +++++++++++++++---------
 4 files changed, 60 insertions(+), 18 deletions(-)

diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index f8f96c142e9f..ace862637de1 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -1890,9 +1890,11 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
 	db->db.db_size = size;
 
 	if (db->db_level == 0) {
-		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
 		db->db_last_dirty->dt.dl.dr_data = buf;
 	}
+	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
+	ASSERT3U(db->db_last_dirty->dr_accounted, ==, osize);
+	db->db_last_dirty->dr_accounted = size;
 	mutex_exit(&db->db_mtx);
 
 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
@@ -2105,7 +2107,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 		    sizeof (dbuf_dirty_record_t),
 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
 	}
-	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
+	if (db->db_blkid != DMU_BONUS_BLKID)
 		dr->dr_accounted = db->db.db_size;
 	dr->dr_dbuf = db;
 	dr->dr_txg = tx->tx_txg;
@@ -4356,8 +4358,7 @@ dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
 	/*
 	 * The callback will be called io_phys_children times. Retire one
 	 * portion of our dirty space each time we are called. Any rounding
-	 * error will be cleaned up by dsl_pool_sync()'s call to
-	 * dsl_pool_undirty_space().
+	 * error will be cleaned up by dbuf_write_done().
*/ delta = dr->dr_accounted / zio->io_phys_children; dsl_pool_undirty_space(dp, delta, zio->io_txg); @@ -4440,13 +4441,36 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) mutex_destroy(&dr->dt.di.dr_mtx); list_destroy(&dr->dt.di.dr_children); } - kmem_free(dr, sizeof (dbuf_dirty_record_t)); cv_broadcast(&db->db_changed); ASSERT(db->db_dirtycnt > 0); db->db_dirtycnt -= 1; db->db_data_pending = NULL; dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); + + /* + * If we didn't do a physical write in this ZIO and we + * still ended up here, it means that the space of the + * dbuf that we just released (and undirtied) above hasn't + * been marked as undirtied in the pool's accounting. + * + * Thus, we undirty that space in the pool's view of the + * world here. For physical writes this type of update + * happens in dbuf_write_physdone(). + * + * If we did a physical write, cleanup any rounding errors + * that came up due to writing multiple copies of a block + * on disk [see dbuf_write_physdone()]. + */ + if (zio->io_phys_children == 0) { + dsl_pool_undirty_space(dmu_objset_pool(os), + dr->dr_accounted, zio->io_txg); + } else { + dsl_pool_undirty_space(dmu_objset_pool(os), + dr->dr_accounted % zio->io_phys_children, zio->io_txg); + } + + kmem_free(dr, sizeof (dbuf_dirty_record_t)); } static void diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index 955588fb7b6a..aa3ef6458d47 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1090,6 +1090,9 @@ dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, dmu_buf_rele_array(dbp, numbufs, FTAG); } +/* + * Note: Lustre is an external consumer of this interface. + */ void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, const void *buf, dmu_tx_t *tx) diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 7a540bdfaf10..3afafd1827ac 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -2908,9 +2908,17 @@ dmu_fsname(const char *snapname, char *buf) } /* - * Call when we think we're going to write/free space in open context to track - * the amount of dirty data in the open txg, which is also the amount - * of memory that can not be evicted until this txg syncs. + * Call when we think we're going to write/free space in open context + * to track the amount of dirty data in the open txg, which is also the + * amount of memory that can not be evicted until this txg syncs. + * + * Note that there are two conditions where this can be called from + * syncing context: + * + * [1] When we just created the dataset, in which case we go on with + * updating any accounting of dirty data as usual. + * [2] When we are dirtying MOS data, in which case we only update the + * pool's accounting of dirty data. */ void dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx) @@ -2920,8 +2928,9 @@ dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx) if (ds != NULL) { dsl_dir_willuse_space(ds->ds_dir, aspace, tx); - dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx); } + + dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx); } #if defined(_KERNEL) diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index 9fb3a061d946..1f1fd6462720 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -658,15 +658,6 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) } VERIFY0(zio_wait(zio)); - /* - * We have written all of the accounted dirty data, so our - * dp_space_towrite should now be zero. However, some seldom-used - * code paths do not adhere to this (e.g. 
dbuf_undirty(), also
-	 * rounding error in dbuf_write_physdone).
-	 * Shore up the accounting of any dirtied space now.
-	 */
-	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
-
 	/*
 	 * Update the long range free counter after
 	 * we're done syncing user data
@@ -762,6 +753,21 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
 		dsl_pool_sync_mos(dp, tx);
 	}
 
+	/*
+	 * We have written all of the accounted dirty data, so our
+	 * dp_space_towrite should now be zero. However, some seldom-used
+	 * code paths do not adhere to this (e.g. dbuf_undirty()). Shore up
+	 * the accounting of any dirtied space now.
+	 *
+	 * Note that, besides any dirty data from datasets, the amount of
+	 * dirty data in the MOS is also accounted by the pool. Therefore,
+	 * we want to do this cleanup after dsl_pool_sync_mos() so we don't
+	 * attempt to update the accounting for the same dirty data twice.
+	 * (i.e. at this point we only update the accounting for the space
+	 * that we know we "leaked").
+	 */
+	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
+
 	/*
 	 * If we modify a dataset in the same txg that we want to destroy it,
 	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.

From 9323aad14d2f99d6fff1e50cce25fa6361495ec4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Michael=20Niew=C3=B6hner?=
Date: Fri, 16 Aug 2019 17:02:32 +0200
Subject: [PATCH 05/68] initramfs: fixes for (debian) initramfs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* contrib/initramfs: include /etc/default/zfs and /etc/zfs/zfs-functions

  At least Debian needs /etc/default/zfs and /etc/zfs/zfs-functions for
  its initramfs. Include both in the build when initramfs is configured.

* contrib/initramfs: include 60-zvol.rules and zvol_id

  Include 60-zvol.rules and zvol_id and set udev as a predependency
  instead of Debian's zdev. This makes Debian's additional zdev hook
  unneeded.

* Correct initconfdir substitution for some distros

  Not every Linux distro uses @sysconfdir@/default; some use
  @initconfdir@, which is already determined by configure. Let's use it.

* systemd: prevent possible conflict between systemd and sysvinit

  Systemd will not load a sysvinit service if a unit exists with the
  same name. This prevents conflicts between sysvinit and systemd.

  In ZFS there is one sysvinit service that does not have a systemd
  service counterpart, only a target, zfs-import.target.

  Usually it does not make any sense to install both, but it is
  possible. Let's prevent any conflict by masking zfs-import.service by
  default. This does no harm, even if init.d/zfs-import does not exist.
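To make the masking mechanism concrete, this is what the new
install-data-hook below effects at install time (the unit path here is
illustrative; the real directory comes from the $(systemdunitdir)
substitution):

````
# A unit symlinked to /dev/null is "masked": systemd refuses to load
# it, and it will not fall back to a same-named sysvinit script either.
ln -s /dev/null /lib/systemd/system/zfs-import.service
systemctl is-enabled zfs-import.service    # reports "masked"
````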
Reviewed-by: Chris Wedgwood Reviewed-by: Brian Behlendorf Tested-by: Alex Ingram Tested-by: Dreamcat4 Signed-off-by: Michael Niewöhner Closes #7904 Closes #9089 --- contrib/initramfs/Makefile.am | 16 +++++++++++----- contrib/initramfs/hooks/zfs.in | 6 ++++-- etc/systemd/system/Makefile.am | 4 ++++ rpm/generic/zfs.spec.in | 13 +++++++++++-- 4 files changed, 30 insertions(+), 9 deletions(-) diff --git a/contrib/initramfs/Makefile.am b/contrib/initramfs/Makefile.am index fefd676ce0f9..52bdeb2afe54 100644 --- a/contrib/initramfs/Makefile.am +++ b/contrib/initramfs/Makefile.am @@ -6,10 +6,15 @@ initrd_SCRIPTS = \ SUBDIRS = hooks scripts EXTRA_DIST = \ + $(top_srcdir)/etc/init.d/zfs \ + $(top_srcdir)/etc/init.d/zfs-functions \ $(top_srcdir)/contrib/initramfs/conf.d/zfs \ $(top_srcdir)/contrib/initramfs/conf-hooks.d/zfs \ $(top_srcdir)/contrib/initramfs/README.initramfs.markdown +$(top_srcdir)/etc/init.d/zfs $(top_srcdir)/etc/init.d/zfs-functions: + $(MAKE) -C $(top_srcdir)/etc/init.d zfs zfs-functions + install-initrdSCRIPTS: $(EXTRA_DIST) for d in conf.d conf-hooks.d scripts/local-top; do \ $(MKDIR_P) $(DESTDIR)$(initrddir)/$$d; \ @@ -21,8 +26,9 @@ install-initrdSCRIPTS: $(EXTRA_DIST) cp $(top_builddir)/contrib/initramfs/$$d/zfs \ $(DESTDIR)$(initrddir)/$$d/; \ done - if [ -f $(top_builddir)/etc/init.d/zfs ]; then \ - $(MKDIR_P) $(DESTDIR)$(DEFAULT_INITCONF_DIR); \ - cp $(top_builddir)/etc/init.d/zfs \ - $(DESTDIR)$(DEFAULT_INITCONF_DIR)/; \ - fi + $(MKDIR_P) $(DESTDIR)$(DEFAULT_INITCONF_DIR); \ + cp $(top_builddir)/etc/init.d/zfs \ + $(DESTDIR)$(DEFAULT_INITCONF_DIR)/; \ + $(MKDIR_P) $(DESTDIR)$(sysconfdir)/zfs; \ + cp $(top_builddir)/etc/init.d/zfs-functions \ + $(DESTDIR)$(sysconfdir)/zfs/ diff --git a/contrib/initramfs/hooks/zfs.in b/contrib/initramfs/hooks/zfs.in index e35354141d81..15f23c908b23 100755 --- a/contrib/initramfs/hooks/zfs.in +++ b/contrib/initramfs/hooks/zfs.in @@ -4,16 +4,18 @@ # # This hook installs udev rules for ZoL. -PREREQ="zdev" +PREREQ="udev" # These prerequisites are provided by the zfsutils package. The zdb utility is # not strictly required, but it can be useful at the initramfs recovery prompt. COPY_EXEC_LIST="@sbindir@/zdb @sbindir@/zpool @sbindir@/zfs" COPY_EXEC_LIST="$COPY_EXEC_LIST @mounthelperdir@/mount.zfs @udevdir@/vdev_id" +COPY_EXEC_LIST="$COPY_EXEC_LIST @udevdir@/zvol_id" COPY_FILE_LIST="/etc/hostid @sysconfdir@/zfs/zpool.cache" -COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/default/zfs" +COPY_FILE_LIST="$COPY_FILE_LIST @DEFAULT_INITCONF_DIR@/zfs" COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/zfs/zfs-functions" COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/zfs/vdev_id.conf" +COPY_FILE_LIST="$COPY_FILE_LIST @udevruledir@/60-zvol.rules" COPY_FILE_LIST="$COPY_FILE_LIST @udevruledir@/69-vdev.rules" # These prerequisites are provided by the base system. 
diff --git a/etc/systemd/system/Makefile.am b/etc/systemd/system/Makefile.am index 9249f15eb455..ba73f558a8a0 100644 --- a/etc/systemd/system/Makefile.am +++ b/etc/systemd/system/Makefile.am @@ -31,5 +31,9 @@ $(systemdunit_DATA) $(systemdpreset_DATA):%:%.in -e 's,@sysconfdir\@,$(sysconfdir),g' \ $< >'$@' +install-data-hook: + $(MKDIR_P) "$(DESTDIR)$(systemdunitdir)" + ln -s /dev/null "$(DESTDIR)$(systemdunitdir)/zfs-import.service" + distclean-local:: -$(RM) $(systemdunit_DATA) $(systemdpreset_DATA) diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in index a74d066bbae1..e4aef6725266 100644 --- a/rpm/generic/zfs.spec.in +++ b/rpm/generic/zfs.spec.in @@ -433,6 +433,14 @@ systemctl --system daemon-reload >/dev/null || true %{_udevdir}/vdev_id %{_udevdir}/zvol_id %{_udevdir}/rules.d/* +%if ! 0%{?_systemd} || 0%{?_initramfs} +# Files needed for sysvinit and initramfs-tools +%{_sysconfdir}/%{name}/zfs-functions +%config(noreplace) %{_initconfdir}/zfs +%else +%exclude %{_sysconfdir}/%{name}/zfs-functions +%exclude %{_initconfdir}/zfs +%endif %if 0%{?_systemd} %{_unitdir}/* %{_presetdir}/* @@ -440,9 +448,10 @@ systemctl --system daemon-reload >/dev/null || true %{_systemdgeneratordir}/* %else %config(noreplace) %{_sysconfdir}/init.d/* -%config(noreplace) %{_initconfdir}/zfs %endif -%config(noreplace) %{_sysconfdir}/%{name} +%config(noreplace) %{_sysconfdir}/%{name}/zed.d/* +%config(noreplace) %{_sysconfdir}/%{name}/zpool.d/* +%config(noreplace) %{_sysconfdir}/%{name}/vdev_id.conf.*.example %attr(440, root, root) %config(noreplace) %{_sysconfdir}/sudoers.d/* %files -n libzpool2 From f09fda5071813751ba3fa77c28e588689795e17e Mon Sep 17 00:00:00 2001 From: Paul Dagnelie Date: Fri, 16 Aug 2019 08:08:21 -0700 Subject: [PATCH 06/68] Cap metaslab memory usage On systems with large amounts of storage and high fragmentation, a huge amount of space can be used by storing metaslab range trees. Since metaslabs are only unloaded during a txg sync, and only if they have been inactive for 8 txgs, it is possible to get into a state where all of the system's memory is consumed by range trees and metaslabs, and txgs cannot sync. While ZFS knows how to evict ARC data when needed, it has no such mechanism for range tree data. This can result in boot hangs for some system configurations. First, we add the ability to unload metaslabs outside of syncing context. Second, we store a multilist of all loaded metaslabs, sorted by their selection txg, so we can quickly identify the oldest metaslabs. We use a multilist to reduce lock contention during heavy write workloads. Finally, we add logic that will unload a metaslab when we're loading a new metaslab, if we're using more than a certain fraction of the available memory on range trees. 
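To make the threshold concrete, the check that now runs before each
metaslab load boils down to the following (distilled from
metaslab_potentially_evict() in the diff below; the skc_* fields are
the SPL kmem-cache object counters it reads):

````
	uint64_t allmem = arc_all_memory();               /* usable memory */
	uint64_t inuse = range_seg_cache->skc_obj_total;  /* live range segs */
	uint64_t size = range_seg_cache->skc_obj_size;    /* bytes per seg */

	/* Over the cap? Evict least recently selected metaslabs. */
	while (allmem * zfs_metaslab_mem_limit / 100 < inuse * size) {
		/* unload an old, idle metaslab, then re-read skc_obj_total */
	}
````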
Reviewed-by: Matt Ahrens Reviewed-by: George Wilson Reviewed-by: Sebastien Roy Reviewed-by: Serapheim Dimitropoulos Reviewed-by: Brian Behlendorf Signed-off-by: Paul Dagnelie Closes #9128 --- include/sys/arc.h | 1 + include/sys/metaslab.h | 6 +- include/sys/metaslab_impl.h | 12 ++ man/man5/zfs-module-parameters.5 | 15 ++ module/zfs/arc.c | 3 +- module/zfs/metaslab.c | 274 +++++++++++++++++++++++++++---- module/zfs/spa.c | 4 + module/zfs/spa_log_spacemap.c | 1 + module/zfs/vdev.c | 14 -- module/zfs/vdev_initialize.c | 7 +- module/zfs/vdev_trim.c | 10 +- 11 files changed, 289 insertions(+), 58 deletions(-) diff --git a/include/sys/arc.h b/include/sys/arc.h index dc2fd03647f3..59c0bea92022 100644 --- a/include/sys/arc.h +++ b/include/sys/arc.h @@ -291,6 +291,7 @@ void arc_flush(spa_t *spa, boolean_t retry); void arc_tempreserve_clear(uint64_t reserve); int arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg); +uint64_t arc_all_memory(void); uint64_t arc_target_bytes(void); void arc_init(void); void arc_fini(void); diff --git a/include/sys/metaslab.h b/include/sys/metaslab.h index 7dd5fe2b54c7..00b8b4758110 100644 --- a/include/sys/metaslab.h +++ b/include/sys/metaslab.h @@ -57,7 +57,6 @@ int metaslab_sort_by_flushed(const void *, const void *); uint64_t metaslab_unflushed_changes_memused(metaslab_t *); int metaslab_load(metaslab_t *); -void metaslab_potentially_unload(metaslab_t *, uint64_t); void metaslab_unload(metaslab_t *); boolean_t metaslab_flush(metaslab_t *, dmu_tx_t *); @@ -110,7 +109,7 @@ uint64_t metaslab_class_expandable_space(metaslab_class_t *); boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int, int, zio_t *, int); void metaslab_class_throttle_unreserve(metaslab_class_t *, int, int, zio_t *); - +void metaslab_class_evict_old(metaslab_class_t *, uint64_t); uint64_t metaslab_class_get_alloc(metaslab_class_t *); uint64_t metaslab_class_get_space(metaslab_class_t *); uint64_t metaslab_class_get_dspace(metaslab_class_t *); @@ -133,7 +132,8 @@ void metaslab_group_alloc_decrement(spa_t *, uint64_t, void *, int, int, void metaslab_group_alloc_verify(spa_t *, const blkptr_t *, void *, int); void metaslab_recalculate_weight_and_sort(metaslab_t *); void metaslab_disable(metaslab_t *); -void metaslab_enable(metaslab_t *, boolean_t); +void metaslab_enable(metaslab_t *, boolean_t, boolean_t); +void metaslab_set_selected_txg(metaslab_t *, uint64_t); extern int metaslab_debug_load; diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h index 08ee8d279ddd..07f07c02d1a8 100644 --- a/include/sys/metaslab_impl.h +++ b/include/sys/metaslab_impl.h @@ -36,6 +36,7 @@ #include #include #include +#include #ifdef __cplusplus extern "C" { @@ -194,6 +195,12 @@ struct metaslab_class { uint64_t mc_space; /* total space (alloc + free) */ uint64_t mc_dspace; /* total deflated space */ uint64_t mc_histogram[RANGE_TREE_HISTOGRAM_SIZE]; + + /* + * List of all loaded metaslabs in the class, sorted in order of most + * recent use. + */ + multilist_t *mc_metaslab_txg_list; }; /* @@ -378,6 +385,7 @@ struct metaslab { range_tree_t *ms_allocating[TXG_SIZE]; range_tree_t *ms_allocatable; uint64_t ms_allocated_this_txg; + uint64_t ms_allocating_total; /* * The following range trees are accessed only from syncing context. 
@@ -508,6 +516,10 @@ struct metaslab { avl_node_t ms_group_node; /* node in metaslab group tree */ txg_node_t ms_txg_node; /* per-txg dirty metaslab links */ avl_node_t ms_spa_txg_node; /* node in spa_metaslabs_by_txg */ + /* + * Node in metaslab class's selected txg list + */ + multilist_node_t ms_class_txg_node; /* * Allocs and frees that are committed to the vdev log spacemap but diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5 index c25f2f04678b..8a1048bee43e 100644 --- a/man/man5/zfs-module-parameters.5 +++ b/man/man5/zfs-module-parameters.5 @@ -386,6 +386,21 @@ considering only the histogram instead. Default value: \fB3600 seconds\fR (one hour) .RE +.sp +.ne 2 +.na +\fBzfs_metaslab_mem_limit\fR (int) +.ad +.RS 12n +When we are loading a new metaslab, we check the amount of memory being used +to store metaslab range trees. If it is over a threshold, we attempt to unload +the least recently used metaslab to prevent the system from clogging all of +its memory with range trees. This tunable sets the percentage of total system +memory that is the threshold. +.sp +Default value: \fB75 percent\fR +.RE + .sp .ne 2 .na diff --git a/module/zfs/arc.c b/module/zfs/arc.c index 90a731bffa96..b5fca8e26313 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -1110,7 +1110,6 @@ static boolean_t arc_is_overflowing(void); static void arc_buf_watch(arc_buf_t *); static void arc_tuning_update(void); static void arc_prune_async(int64_t); -static uint64_t arc_all_memory(void); static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *); static uint32_t arc_bufc_to_flags(arc_buf_contents_t); @@ -4828,7 +4827,7 @@ arc_reduce_target_size(int64_t to_free) * Return maximum amount of memory that we could possibly use. Reduced * to half of all memory in user space which is primarily used for testing. */ -static uint64_t +uint64_t arc_all_memory(void) { #ifdef _KERNEL diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 9a9a5e0cf8a2..2f92fffa4ec0 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -278,6 +278,13 @@ int max_disabled_ms = 3; */ unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */ +/* + * Maximum percentage of memory to use on storing loaded metaslabs. If loading + * a metaslab would take it over this percentage, the oldest selected metaslab + * is automatically unloaded. 
+ */ +int zfs_metaslab_mem_limit = 75; + static uint64_t metaslab_weight(metaslab_t *); static void metaslab_set_fragmentation(metaslab_t *); static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t); @@ -286,6 +293,8 @@ static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t); static void metaslab_passivate(metaslab_t *msp, uint64_t weight); static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp); static void metaslab_flush_update(metaslab_t *, dmu_tx_t *); +static unsigned int metaslab_idx_func(multilist_t *, void *); +static void metaslab_evict(metaslab_t *, uint64_t); #ifdef _METASLAB_TRACING kmem_cache_t *metaslab_alloc_trace_cache; #endif @@ -306,6 +315,8 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops) mc->mc_rotor = NULL; mc->mc_ops = ops; mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL); + mc->mc_metaslab_txg_list = multilist_create(sizeof (metaslab_t), + offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func); mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count * sizeof (zfs_refcount_t), KM_SLEEP); mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count * @@ -332,6 +343,7 @@ metaslab_class_destroy(metaslab_class_t *mc) kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count * sizeof (uint64_t)); mutex_destroy(&mc->mc_lock); + multilist_destroy(mc->mc_metaslab_txg_list); kmem_free(mc, sizeof (metaslab_class_t)); } @@ -517,6 +529,47 @@ metaslab_class_expandable_space(metaslab_class_t *mc) return (space); } +void +metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg) +{ + multilist_t *ml = mc->mc_metaslab_txg_list; + for (int i = 0; i < multilist_get_num_sublists(ml); i++) { + multilist_sublist_t *mls = multilist_sublist_lock(ml, i); + metaslab_t *msp = multilist_sublist_head(mls); + multilist_sublist_unlock(mls); + while (msp != NULL) { + mutex_enter(&msp->ms_lock); + /* + * Once we've hit a metaslab selected too recently to + * evict, we're done evicting for now. + */ + if (msp->ms_selected_txg + metaslab_unload_delay >= + txg) { + mutex_exit(&msp->ms_lock); + break; + } + + /* + * If the metaslab has been removed from the list + * (which could happen if we were at the memory limit + * and it was evicted during this loop), then we can't + * proceed and we should restart the sublist. 
+ */ + if (!multilist_link_active(&msp->ms_class_txg_node)) { + mutex_exit(&msp->ms_lock); + i--; + break; + } + mls = multilist_sublist_lock(ml, i); + metaslab_t *next_msp = multilist_sublist_next(mls, msp); + multilist_sublist_unlock(mls); + metaslab_evict(msp, txg); + mutex_exit(&msp->ms_lock); + msp = next_msp; + } + } +} + static int metaslab_compare(const void *x1, const void *x2) { @@ -960,6 +1013,14 @@ metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp) mutex_enter(&mg->mg_lock); ASSERT(msp->ms_group == mg); avl_remove(&mg->mg_metaslab_tree, msp); + + metaslab_class_t *mc = msp->ms_group->mg_class; + multilist_sublist_t *mls = + multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp); + if (multilist_link_active(&msp->ms_class_txg_node)) + multilist_sublist_remove(mls, msp); + multilist_sublist_unlock(mls); + msp->ms_group = NULL; mutex_exit(&mg->mg_lock); } @@ -1519,6 +1580,13 @@ metaslab_flush_wait(metaslab_t *msp) cv_wait(&msp->ms_flush_cv, &msp->ms_lock); } +static unsigned int +metaslab_idx_func(multilist_t *ml, void *arg) +{ + metaslab_t *msp = arg; + return (msp->ms_id % multilist_get_num_sublists(ml)); +} + uint64_t metaslab_allocated_space(metaslab_t *msp) { @@ -1577,6 +1645,8 @@ metaslab_verify_space(metaslab_t *msp, uint64_t txg) allocating += range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]); } + ASSERT3U(allocating + msp->ms_allocated_this_txg, ==, + msp->ms_allocating_total); ASSERT3U(msp->ms_deferspace, ==, range_tree_space(msp->ms_defer[0]) + @@ -1792,6 +1862,86 @@ metaslab_verify_weight_and_frag(metaslab_t *msp) VERIFY3U(msp->ms_weight, ==, weight); } +/* + * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from + * this class that was used longest ago, and attempt to unload it. We don't + * want to spend too much time in this loop to prevent performance + * degredation, and we expect that most of the time this operation will + * succeed. Between that and the normal unloading processing during txg sync, + * we expect this to keep the metaslab memory usage under control. + */ +static void +metaslab_potentially_evict(metaslab_class_t *mc) +{ +#ifdef _KERNEL + uint64_t allmem = arc_all_memory(); + extern kmem_cache_t *range_seg_cache; + uint64_t inuse = range_seg_cache->skc_obj_total; + uint64_t size = range_seg_cache->skc_obj_size; + int tries = 0; + for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size && + tries < multilist_get_num_sublists(mc->mc_metaslab_txg_list) * 2; + tries++) { + unsigned int idx = multilist_get_random_index( + mc->mc_metaslab_txg_list); + multilist_sublist_t *mls = + multilist_sublist_lock(mc->mc_metaslab_txg_list, idx); + metaslab_t *msp = multilist_sublist_head(mls); + multilist_sublist_unlock(mls); + while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 < + inuse * size) { + VERIFY3P(mls, ==, multilist_sublist_lock( + mc->mc_metaslab_txg_list, idx)); + ASSERT3U(idx, ==, + metaslab_idx_func(mc->mc_metaslab_txg_list, msp)); + + if (!multilist_link_active(&msp->ms_class_txg_node)) { + multilist_sublist_unlock(mls); + break; + } + metaslab_t *next_msp = multilist_sublist_next(mls, msp); + multilist_sublist_unlock(mls); + /* + * If the metaslab is currently loading there are two + * cases. If it's the metaslab we're evicting, we + * can't continue on or we'll panic when we attempt to + * recursively lock the mutex. If it's another + * metaslab that's loading, it can be safely skipped, + * since we know it's very new and therefore not a + * good eviction candidate. 
We check later once the + * lock is held that the metaslab is fully loaded + * before actually unloading it. + */ + if (msp->ms_loading) { + msp = next_msp; + inuse = range_seg_cache->skc_obj_total; + continue; + } + /* + * We can't unload metaslabs with no spacemap because + * they're not ready to be unloaded yet. We can't + * unload metaslabs with outstanding allocations + * because doing so could cause the metaslab's weight + * to decrease while it's unloaded, which violates an + * invariant that we use to prevent unnecessary + * loading. We also don't unload metaslabs that are + * currently active because they are high-weight + * metaslabs that are likely to be used in the near + * future. + */ + mutex_enter(&msp->ms_lock); + if (msp->ms_allocator == -1 && msp->ms_sm != NULL && + msp->ms_allocating_total == 0) { + metaslab_unload(msp); + } + mutex_exit(&msp->ms_lock); + msp = next_msp; + inuse = range_seg_cache->skc_obj_total; + } + } +#endif +} + static int metaslab_load_impl(metaslab_t *msp) { @@ -2024,6 +2174,16 @@ metaslab_load(metaslab_t *msp) */ ASSERT(!msp->ms_loaded); + /* + * If we're loading a metaslab in the normal class, consider evicting + * another one to keep our memory usage under the limit defined by the + * zfs_metaslab_mem_limit tunable. + */ + if (spa_normal_class(msp->ms_group->mg_class->mc_spa) == + msp->ms_group->mg_class) { + metaslab_potentially_evict(msp->ms_group->mg_class); + } + int error = metaslab_load_impl(msp); ASSERT(MUTEX_HELD(&msp->ms_lock)); @@ -2038,7 +2198,13 @@ metaslab_unload(metaslab_t *msp) { ASSERT(MUTEX_HELD(&msp->ms_lock)); - metaslab_verify_weight_and_frag(msp); + /* + * This can happen if a metaslab is selected for eviction (in + * metaslab_potentially_evict) and then unloaded during spa_sync (via + * metaslab_class_evict_old). + */ + if (!msp->ms_loaded) + return; range_tree_vacate(msp->ms_allocatable, NULL, NULL); msp->ms_loaded = B_FALSE; @@ -2047,6 +2213,15 @@ metaslab_unload(metaslab_t *msp) msp->ms_activation_weight = 0; msp->ms_weight &= ~METASLAB_ACTIVE_MASK; + if (msp->ms_group != NULL) { + metaslab_class_t *mc = msp->ms_group->mg_class; + multilist_sublist_t *mls = + multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp); + if (multilist_link_active(&msp->ms_class_txg_node)) + multilist_sublist_remove(mls, msp); + multilist_sublist_unlock(mls); + } + /* * We explicitly recalculate the metaslab's weight based on its space * map (as it is now not loaded). 
We want unload metaslabs to always @@ -2063,6 +2238,20 @@ metaslab_unload(metaslab_t *msp) metaslab_recalculate_weight_and_sort(msp); } +void +metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) +{ + ASSERT(MUTEX_HELD(&msp->ms_lock)); + metaslab_class_t *mc = msp->ms_group->mg_class; + multilist_sublist_t *mls = + multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp); + if (multilist_link_active(&msp->ms_class_txg_node)) + multilist_sublist_remove(mls, msp); + msp->ms_selected_txg = txg; + multilist_sublist_insert_tail(mls, msp); + multilist_sublist_unlock(mls); +} + void metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, int64_t defer_delta, int64_t space_delta) @@ -2091,6 +2280,7 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL); + multilist_link_init(&ms->ms_class_txg_node); ms->ms_id = id; ms->ms_start = id << vd->vdev_ms_shift; @@ -2703,8 +2893,13 @@ metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, * If we're activating for the claim code, we don't want to actually * set the metaslab up for a specific allocator. */ - if (activation_weight == METASLAB_WEIGHT_CLAIM) + if (activation_weight == METASLAB_WEIGHT_CLAIM) { + ASSERT0(msp->ms_activation_weight); + msp->ms_activation_weight = msp->ms_weight; + metaslab_group_sort(mg, msp, msp->ms_weight | + activation_weight); return (0); + } metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ? mg->mg_primaries : mg->mg_secondaries); @@ -2719,6 +2914,12 @@ metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, ASSERT3S(msp->ms_allocator, ==, -1); msp->ms_allocator = allocator; msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY); + + ASSERT0(msp->ms_activation_weight); + msp->ms_activation_weight = msp->ms_weight; + metaslab_group_sort_impl(mg, msp, + msp->ms_weight | activation_weight); + mutex_exit(&mg->mg_lock); return (0); @@ -2795,11 +2996,6 @@ metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight) return (error); } - ASSERT0(msp->ms_activation_weight); - msp->ms_activation_weight = msp->ms_weight; - metaslab_group_sort(msp->ms_group, msp, - msp->ms_weight | activation_weight); - ASSERT(msp->ms_loaded); ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); @@ -2894,14 +3090,15 @@ static void metaslab_preload(void *arg) { metaslab_t *msp = arg; - spa_t *spa = msp->ms_group->mg_vd->vdev_spa; + metaslab_class_t *mc = msp->ms_group->mg_class; + spa_t *spa = mc->mc_spa; fstrans_cookie_t cookie = spl_fstrans_mark(); ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); mutex_enter(&msp->ms_lock); (void) metaslab_load(msp); - msp->ms_selected_txg = spa_syncing_txg(spa); + metaslab_set_selected_txg(msp, spa_syncing_txg(spa)); mutex_exit(&msp->ms_lock); spl_fstrans_unmark(cookie); } @@ -3613,28 +3810,21 @@ metaslab_sync(metaslab_t *msp, uint64_t txg) dmu_tx_commit(tx); } -void -metaslab_potentially_unload(metaslab_t *msp, uint64_t txg) +static void +metaslab_evict(metaslab_t *msp, uint64_t txg) { - /* - * If the metaslab is loaded and we've not tried to load or allocate - * from it in 'metaslab_unload_delay' txgs, then unload it. 
- */ - if (msp->ms_loaded && - msp->ms_disabled == 0 && - msp->ms_selected_txg + metaslab_unload_delay < txg) { - for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { - VERIFY0(range_tree_space( - msp->ms_allocating[(txg + t) & TXG_MASK])); - } - if (msp->ms_allocator != -1) { - metaslab_passivate(msp, msp->ms_weight & - ~METASLAB_ACTIVE_MASK); - } + if (!msp->ms_loaded || msp->ms_disabled != 0) + return; - if (!metaslab_debug_unload) - metaslab_unload(msp); + for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { + VERIFY0(range_tree_space( + msp->ms_allocating[(txg + t) & TXG_MASK])); } + if (msp->ms_allocator != -1) + metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK); + + if (!metaslab_debug_unload) + metaslab_unload(msp); } /* @@ -3791,7 +3981,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) ASSERT0(range_tree_space(msp->ms_freeing)); ASSERT0(range_tree_space(msp->ms_freed)); ASSERT0(range_tree_space(msp->ms_checkpointing)); - + msp->ms_allocating_total -= msp->ms_allocated_this_txg; msp->ms_allocated_this_txg = 0; mutex_exit(&msp->ms_lock); } @@ -4072,6 +4262,7 @@ metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); + msp->ms_allocating_total += size; /* Track the last successful allocation */ msp->ms_alloc_txg = txg; @@ -4250,6 +4441,7 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, ASSERT(msp->ms_loaded); was_active = B_TRUE; + ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && mg->mg_secondaries[allocator] != NULL) { msp = mg->mg_secondaries[allocator]; @@ -4263,6 +4455,7 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, ASSERT(msp->ms_loaded); was_active = B_TRUE; + ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); } else { msp = find_valid_metaslab(mg, activation_weight, dva, d, want_unique, asize, allocator, try_hard, zal, @@ -4293,7 +4486,7 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, * capable of handling our request. It's possible that * another thread may have changed the weight while we * were blocked on the metaslab lock. We check the - * active status first to see if we need to reselect + * active status first to see if we need to set_selected_txg * a new metaslab. 
*/ if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { @@ -4336,7 +4529,7 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, continue; } - msp->ms_selected_txg = txg; + metaslab_set_selected_txg(msp, txg); int activation_error = metaslab_activate(msp, allocator, activation_weight); @@ -5027,6 +5220,7 @@ metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) mutex_enter(&msp->ms_lock); range_tree_remove(msp->ms_allocating[txg & TXG_MASK], offset, size); + msp->ms_allocating_total -= size; VERIFY(!msp->ms_condensing); VERIFY3U(offset, >=, msp->ms_start); @@ -5158,10 +5352,20 @@ metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, range_tree_clear(msp->ms_trim, offset, size); if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ + metaslab_class_t *mc = msp->ms_group->mg_class; + multilist_sublist_t *mls = + multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp); + if (!multilist_link_active(&msp->ms_class_txg_node)) { + msp->ms_selected_txg = txg; + multilist_sublist_insert_head(mls, msp); + } + multilist_sublist_unlock(mls); + if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) vdev_dirty(vd, VDD_METASLAB, msp, txg); range_tree_add(msp->ms_allocating[txg & TXG_MASK], offset, size); + msp->ms_allocating_total += size; } mutex_exit(&msp->ms_lock); @@ -5571,7 +5775,7 @@ metaslab_disable(metaslab_t *msp) } void -metaslab_enable(metaslab_t *msp, boolean_t sync) +metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload) { metaslab_group_t *mg = msp->ms_group; spa_t *spa = mg->mg_vd->vdev_spa; @@ -5589,6 +5793,8 @@ metaslab_enable(metaslab_t *msp, boolean_t sync) if (--msp->ms_disabled == 0) { mg->mg_ms_disabled--; cv_broadcast(&mg->mg_ms_disabled_cv); + if (unload) + metaslab_unload(msp); } mutex_exit(&msp->ms_lock); mutex_exit(&mg->mg_ms_disabled_lock); @@ -5710,6 +5916,10 @@ MODULE_PARM_DESC(metaslab_df_use_largest_segment, module_param(zfs_metaslab_max_size_cache_sec, ulong, 0644); MODULE_PARM_DESC(zfs_metaslab_max_size_cache_sec, "how long to trust the cached max chunk size of a metaslab"); + +module_param(zfs_metaslab_mem_limit, int, 0644); +MODULE_PARM_DESC(zfs_metaslab_mem_limit, + "percentage of memory that can be used to store metaslab range trees"); /* END CSTYLED */ #endif diff --git a/module/zfs/spa.c b/module/zfs/spa.c index 437efb50f900..c404e876b4bc 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -9013,6 +9013,10 @@ spa_sync(spa_t *spa, uint64_t txg) while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL) vdev_sync_done(vd, txg); + + metaslab_class_evict_old(spa->spa_normal_class, txg); + metaslab_class_evict_old(spa->spa_log_class, txg); + spa_sync_close_syncing_log_sm(spa); spa_update_dspace(spa); diff --git a/module/zfs/spa_log_spacemap.c b/module/zfs/spa_log_spacemap.c index ad82e025e4c7..550aa1e3a5f5 100644 --- a/module/zfs/spa_log_spacemap.c +++ b/module/zfs/spa_log_spacemap.c @@ -1189,6 +1189,7 @@ spa_ld_log_sm_data(spa_t *spa) if (metaslab_debug_load && m->ms_sm != NULL) { VERIFY0(metaslab_load(m)); + metaslab_set_selected_txg(m, 0); } mutex_exit(&m->ms_lock); } diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c index 5644b9c5b2db..a6280e0112ed 100644 --- a/module/zfs/vdev.c +++ b/module/zfs/vdev.c @@ -3262,20 +3262,6 @@ vdev_sync_done(vdev_t *vd, uint64_t txg) != NULL) metaslab_sync_done(msp, txg); - /* - * Because this function is only called on dirty vdevs, it's possible - * we won't consider all metaslabs for unloading on every - * txg. 
However, unless the system is largely idle it is likely that - * we will dirty all vdevs within a few txgs. - */ - for (int i = 0; i < vd->vdev_ms_count; i++) { - msp = vd->vdev_ms[i]; - mutex_enter(&msp->ms_lock); - if (msp->ms_sm != NULL) - metaslab_potentially_unload(msp, txg); - mutex_exit(&msp->ms_lock); - } - if (reassess) metaslab_sync_reassess(vd->vdev_mg); } diff --git a/module/zfs/vdev_initialize.c b/module/zfs/vdev_initialize.c index b1590132636b..a355f185cc2e 100644 --- a/module/zfs/vdev_initialize.c +++ b/module/zfs/vdev_initialize.c @@ -20,7 +20,7 @@ */ /* - * Copyright (c) 2016 by Delphix. All rights reserved. + * Copyright (c) 2016, 2019 by Delphix. All rights reserved. */ #include @@ -483,6 +483,7 @@ vdev_initialize_thread(void *arg) for (uint64_t i = 0; !vd->vdev_detached && i < vd->vdev_top->vdev_ms_count; i++) { metaslab_t *msp = vd->vdev_top->vdev_ms[i]; + boolean_t unload_when_done = B_FALSE; /* * If we've expanded the top-level vdev or it's our @@ -496,6 +497,8 @@ vdev_initialize_thread(void *arg) spa_config_exit(spa, SCL_CONFIG, FTAG); metaslab_disable(msp); mutex_enter(&msp->ms_lock); + if (!msp->ms_loaded && !msp->ms_loading) + unload_when_done = B_TRUE; VERIFY0(metaslab_load(msp)); range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add, @@ -503,7 +506,7 @@ vdev_initialize_thread(void *arg) mutex_exit(&msp->ms_lock); error = vdev_initialize_ranges(vd, deadbeef); - metaslab_enable(msp, B_TRUE); + metaslab_enable(msp, B_TRUE, unload_when_done); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL); diff --git a/module/zfs/vdev_trim.c b/module/zfs/vdev_trim.c index 5ad47cccdafe..70b122a0a6e0 100644 --- a/module/zfs/vdev_trim.c +++ b/module/zfs/vdev_trim.c @@ -837,7 +837,7 @@ vdev_trim_thread(void *arg) */ if (msp->ms_sm == NULL && vd->vdev_trim_partial) { mutex_exit(&msp->ms_lock); - metaslab_enable(msp, B_FALSE); + metaslab_enable(msp, B_FALSE, B_FALSE); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); vdev_trim_calculate_progress(vd); continue; @@ -849,7 +849,7 @@ vdev_trim_thread(void *arg) mutex_exit(&msp->ms_lock); error = vdev_trim_ranges(&ta); - metaslab_enable(msp, B_TRUE); + metaslab_enable(msp, B_TRUE, B_FALSE); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); range_tree_vacate(ta.trim_tree, NULL, NULL); @@ -1154,7 +1154,7 @@ vdev_autotrim_thread(void *arg) if (msp->ms_sm == NULL || range_tree_is_empty(msp->ms_trim)) { mutex_exit(&msp->ms_lock); - metaslab_enable(msp, B_FALSE); + metaslab_enable(msp, B_FALSE, B_FALSE); continue; } @@ -1170,7 +1170,7 @@ vdev_autotrim_thread(void *arg) */ if (msp->ms_disabled > 1) { mutex_exit(&msp->ms_lock); - metaslab_enable(msp, B_FALSE); + metaslab_enable(msp, B_FALSE, B_FALSE); continue; } @@ -1288,7 +1288,7 @@ vdev_autotrim_thread(void *arg) range_tree_vacate(trim_tree, NULL, NULL); range_tree_destroy(trim_tree); - metaslab_enable(msp, issued_trim); + metaslab_enable(msp, issued_trim, B_FALSE); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); for (uint64_t c = 0; c < children; c++) { From 1a26cb6160949d1aa16a91714c88fd927423209f Mon Sep 17 00:00:00 2001 From: Paul Dagnelie Date: Mon, 19 Aug 2019 15:06:53 -0700 Subject: [PATCH 07/68] Add more refquota tests It used to be possible for zfs receive (and other operations related to clone swap) to bypass refquotas. This can cause a number of issues, and there should be an automated test for it. Added tests for rollback and receive not overriding refquota. 
Reviewed-by: Pavel Zakharov Reviewed-by: John Kennedy Reviewed-by: Brian Behlendorf Signed-off-by: Paul Dagnelie Closes #9139 --- tests/runfiles/linux.run | 3 +- .../tests/functional/refquota/Makefile.am | 4 +- .../functional/refquota/refquota_007_neg.ksh | 61 ++++++++++++++++ .../functional/refquota/refquota_008_neg.ksh | 71 +++++++++++++++++++ 4 files changed, 137 insertions(+), 2 deletions(-) create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index f8f04d73ef96..f0e468a689fb 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -759,7 +759,8 @@ tags = ['functional', 'redundancy'] [tests/functional/refquota] tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos', - 'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg'] + 'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg', + 'refquota_007_neg', 'refquota_008_neg'] tags = ['functional', 'refquota'] [tests/functional/refreserv] diff --git a/tests/zfs-tests/tests/functional/refquota/Makefile.am b/tests/zfs-tests/tests/functional/refquota/Makefile.am index 5f7c7b68690f..1d8418fbbec5 100644 --- a/tests/zfs-tests/tests/functional/refquota/Makefile.am +++ b/tests/zfs-tests/tests/functional/refquota/Makefile.am @@ -7,4 +7,6 @@ dist_pkgdata_SCRIPTS = \ refquota_003_pos.ksh \ refquota_004_pos.ksh \ refquota_005_pos.ksh \ - refquota_006_neg.ksh + refquota_006_neg.ksh \ + refquota_007_neg.ksh \ + refquota_008_neg.ksh diff --git a/tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh b/tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh new file mode 100755 index 000000000000..e2141c7d7f3b --- /dev/null +++ b/tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh @@ -0,0 +1,61 @@ +#!/bin/ksh +# +# CDDL HEADER START +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. + +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# +# CDDL HEADER END +# + +# +# Copyright (c) 2013 by Delphix. All rights reserved. +# + +. $STF_SUITE/include/libtest.shlib + +# +# DESCRIPTION: +# refquota limits the amount of space a dataset can consume, +# snapshot rollback should be limited by refquota. +# +# STRATEGY: +# 1. Create a file in a filesystem +# 2. Create a snapshot of the filesystem +# 3. Remove the file +# 4. Set a refquota of size half of the file +# 5. Rollback the filesystem from the snapshot +# 6. Rollback should fail +# + +verify_runnable "both" + +function cleanup +{ + log_must $ZFS destroy -rf $TESTPOOL/$TESTFS + log_must $ZFS create $TESTPOOL/$TESTFS + log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS +} + +log_onexit cleanup + +TESTFILE='testfile' +FS=$TESTPOOL/$TESTFS + +mntpnt=$(get_prop mountpoint $FS) +log_must mkfile 20M $mntpnt/$TESTFILE +log_must zfs snapshot $FS@snap20M +log_must rm $mntpnt/$TESTFILE + +log_must sync + +log_must zfs set refquota=10M $FS +log_mustnot zfs rollback $FS@snap20M + +log_pass "The rollback to the snapshot was restricted by refquota." 
diff --git a/tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh b/tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh new file mode 100755 index 000000000000..e7f40ec71767 --- /dev/null +++ b/tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh @@ -0,0 +1,71 @@ +#!/bin/ksh +# +# CDDL HEADER START +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. + +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# +# CDDL HEADER END +# + +# +# Copyright (c) 2013 by Delphix. All rights reserved. +# + +. $STF_SUITE/include/libtest.shlib + +# +# DESCRIPTION: +# refquota limits the amount of space a dataset can consume, +# This test verifies that zfs receive does not override +# refquota. +# +# STRATEGY: +# 1. Create a sub-filesystem $TESTSUBFS1 +# 2. Create a file in the sub-filesystem $TESTSUBFS1 +# 3. Create a snapshot of the sub-filesystem $TESTSUBFS1 +# 4. Create another sub-filesystem $TESTSUBFS2 +# 5. Apply a refquota value to $TESTSUBFS2, +# half the sub-filesystem $TESTSUBFS1 file size +# 6. Verify that zfs receive of the snapshot of $TESTSUBFS1 +# fails due to refquota +# + +verify_runnable "both" + +oldvalue=$(get_tunable spa_asize_inflation) +function cleanup +{ + set_tunable32 spa_asize_inflation $oldvalue + log_must zfs destroy -rf $TESTPOOL/$TESTFS + log_must zfs create $TESTPOOL/$TESTFS + log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS +} + +log_onexit cleanup + +set_tunable32 spa_asize_inflation 2 + +TESTFILE='testfile' +FS=$TESTPOOL/$TESTFS +log_must zfs create $FS/$TESTSUBFS1 +log_must zfs create $FS/$TESTSUBFS2 + +mntpnt1=$(get_prop mountpoint $FS/$TESTSUBFS1) +mntpnt2=$(get_prop mountpoint $FS/$TESTSUBFS2) + +log_must mkfile 200M $mntpnt1/$TESTFILE +log_must zfs snapshot $FS/$TESTSUBFS1@snap200m + +log_must zfs set refquota=10M $FS/$TESTSUBFS2 +log_mustnot eval "zfs send $FS/$TESTSUBFS1@snap200m |" \ + "zfs receive -F $FS/$TESTSUBFS2" + +log_pass "ZFS receive does not override refquota" + From f6fbe25664629d1ae6a3b186f14ec69dbe6c6232 Mon Sep 17 00:00:00 2001 From: colmbuckley Date: Mon, 19 Aug 2019 23:11:47 +0100 Subject: [PATCH 08/68] Set "none" scheduler if available (initramfs) Existing zfs initramfs script logic will attempt to set the 'noop' scheduler if it's available on the vdev block devices. Newer kernels have the similar 'none' scheduler on multiqueue devices; this change alters the initramfs script logic to also attempt to set this scheduler if it's available. Reviewed-by: Brian Behlendorf Reviewed-by: Garrett Fields Reviewed-by: Richard Laager Signed-off-by: Colm Buckley Closes #9042 --- contrib/initramfs/scripts/zfs.in | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/contrib/initramfs/scripts/zfs.in b/contrib/initramfs/scripts/zfs.in index ad604a82ce52..9d11e1926afd 100644 --- a/contrib/initramfs/scripts/zfs.in +++ b/contrib/initramfs/scripts/zfs.in @@ -884,20 +884,27 @@ mountroot() ZFS_RPOOL="${pool}" fi - # Set elevator=noop on the root pool's vdevs' disks. ZFS already - # does this for wholedisk vdevs (for all pools), so this is only - # important for partitions. + # Set the no-op scheduler on the disks containing the vdevs of + # the root pool. 
For single-queue devices, this scheduler is + # "noop", for multi-queue devices, it is "none". + # ZFS already does this for wholedisk vdevs (for all pools), so this + # is only important for partitions. "${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null | awk '/^\t / && !/(mirror|raidz)/ { dev=$1; sub(/[0-9]+$/, "", dev); print dev }' | - while read i + while read -r i do - if grep -sq noop /sys/block/$i/queue/scheduler + SCHEDULER=/sys/block/$i/queue/scheduler + if [ -e "${SCHEDULER}" ] then - echo noop > "/sys/block/$i/queue/scheduler" + # Query to see what schedulers are available + case "$(cat "${SCHEDULER}")" in + *noop*) echo noop > "${SCHEDULER}" ;; + *none*) echo none > "${SCHEDULER}" ;; + esac fi done From ff4b68eedc307e6e9b7f3890b809cbb0e9d73856 Mon Sep 17 00:00:00 2001 From: Dominic Pearson Date: Tue, 20 Aug 2019 00:22:52 +0200 Subject: [PATCH 09/68] Linux 5.3 compat: Makefile subdir-m no longer supported Uses obj-m instead, due to kernel changes. See LKML: Masahiro Yamada, Tue, 6 Aug 2019 19:03:23 +0900 Reviewed-by: Brian Behlendorf Reviewed-by: Tony Hutter Signed-off-by: Dominic Pearson Closes #9169 --- .gitignore | 11 +++++++++++ module/Makefile.in | 24 ++++++++++++------------ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index 549fa59f3822..ae9e22dfa7bb 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,14 @@ cscope.* *.log venv +# +# Module leftovers +# +/module/avl/zavl.mod +/module/icp/icp.mod +/module/lua/zlua.mod +/module/nvpair/znvpair.mod +/module/spl/spl.mod +/module/unicode/zunicode.mod +/module/zcommon/zcommon.mod +/module/zfs/zfs.mod diff --git a/module/Makefile.in b/module/Makefile.in index eca7691aedbb..7477dbe56509 100644 --- a/module/Makefile.in +++ b/module/Makefile.in @@ -1,11 +1,11 @@ -subdir-m += avl -subdir-m += icp -subdir-m += lua -subdir-m += nvpair -subdir-m += spl -subdir-m += unicode -subdir-m += zcommon -subdir-m += zfs +obj-m += avl/ +obj-m += icp/ +obj-m += lua/ +obj-m += nvpair/ +obj-m += spl/ +obj-m += unicode/ +obj-m += zcommon/ +obj-m += zfs/ INSTALL_MOD_DIR ?= extra @@ -60,13 +60,13 @@ modules_install: modules_uninstall: @# Uninstall the kernel modules kmoddir=$(DESTDIR)$(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@ - list='$(subdir-m)'; for subdir in $$list; do \ - $(RM) -R $$kmoddir/$(INSTALL_MOD_DIR)/$$subdir; \ + list='$(obj-m)'; for objdir in $$list; do \ + $(RM) -R $$kmoddir/$(INSTALL_MOD_DIR)/$$objdir; \ done distdir: - list='$(subdir-m)'; for subdir in $$list; do \ - (cd @top_srcdir@/module && find $$subdir \ + list='$(obj-m)'; for objdir in $$list; do \ + (cd @top_srcdir@/module && find $$objdir \ -name '*.c' -o -name '*.h' -o -name '*.S' | \ xargs cp --parents -t @abs_top_builddir@/module/$$distdir); \ done From 3beb0a7694df5d1d4314179147aaa1d40b63fe76 Mon Sep 17 00:00:00 2001 From: jdike <52420226+jdike@users.noreply.github.com> Date: Mon, 19 Aug 2019 19:04:26 -0400 Subject: [PATCH 10/68] Fix lockdep circular locking false positive involving sa_lock There are two different deadlock scenarios, but they share a common link, which is thread 1 holding sa_lock and trying to get zap->zap_rwlock: zap_lockdir_impl+0x858/0x16c0 [zfs] zap_lockdir+0xd2/0x100 [zfs] zap_lookup_norm+0x7f/0x100 [zfs] zap_lookup+0x12/0x20 [zfs] sa_setup+0x902/0x1380 [zfs] zfsvfs_init+0x3d6/0xb20 [zfs] zfsvfs_create+0x5dd/0x900 [zfs] zfs_domount+0xa3/0xe20 [zfs] and thread 2 trying to get sa_lock, either in sa_setup: sa_setup+0x742/0x1380 [zfs] zfsvfs_init+0x3d6/0xb20 [zfs] zfsvfs_create+0x5dd/0x900 [zfs] 
zfs_domount+0xa3/0xe20 [zfs] or in sa_build_index: sa_build_index+0x13d/0x790 [zfs] sa_handle_get_from_db+0x368/0x500 [zfs] zfs_znode_sa_init.isra.0+0x24b/0x330 [zfs] zfs_znode_alloc+0x3da/0x1a40 [zfs] zfs_zget+0x39a/0x6e0 [zfs] zfs_root+0x101/0x160 [zfs] zfs_domount+0x91f/0xea0 [zfs] From there, there are different locking paths back to something holding zap->zap_rwlock. The deadlock scenarios involve multiple different ZFS filesystems being mounted. sa_lock is common to these scenarios, and the sa struct involved is private to a mount. Therefore, these must be referring to different sa_lock instances and these deadlocks can't occur in practice. The fix, from Brian Behlendorf, is to remove sa_lock from lockdep coverage by initializing it with MUTEX_NOLOCKDEP. Reviewed-by: Brian Behlendorf Signed-off-by: Jeff Dike Closes #9110 --- module/zfs/sa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/zfs/sa.c b/module/zfs/sa.c index 4999fef345dc..f718e7662e6e 100644 --- a/module/zfs/sa.c +++ b/module/zfs/sa.c @@ -1014,7 +1014,7 @@ sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count, } sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP); - mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&sa->sa_lock, NULL, MUTEX_NOLOCKDEP, NULL); sa->sa_master_obj = sa_obj; os->os_sa = sa; From 325d288c5d536227010ff4dfcf66df89f123d166 Mon Sep 17 00:00:00 2001 From: Matthew Ahrens Date: Tue, 20 Aug 2019 11:34:52 -0700 Subject: [PATCH 11/68] Add fast path for zfs_ioc_space_snaps() handling of empty_bpobj When there are many snapshots, calls to zfs_ioc_space_snaps() (e.g. from `zfs destroy -nv pool/fs@snap1%snap10000`) can be very slow, resulting in poor performance because we are holding the dp_config_rwlock the entire time, blocking spa_sync() from continuing. With around ten thousand snapshots, we've seen up to 500 seconds in this ioctl, iterating over up to 50,000,000 bpobjs, ~99% of which are the empty bpobj. By creating a fast path for zfs_ioc_space_snaps() handling of the empty_bpobj, we can achieve a ~5x performance improvement of this ioctl (when there are many snapshots, and the deadlist is mostly empty_bpobj's). 
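For scale, the ioctl being optimized here backs the dry-run destroy of a snapshot range; an invocation like the one below (dataset names illustrative) is the kind of operation that benefits when the deadlists consist mostly of empty bpobjs:

    # Ask how much space destroying a 10,000-snapshot range would
    # reclaim, without destroying anything; this calls
    # zfs_ioc_space_snaps() once per candidate snapshot.
    zfs destroy -nv pool/fs@snap1%snap10000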
Reviewed-by: Pavel Zakharov Reviewed-by: Brian Behlendorf Reviewed-by: Paul Dagnelie Signed-off-by: Matthew Ahrens External-issue: DLPX-58348 Closes #8744 --- include/sys/dsl_deadlist.h | 14 ++- module/zfs/dsl_deadlist.c | 195 ++++++++++++++++++++++++++++++------- module/zfs/dsl_destroy.c | 7 ++ 3 files changed, 181 insertions(+), 35 deletions(-) diff --git a/include/sys/dsl_deadlist.h b/include/sys/dsl_deadlist.h index bb8248a667b1..64358bb5fc0b 100644 --- a/include/sys/dsl_deadlist.h +++ b/include/sys/dsl_deadlist.h @@ -48,8 +48,10 @@ typedef struct dsl_deadlist_phys { typedef struct dsl_deadlist { objset_t *dl_os; uint64_t dl_object; - avl_tree_t dl_tree; + avl_tree_t dl_tree; /* contains dsl_deadlist_entry_t */ + avl_tree_t dl_cache; /* contains dsl_deadlist_cache_entry_t */ boolean_t dl_havetree; + boolean_t dl_havecache; struct dmu_buf *dl_dbuf; dsl_deadlist_phys_t *dl_phys; kmutex_t dl_lock; @@ -59,6 +61,15 @@ typedef struct dsl_deadlist { boolean_t dl_oldfmt; } dsl_deadlist_t; +typedef struct dsl_deadlist_cache_entry { + avl_node_t dlce_node; + uint64_t dlce_mintxg; + uint64_t dlce_bpobj; + uint64_t dlce_bytes; + uint64_t dlce_comp; + uint64_t dlce_uncomp; +} dsl_deadlist_cache_entry_t; + typedef struct dsl_deadlist_entry { avl_node_t dle_node; uint64_t dle_mintxg; @@ -108,6 +119,7 @@ int dsl_process_sub_livelist(bpobj_t *bpobj, struct bplist *to_free, zthr_t *t, uint64_t *size); void dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl, dmu_tx_t *tx); +void dsl_deadlist_discard_tree(dsl_deadlist_t *dl); #ifdef __cplusplus } diff --git a/module/zfs/dsl_deadlist.c b/module/zfs/dsl_deadlist.c index 25878f0ea42c..15a59315c27d 100644 --- a/module/zfs/dsl_deadlist.c +++ b/module/zfs/dsl_deadlist.c @@ -112,16 +112,24 @@ unsigned long zfs_livelist_max_entries = 500000; */ int zfs_livelist_min_percent_shared = 75; - static int dsl_deadlist_compare(const void *arg1, const void *arg2) { - const dsl_deadlist_entry_t *dle1 = (const dsl_deadlist_entry_t *)arg1; - const dsl_deadlist_entry_t *dle2 = (const dsl_deadlist_entry_t *)arg2; + const dsl_deadlist_entry_t *dle1 = arg1; + const dsl_deadlist_entry_t *dle2 = arg2; return (AVL_CMP(dle1->dle_mintxg, dle2->dle_mintxg)); } +static int +dsl_deadlist_cache_compare(const void *arg1, const void *arg2) +{ + const dsl_deadlist_cache_entry_t *dlce1 = arg1; + const dsl_deadlist_cache_entry_t *dlce2 = arg2; + + return (AVL_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg)); +} + static void dsl_deadlist_load_tree(dsl_deadlist_t *dl) { @@ -131,6 +139,23 @@ dsl_deadlist_load_tree(dsl_deadlist_t *dl) ASSERT(MUTEX_HELD(&dl->dl_lock)); ASSERT(!dl->dl_oldfmt); + if (dl->dl_havecache) { + /* + * After loading the tree, the caller may modify the tree, + * e.g. to add or remove nodes, or to make a node no longer + * refer to the empty_bpobj. These changes would make the + * dl_cache incorrect. Therefore we discard the cache here, + * so that it can't become incorrect. 
+ */ + dsl_deadlist_cache_entry_t *dlce; + void *cookie = NULL; + while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie)) + != NULL) { + kmem_free(dlce, sizeof (*dlce)); + } + avl_destroy(&dl->dl_cache); + dl->dl_havecache = B_FALSE; + } if (dl->dl_havetree) return; @@ -142,14 +167,114 @@ dsl_deadlist_load_tree(dsl_deadlist_t *dl) zap_cursor_advance(&zc)) { dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP); dle->dle_mintxg = zfs_strtonum(za.za_name, NULL); - VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, - za.za_first_integer)); + + /* + * Prefetch all the bpobj's so that we do that i/o + * in parallel. Then open them all in a second pass. + */ + dle->dle_bpobj.bpo_object = za.za_first_integer; + dmu_prefetch(dl->dl_os, dle->dle_bpobj.bpo_object, + 0, 0, 0, ZIO_PRIORITY_SYNC_READ); + avl_add(&dl->dl_tree, dle); } zap_cursor_fini(&zc); + + for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree); + dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) { + VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, + dle->dle_bpobj.bpo_object)); + } dl->dl_havetree = B_TRUE; } +/* + * Load only the non-empty bpobj's into the dl_cache. The cache is an analog + * of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP. It + * is used only for gathering space statistics. The dl_cache has two + * advantages over the dl_tree: + * + * 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's + * mostly empty_bpobj's), due to less CPU overhead to open the empty_bpobj + * many times and to inquire about its (zero) space stats many times. + * + * 2. The dl_cache uses less memory than the dl_tree. We only need to load + * the dl_tree of snapshots when deleting a snapshot, after which we free the + * dl_tree with dsl_deadlist_discard_tree + */ +static void +dsl_deadlist_load_cache(dsl_deadlist_t *dl) +{ + zap_cursor_t zc; + zap_attribute_t za; + + ASSERT(MUTEX_HELD(&dl->dl_lock)); + + ASSERT(!dl->dl_oldfmt); + if (dl->dl_havecache) + return; + + uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj; + + avl_create(&dl->dl_cache, dsl_deadlist_cache_compare, + sizeof (dsl_deadlist_cache_entry_t), + offsetof(dsl_deadlist_cache_entry_t, dlce_node)); + for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + if (za.za_first_integer == empty_bpobj) + continue; + dsl_deadlist_cache_entry_t *dlce = + kmem_zalloc(sizeof (*dlce), KM_SLEEP); + dlce->dlce_mintxg = zfs_strtonum(za.za_name, NULL); + + /* + * Prefetch all the bpobj's so that we do that i/o + * in parallel. Then open them all in a second pass. + */ + dlce->dlce_bpobj = za.za_first_integer; + dmu_prefetch(dl->dl_os, dlce->dlce_bpobj, + 0, 0, 0, ZIO_PRIORITY_SYNC_READ); + avl_add(&dl->dl_cache, dlce); + } + zap_cursor_fini(&zc); + + for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache); + dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) { + bpobj_t bpo; + VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj)); + + VERIFY0(bpobj_space(&bpo, + &dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp)); + bpobj_close(&bpo); + } + dl->dl_havecache = B_TRUE; +} + +/* + * Discard the tree to save memory. 
+ */ +void +dsl_deadlist_discard_tree(dsl_deadlist_t *dl) +{ + mutex_enter(&dl->dl_lock); + + if (!dl->dl_havetree) { + mutex_exit(&dl->dl_lock); + return; + } + dsl_deadlist_entry_t *dle; + void *cookie = NULL; + while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) { + bpobj_close(&dle->dle_bpobj); + kmem_free(dle, sizeof (*dle)); + } + avl_destroy(&dl->dl_tree); + + dl->dl_havetree = B_FALSE; + mutex_exit(&dl->dl_lock); +} + void dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args) { @@ -190,6 +315,7 @@ dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object) dl->dl_oldfmt = B_FALSE; dl->dl_phys = dl->dl_dbuf->db_data; dl->dl_havetree = B_FALSE; + dl->dl_havecache = B_FALSE; } boolean_t @@ -201,9 +327,6 @@ dsl_deadlist_is_open(dsl_deadlist_t *dl) void dsl_deadlist_close(dsl_deadlist_t *dl) { - void *cookie = NULL; - dsl_deadlist_entry_t *dle; - ASSERT(dsl_deadlist_is_open(dl)); mutex_destroy(&dl->dl_lock); @@ -216,6 +339,8 @@ dsl_deadlist_close(dsl_deadlist_t *dl) } if (dl->dl_havetree) { + dsl_deadlist_entry_t *dle; + void *cookie = NULL; while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) { bpobj_close(&dle->dle_bpobj); @@ -223,6 +348,15 @@ dsl_deadlist_close(dsl_deadlist_t *dl) } avl_destroy(&dl->dl_tree); } + if (dl->dl_havecache) { + dsl_deadlist_cache_entry_t *dlce; + void *cookie = NULL; + while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie)) + != NULL) { + kmem_free(dlce, sizeof (*dlce)); + } + avl_destroy(&dl->dl_cache); + } dmu_buf_rele(dl->dl_dbuf, dl); dl->dl_dbuf = NULL; dl->dl_phys = NULL; @@ -440,6 +574,7 @@ dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx) avl_remove(&dl->dl_tree, dle); VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx)); VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp)); + dmu_buf_will_dirty(dl->dl_dbuf, tx); dl->dl_phys->dl_used -= used; dl->dl_phys->dl_comp -= comp; dl->dl_phys->dl_uncomp -= uncomp; @@ -468,6 +603,7 @@ dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl, mutex_enter(&dl->dl_lock); VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx)); VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp)); + dmu_buf_will_dirty(dl->dl_dbuf, tx); dl->dl_phys->dl_used -= used; dl->dl_phys->dl_comp -= comp; dl->dl_phys->dl_uncomp -= uncomp; @@ -603,8 +739,8 @@ void dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp) { - dsl_deadlist_entry_t *dle; - dsl_deadlist_entry_t dle_tofind; + dsl_deadlist_cache_entry_t *dlce; + dsl_deadlist_cache_entry_t dlce_tofind; avl_index_t where; if (dl->dl_oldfmt) { @@ -616,34 +752,25 @@ dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg, *usedp = *compp = *uncompp = 0; mutex_enter(&dl->dl_lock); - dsl_deadlist_load_tree(dl); - dle_tofind.dle_mintxg = mintxg; - dle = avl_find(&dl->dl_tree, &dle_tofind, &where); + dsl_deadlist_load_cache(dl); + dlce_tofind.dlce_mintxg = mintxg; + dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where); + /* - * If we don't find this mintxg, there shouldn't be anything - * after it either. + * If this mintxg doesn't exist, it may be an empty_bpobj which + * is omitted from the sparse tree. Start at the next non-empty + * entry. 
*/ - ASSERT(dle != NULL || - avl_nearest(&dl->dl_tree, where, AVL_AFTER) == NULL); - - for (; dle && dle->dle_mintxg < maxtxg; - dle = AVL_NEXT(&dl->dl_tree, dle)) { - uint64_t used, comp, uncomp; - - VERIFY0(bpobj_space(&dle->dle_bpobj, - &used, &comp, &uncomp)); - - *usedp += used; - *compp += comp; - *uncompp += uncomp; + if (dlce == NULL) + dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER); + + for (; dlce && dlce->dlce_mintxg < maxtxg; + dlce = AVL_NEXT(&dl->dl_tree, dlce)) { + *usedp += dlce->dlce_bytes; + *compp += dlce->dlce_comp; + *uncompp += dlce->dlce_uncomp; } - /* - * This assertion ensures that the maxtxg is a key in the deadlist - * (unless it's UINT64_MAX). - */ - ASSERT(maxtxg == UINT64_MAX || - (dle != NULL && dle->dle_mintxg == maxtxg)); mutex_exit(&dl->dl_lock); } diff --git a/module/zfs/dsl_destroy.c b/module/zfs/dsl_destroy.c index 5c483c5dd961..788753bdccdb 100644 --- a/module/zfs/dsl_destroy.c +++ b/module/zfs/dsl_destroy.c @@ -413,6 +413,13 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx) /* Merge our deadlist into next's and free it. */ dsl_deadlist_merge(&ds_next->ds_deadlist, dsl_dataset_phys(ds)->ds_deadlist_obj, tx); + + /* + * We are done with the deadlist tree (generated/used + * by dsl_deadlist_move_bpobj() and dsl_deadlist_merge()). + * Discard it to save memory. + */ + dsl_deadlist_discard_tree(&ds_next->ds_deadlist); } dsl_deadlist_close(&ds->ds_deadlist); From 92a9e1da60f760380220624db2681b72ffff9a0b Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Tue, 20 Aug 2019 17:45:26 -0400 Subject: [PATCH 12/68] Fix automake program name transformations Automake can perform program name transformations at install time. However, arc_summary has its own name transformation taking place, which interferes with the automake transforms. The automake transforms must be taken into account in order to resolve the conflict. 
Signed-off-by: Ryan Moeller --- cmd/arc_summary/Makefile.am | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/arc_summary/Makefile.am b/cmd/arc_summary/Makefile.am index 7d83624d66d3..e9dbb779a37e 100644 --- a/cmd/arc_summary/Makefile.am +++ b/cmd/arc_summary/Makefile.am @@ -1,11 +1,17 @@ EXTRA_DIST = arc_summary2 arc_summary3 +transform = $(program_transform_name) + if USING_PYTHON_2 dist_bin_SCRIPTS = arc_summary2 install-exec-hook: - mv $(DESTDIR)$(bindir)/arc_summary2 $(DESTDIR)$(bindir)/arc_summary + before=$$(echo arc_summary2 | sed '$(transform)'); \ + after=$$(echo arc_summary | sed '$(transform)'); \ + mv "$(DESTDIR)$(bindir)/$$before" "$(DESTDIR)$(bindir)/$$after" else dist_bin_SCRIPTS = arc_summary3 install-exec-hook: - mv $(DESTDIR)$(bindir)/arc_summary3 $(DESTDIR)$(bindir)/arc_summary + before=$$(echo arc_summary3 | sed '$(transform)'); \ + after=$$(echo arc_summary | sed '$(transform)'); \ + mv "$(DESTDIR)$(bindir)/$$before" "$(DESTDIR)$(bindir)/$$after" endif From c759b33a519ca5f0d5bce51501bd4230134d4b25 Mon Sep 17 00:00:00 2001 From: Alexey Smirnoff Date: Tue, 20 Aug 2019 23:26:19 +0000 Subject: [PATCH 13/68] zfs-functions.in: in_mtab() always returns 1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit in_mtab() built its lookup key ($fs) with the wrong sed command; it should use $mntpnt instead, sanitized the same way as the variable names exported by read_mtab(). The fix is mostly to reuse the sed command found in read_mtab(). Reviewed-by: Brian Behlendorf Reviewed-by: Michael Niewöhner Signed-off-by: Alexey Smirnoff Closes #9168 --- etc/init.d/zfs-functions.in | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/etc/init.d/zfs-functions.in b/etc/init.d/zfs-functions.in index 490503e91391..14667b4e9fd3 100644 --- a/etc/init.d/zfs-functions.in +++ b/etc/init.d/zfs-functions.in @@ -373,10 +373,13 @@ read_mtab() in_mtab() { - local fs="$(echo "$1" | sed 's,/,_,g')" + local mntpnt="$1" + # Remove 'unwanted' characters. + mntpnt=$(printf '%b\n' "$mntpnt" | sed -e 's,/,,g' \ -e 's,-,,g' -e 's,\.,,g' -e 's, ,,g') local var - var="$(eval echo MTAB_$fs)" + var="$(eval echo MTAB_$mntpnt)" [ "$(eval echo "$""$var")" != "" ] return "$?" } From f66a1f88fb31716030c97f71df13a7ecef365a79 Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Wed, 21 Aug 2019 12:01:59 -0400 Subject: [PATCH 14/68] Minor cleanup in Makefile.am Split the long lines that add module license info to the dist archive. Remove the extra colon from the distclean-local target line, which had inadvertently been written as a double-colon rule.
Reviewed-by: Chris Dunlop Reviewed-by: Brian Behlendorf Signed-off-by: Ryan Moeller Closes #9189 --- Makefile.am | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/Makefile.am b/Makefile.am index 9afe22954101..da4f6407d18b 100644 --- a/Makefile.am +++ b/Makefile.am @@ -25,11 +25,16 @@ EXTRA_DIST += META AUTHORS COPYRIGHT LICENSE NEWS NOTICE README.md EXTRA_DIST += CODE_OF_CONDUCT.md # Include all the extra licensing information for modules -EXTRA_DIST += module/icp/algs/skein/THIRDPARTYLICENSE module/icp/algs/skein/THIRDPARTYLICENSE.descrip -EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip -EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip -EXTRA_DIST += module/spl/THIRDPARTYLICENSE.gplv2 module/spl/THIRDPARTYLICENSE.gplv2.descrip -EXTRA_DIST += module/zfs/THIRDPARTYLICENSE.cityhash module/zfs/THIRDPARTYLICENSE.cityhash.descrip +EXTRA_DIST += module/icp/algs/skein/THIRDPARTYLICENSE +EXTRA_DIST += module/icp/algs/skein/THIRDPARTYLICENSE.descrip +EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman +EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip +EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl +EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip +EXTRA_DIST += module/spl/THIRDPARTYLICENSE.gplv2 +EXTRA_DIST += module/spl/THIRDPARTYLICENSE.gplv2.descrip +EXTRA_DIST += module/zfs/THIRDPARTYLICENSE.cityhash +EXTRA_DIST += module/zfs/THIRDPARTYLICENSE.cityhash.descrip @CODE_COVERAGE_RULES@ @@ -39,7 +44,7 @@ gitrev: BUILT_SOURCES = gitrev -distclean-local:: +distclean-local: -$(RM) -R autom4te*.cache -find . 
\( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \ -o -name .pc -o -name .hg -o -name .git \) -prune -o \ From a9ebdfdd43204b8f907c5395cd68b2d745544730 Mon Sep 17 00:00:00 2001 From: Tony Hutter Date: Wed, 21 Aug 2019 09:29:23 -0700 Subject: [PATCH 15/68] Linux 5.3: Fix switch() fall through compiler errors Fix some switch() fall-through compiler errors: abd.c:1504:9: error: this statement may fall through Reviewed-by: Brian Behlendorf Signed-off-by: Tony Hutter Closes #9170 --- module/lua/llex.c | 9 ++++++--- module/zfs/abd.c | 4 ++++ module/zfs/vdev_raidz_math_scalar.c | 1 + 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/module/lua/llex.c b/module/lua/llex.c index 8760155d0546..50c301f599f1 100644 --- a/module/lua/llex.c +++ b/module/lua/llex.c @@ -431,9 +431,12 @@ static int llex (LexState *ls, SemInfo *seminfo) { if (sep >= 0) { read_long_string(ls, seminfo, sep); return TK_STRING; - } - else if (sep == -1) return '['; - else lexerror(ls, "invalid long string delimiter", TK_STRING); + } else if (sep == -1) { + return '['; + } else { + lexerror(ls, "invalid long string delimiter", TK_STRING); + break; + } } case '=': { next(ls); diff --git a/module/zfs/abd.c b/module/zfs/abd.c index 9f688d9bc2b8..8b2514404a86 100644 --- a/module/zfs/abd.c +++ b/module/zfs/abd.c @@ -1408,8 +1408,10 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, switch (parity) { case 3: len = MIN(caiters[2].iter_mapsize, len); + /* falls through */ case 2: len = MIN(caiters[1].iter_mapsize, len); + /* falls through */ case 1: len = MIN(caiters[0].iter_mapsize, len); } @@ -1499,9 +1501,11 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds, case 3: len = MIN(xiters[2].iter_mapsize, len); len = MIN(citers[2].iter_mapsize, len); + /* falls through */ case 2: len = MIN(xiters[1].iter_mapsize, len); len = MIN(citers[1].iter_mapsize, len); + /* falls through */ case 1: len = MIN(xiters[0].iter_mapsize, len); len = MIN(citers[0].iter_mapsize, len); diff --git a/module/zfs/vdev_raidz_math_scalar.c b/module/zfs/vdev_raidz_math_scalar.c index a693bff63ffb..cd742e146ca6 100644 --- a/module/zfs/vdev_raidz_math_scalar.c +++ b/module/zfs/vdev_raidz_math_scalar.c @@ -142,6 +142,7 @@ static const struct { a.b[6] = mul_lt[a.b[6]]; \ a.b[5] = mul_lt[a.b[5]]; \ a.b[4] = mul_lt[a.b[4]]; \ + /* falls through */ \ case 4: \ a.b[3] = mul_lt[a.b[3]]; \ a.b[2] = mul_lt[a.b[2]]; \ From 20f7b917aaabd16b41321e9c4dc9f11996ca3683 Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Thu, 22 Aug 2019 08:53:44 -0700 Subject: [PATCH 16/68] ZTS: Fix vdev_zaps_005_pos on CentOS 6 The ancient version of blkid (v2.17.2) used in CentOS 6 will not detect the newly created pool unless it has been written to. Force a pool sync so `zpool import` will detect the newly created pool. Reviewed-by: John Kennedy Signed-off-by: Brian Behlendorf Closes #9199 --- tests/zfs-tests/tests/functional/vdev_zaps/vdev_zaps_005_pos.ksh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/zfs-tests/tests/functional/vdev_zaps/vdev_zaps_005_pos.ksh b/tests/zfs-tests/tests/functional/vdev_zaps/vdev_zaps_005_pos.ksh index 8cf8e6d4055b..066be917e436 100755 --- a/tests/zfs-tests/tests/functional/vdev_zaps/vdev_zaps_005_pos.ksh +++ b/tests/zfs-tests/tests/functional/vdev_zaps/vdev_zaps_005_pos.ksh @@ -41,6 +41,7 @@ orig_top=$(get_top_vd_zap $DISK $conf) orig_leaf=$(get_leaf_vd_zap $DISK $conf) assert_zap_common $TESTPOOL $DISK "top" $orig_top assert_zap_common $TESTPOOL $DISK "leaf" $orig_leaf +log_must zpool sync # Export the pool.
log_must zpool export $TESTPOOL From f591a581d6b57509057e6ec5ebae31d7f12cc106 Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Thu, 22 Aug 2019 12:44:11 -0400 Subject: [PATCH 17/68] Enhance ioctl number checks When checking ZFS_IOC_* numbers, print which numbers are wrong rather than silently failing. Reviewed-by: Chris Dunlop Reviewed-by: Brian Behlendorf Signed-off-by: Ryan Moeller Closes #9187 --- .../libzfs_input_check/libzfs_input_check.c | 186 ++++++++++-------- 1 file changed, 99 insertions(+), 87 deletions(-) diff --git a/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c b/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c index ecdabbd148cc..bf57518c7d87 100644 --- a/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c +++ b/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c @@ -862,93 +862,105 @@ enum zfs_ioc_ref { boolean_t validate_ioc_values(void) { - return ( - ZFS_IOC_BASE + 0 == ZFS_IOC_POOL_CREATE && - ZFS_IOC_BASE + 1 == ZFS_IOC_POOL_DESTROY && - ZFS_IOC_BASE + 2 == ZFS_IOC_POOL_IMPORT && - ZFS_IOC_BASE + 3 == ZFS_IOC_POOL_EXPORT && - ZFS_IOC_BASE + 4 == ZFS_IOC_POOL_CONFIGS && - ZFS_IOC_BASE + 5 == ZFS_IOC_POOL_STATS && - ZFS_IOC_BASE + 6 == ZFS_IOC_POOL_TRYIMPORT && - ZFS_IOC_BASE + 7 == ZFS_IOC_POOL_SCAN && - ZFS_IOC_BASE + 8 == ZFS_IOC_POOL_FREEZE && - ZFS_IOC_BASE + 9 == ZFS_IOC_POOL_UPGRADE && - ZFS_IOC_BASE + 10 == ZFS_IOC_POOL_GET_HISTORY && - ZFS_IOC_BASE + 11 == ZFS_IOC_VDEV_ADD && - ZFS_IOC_BASE + 12 == ZFS_IOC_VDEV_REMOVE && - ZFS_IOC_BASE + 13 == ZFS_IOC_VDEV_SET_STATE && - ZFS_IOC_BASE + 14 == ZFS_IOC_VDEV_ATTACH && - ZFS_IOC_BASE + 15 == ZFS_IOC_VDEV_DETACH && - ZFS_IOC_BASE + 16 == ZFS_IOC_VDEV_SETPATH && - ZFS_IOC_BASE + 17 == ZFS_IOC_VDEV_SETFRU && - ZFS_IOC_BASE + 18 == ZFS_IOC_OBJSET_STATS && - ZFS_IOC_BASE + 19 == ZFS_IOC_OBJSET_ZPLPROPS && - ZFS_IOC_BASE + 20 == ZFS_IOC_DATASET_LIST_NEXT && - ZFS_IOC_BASE + 21 == ZFS_IOC_SNAPSHOT_LIST_NEXT && - ZFS_IOC_BASE + 22 == ZFS_IOC_SET_PROP && - ZFS_IOC_BASE + 23 == ZFS_IOC_CREATE && - ZFS_IOC_BASE + 24 == ZFS_IOC_DESTROY && - ZFS_IOC_BASE + 25 == ZFS_IOC_ROLLBACK && - ZFS_IOC_BASE + 26 == ZFS_IOC_RENAME && - ZFS_IOC_BASE + 27 == ZFS_IOC_RECV && - ZFS_IOC_BASE + 28 == ZFS_IOC_SEND && - ZFS_IOC_BASE + 29 == ZFS_IOC_INJECT_FAULT && - ZFS_IOC_BASE + 30 == ZFS_IOC_CLEAR_FAULT && - ZFS_IOC_BASE + 31 == ZFS_IOC_INJECT_LIST_NEXT && - ZFS_IOC_BASE + 32 == ZFS_IOC_ERROR_LOG && - ZFS_IOC_BASE + 33 == ZFS_IOC_CLEAR && - ZFS_IOC_BASE + 34 == ZFS_IOC_PROMOTE && - ZFS_IOC_BASE + 35 == ZFS_IOC_SNAPSHOT && - ZFS_IOC_BASE + 36 == ZFS_IOC_DSOBJ_TO_DSNAME && - ZFS_IOC_BASE + 37 == ZFS_IOC_OBJ_TO_PATH && - ZFS_IOC_BASE + 38 == ZFS_IOC_POOL_SET_PROPS && - ZFS_IOC_BASE + 39 == ZFS_IOC_POOL_GET_PROPS && - ZFS_IOC_BASE + 40 == ZFS_IOC_SET_FSACL && - ZFS_IOC_BASE + 41 == ZFS_IOC_GET_FSACL && - ZFS_IOC_BASE + 42 == ZFS_IOC_SHARE && - ZFS_IOC_BASE + 43 == ZFS_IOC_INHERIT_PROP && - ZFS_IOC_BASE + 44 == ZFS_IOC_SMB_ACL && - ZFS_IOC_BASE + 45 == ZFS_IOC_USERSPACE_ONE && - ZFS_IOC_BASE + 46 == ZFS_IOC_USERSPACE_MANY && - ZFS_IOC_BASE + 47 == ZFS_IOC_USERSPACE_UPGRADE && - ZFS_IOC_BASE + 48 == ZFS_IOC_HOLD && - ZFS_IOC_BASE + 49 == ZFS_IOC_RELEASE && - ZFS_IOC_BASE + 50 == ZFS_IOC_GET_HOLDS && - ZFS_IOC_BASE + 51 == ZFS_IOC_OBJSET_RECVD_PROPS && - ZFS_IOC_BASE + 52 == ZFS_IOC_VDEV_SPLIT && - ZFS_IOC_BASE + 53 == ZFS_IOC_NEXT_OBJ && - ZFS_IOC_BASE + 54 == ZFS_IOC_DIFF && - ZFS_IOC_BASE + 55 == ZFS_IOC_TMP_SNAPSHOT && - ZFS_IOC_BASE + 56 == ZFS_IOC_OBJ_TO_STATS && - ZFS_IOC_BASE + 57 == 
ZFS_IOC_SPACE_WRITTEN && - ZFS_IOC_BASE + 58 == ZFS_IOC_SPACE_SNAPS && - ZFS_IOC_BASE + 59 == ZFS_IOC_DESTROY_SNAPS && - ZFS_IOC_BASE + 60 == ZFS_IOC_POOL_REGUID && - ZFS_IOC_BASE + 61 == ZFS_IOC_POOL_REOPEN && - ZFS_IOC_BASE + 62 == ZFS_IOC_SEND_PROGRESS && - ZFS_IOC_BASE + 63 == ZFS_IOC_LOG_HISTORY && - ZFS_IOC_BASE + 64 == ZFS_IOC_SEND_NEW && - ZFS_IOC_BASE + 65 == ZFS_IOC_SEND_SPACE && - ZFS_IOC_BASE + 66 == ZFS_IOC_CLONE && - ZFS_IOC_BASE + 67 == ZFS_IOC_BOOKMARK && - ZFS_IOC_BASE + 68 == ZFS_IOC_GET_BOOKMARKS && - ZFS_IOC_BASE + 69 == ZFS_IOC_DESTROY_BOOKMARKS && - ZFS_IOC_BASE + 70 == ZFS_IOC_RECV_NEW && - ZFS_IOC_BASE + 71 == ZFS_IOC_POOL_SYNC && - ZFS_IOC_BASE + 72 == ZFS_IOC_CHANNEL_PROGRAM && - ZFS_IOC_BASE + 73 == ZFS_IOC_LOAD_KEY && - ZFS_IOC_BASE + 74 == ZFS_IOC_UNLOAD_KEY && - ZFS_IOC_BASE + 75 == ZFS_IOC_CHANGE_KEY && - ZFS_IOC_BASE + 76 == ZFS_IOC_REMAP && - ZFS_IOC_BASE + 77 == ZFS_IOC_POOL_CHECKPOINT && - ZFS_IOC_BASE + 78 == ZFS_IOC_POOL_DISCARD_CHECKPOINT && - ZFS_IOC_BASE + 79 == ZFS_IOC_POOL_INITIALIZE && - ZFS_IOC_BASE + 80 == ZFS_IOC_POOL_TRIM && - ZFS_IOC_BASE + 81 == ZFS_IOC_REDACT && - ZFS_IOC_BASE + 82 == ZFS_IOC_GET_BOOKMARK_PROPS && - LINUX_IOC_BASE + 1 == ZFS_IOC_EVENTS_NEXT && - LINUX_IOC_BASE + 2 == ZFS_IOC_EVENTS_CLEAR && - LINUX_IOC_BASE + 3 == ZFS_IOC_EVENTS_SEEK); + boolean_t result = TRUE; + +#define CHECK(expr) do { \ + if (!(expr)) { \ + result = FALSE; \ + fprintf(stderr, "(%s) === FALSE\n", #expr); \ + } \ +} while (0) + + CHECK(ZFS_IOC_BASE + 0 == ZFS_IOC_POOL_CREATE); + CHECK(ZFS_IOC_BASE + 1 == ZFS_IOC_POOL_DESTROY); + CHECK(ZFS_IOC_BASE + 2 == ZFS_IOC_POOL_IMPORT); + CHECK(ZFS_IOC_BASE + 3 == ZFS_IOC_POOL_EXPORT); + CHECK(ZFS_IOC_BASE + 4 == ZFS_IOC_POOL_CONFIGS); + CHECK(ZFS_IOC_BASE + 5 == ZFS_IOC_POOL_STATS); + CHECK(ZFS_IOC_BASE + 6 == ZFS_IOC_POOL_TRYIMPORT); + CHECK(ZFS_IOC_BASE + 7 == ZFS_IOC_POOL_SCAN); + CHECK(ZFS_IOC_BASE + 8 == ZFS_IOC_POOL_FREEZE); + CHECK(ZFS_IOC_BASE + 9 == ZFS_IOC_POOL_UPGRADE); + CHECK(ZFS_IOC_BASE + 10 == ZFS_IOC_POOL_GET_HISTORY); + CHECK(ZFS_IOC_BASE + 11 == ZFS_IOC_VDEV_ADD); + CHECK(ZFS_IOC_BASE + 12 == ZFS_IOC_VDEV_REMOVE); + CHECK(ZFS_IOC_BASE + 13 == ZFS_IOC_VDEV_SET_STATE); + CHECK(ZFS_IOC_BASE + 14 == ZFS_IOC_VDEV_ATTACH); + CHECK(ZFS_IOC_BASE + 15 == ZFS_IOC_VDEV_DETACH); + CHECK(ZFS_IOC_BASE + 16 == ZFS_IOC_VDEV_SETPATH); + CHECK(ZFS_IOC_BASE + 17 == ZFS_IOC_VDEV_SETFRU); + CHECK(ZFS_IOC_BASE + 18 == ZFS_IOC_OBJSET_STATS); + CHECK(ZFS_IOC_BASE + 19 == ZFS_IOC_OBJSET_ZPLPROPS); + CHECK(ZFS_IOC_BASE + 20 == ZFS_IOC_DATASET_LIST_NEXT); + CHECK(ZFS_IOC_BASE + 21 == ZFS_IOC_SNAPSHOT_LIST_NEXT); + CHECK(ZFS_IOC_BASE + 22 == ZFS_IOC_SET_PROP); + CHECK(ZFS_IOC_BASE + 23 == ZFS_IOC_CREATE); + CHECK(ZFS_IOC_BASE + 24 == ZFS_IOC_DESTROY); + CHECK(ZFS_IOC_BASE + 25 == ZFS_IOC_ROLLBACK); + CHECK(ZFS_IOC_BASE + 26 == ZFS_IOC_RENAME); + CHECK(ZFS_IOC_BASE + 27 == ZFS_IOC_RECV); + CHECK(ZFS_IOC_BASE + 28 == ZFS_IOC_SEND); + CHECK(ZFS_IOC_BASE + 29 == ZFS_IOC_INJECT_FAULT); + CHECK(ZFS_IOC_BASE + 30 == ZFS_IOC_CLEAR_FAULT); + CHECK(ZFS_IOC_BASE + 31 == ZFS_IOC_INJECT_LIST_NEXT); + CHECK(ZFS_IOC_BASE + 32 == ZFS_IOC_ERROR_LOG); + CHECK(ZFS_IOC_BASE + 33 == ZFS_IOC_CLEAR); + CHECK(ZFS_IOC_BASE + 34 == ZFS_IOC_PROMOTE); + CHECK(ZFS_IOC_BASE + 35 == ZFS_IOC_SNAPSHOT); + CHECK(ZFS_IOC_BASE + 36 == ZFS_IOC_DSOBJ_TO_DSNAME); + CHECK(ZFS_IOC_BASE + 37 == ZFS_IOC_OBJ_TO_PATH); + CHECK(ZFS_IOC_BASE + 38 == ZFS_IOC_POOL_SET_PROPS); + CHECK(ZFS_IOC_BASE + 39 == ZFS_IOC_POOL_GET_PROPS); + CHECK(ZFS_IOC_BASE + 40 == 
ZFS_IOC_SET_FSACL); + CHECK(ZFS_IOC_BASE + 41 == ZFS_IOC_GET_FSACL); + CHECK(ZFS_IOC_BASE + 42 == ZFS_IOC_SHARE); + CHECK(ZFS_IOC_BASE + 43 == ZFS_IOC_INHERIT_PROP); + CHECK(ZFS_IOC_BASE + 44 == ZFS_IOC_SMB_ACL); + CHECK(ZFS_IOC_BASE + 45 == ZFS_IOC_USERSPACE_ONE); + CHECK(ZFS_IOC_BASE + 46 == ZFS_IOC_USERSPACE_MANY); + CHECK(ZFS_IOC_BASE + 47 == ZFS_IOC_USERSPACE_UPGRADE); + CHECK(ZFS_IOC_BASE + 48 == ZFS_IOC_HOLD); + CHECK(ZFS_IOC_BASE + 49 == ZFS_IOC_RELEASE); + CHECK(ZFS_IOC_BASE + 50 == ZFS_IOC_GET_HOLDS); + CHECK(ZFS_IOC_BASE + 51 == ZFS_IOC_OBJSET_RECVD_PROPS); + CHECK(ZFS_IOC_BASE + 52 == ZFS_IOC_VDEV_SPLIT); + CHECK(ZFS_IOC_BASE + 53 == ZFS_IOC_NEXT_OBJ); + CHECK(ZFS_IOC_BASE + 54 == ZFS_IOC_DIFF); + CHECK(ZFS_IOC_BASE + 55 == ZFS_IOC_TMP_SNAPSHOT); + CHECK(ZFS_IOC_BASE + 56 == ZFS_IOC_OBJ_TO_STATS); + CHECK(ZFS_IOC_BASE + 57 == ZFS_IOC_SPACE_WRITTEN); + CHECK(ZFS_IOC_BASE + 58 == ZFS_IOC_SPACE_SNAPS); + CHECK(ZFS_IOC_BASE + 59 == ZFS_IOC_DESTROY_SNAPS); + CHECK(ZFS_IOC_BASE + 60 == ZFS_IOC_POOL_REGUID); + CHECK(ZFS_IOC_BASE + 61 == ZFS_IOC_POOL_REOPEN); + CHECK(ZFS_IOC_BASE + 62 == ZFS_IOC_SEND_PROGRESS); + CHECK(ZFS_IOC_BASE + 63 == ZFS_IOC_LOG_HISTORY); + CHECK(ZFS_IOC_BASE + 64 == ZFS_IOC_SEND_NEW); + CHECK(ZFS_IOC_BASE + 65 == ZFS_IOC_SEND_SPACE); + CHECK(ZFS_IOC_BASE + 66 == ZFS_IOC_CLONE); + CHECK(ZFS_IOC_BASE + 67 == ZFS_IOC_BOOKMARK); + CHECK(ZFS_IOC_BASE + 68 == ZFS_IOC_GET_BOOKMARKS); + CHECK(ZFS_IOC_BASE + 69 == ZFS_IOC_DESTROY_BOOKMARKS); + CHECK(ZFS_IOC_BASE + 70 == ZFS_IOC_RECV_NEW); + CHECK(ZFS_IOC_BASE + 71 == ZFS_IOC_POOL_SYNC); + CHECK(ZFS_IOC_BASE + 72 == ZFS_IOC_CHANNEL_PROGRAM); + CHECK(ZFS_IOC_BASE + 73 == ZFS_IOC_LOAD_KEY); + CHECK(ZFS_IOC_BASE + 74 == ZFS_IOC_UNLOAD_KEY); + CHECK(ZFS_IOC_BASE + 75 == ZFS_IOC_CHANGE_KEY); + CHECK(ZFS_IOC_BASE + 76 == ZFS_IOC_REMAP); + CHECK(ZFS_IOC_BASE + 77 == ZFS_IOC_POOL_CHECKPOINT); + CHECK(ZFS_IOC_BASE + 78 == ZFS_IOC_POOL_DISCARD_CHECKPOINT); + CHECK(ZFS_IOC_BASE + 79 == ZFS_IOC_POOL_INITIALIZE); + CHECK(ZFS_IOC_BASE + 80 == ZFS_IOC_POOL_TRIM); + CHECK(ZFS_IOC_BASE + 81 == ZFS_IOC_REDACT); + CHECK(ZFS_IOC_BASE + 82 == ZFS_IOC_GET_BOOKMARK_PROPS); + CHECK(LINUX_IOC_BASE + 1 == ZFS_IOC_EVENTS_NEXT); + CHECK(LINUX_IOC_BASE + 2 == ZFS_IOC_EVENTS_CLEAR); + CHECK(LINUX_IOC_BASE + 3 == ZFS_IOC_EVENTS_SEEK); + +#undef CHECK + + return (result); } int From 0154a1e539370e66bbe54eb62a36f0d684c03331 Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Thu, 22 Aug 2019 12:46:09 -0400 Subject: [PATCH 18/68] Dedup IOC enum values in libzfs_input_check Reuse enum value ZFS_IOC_BASE for `('Z' << 8)`. This is helpful on FreeBSD where ZFS_IOC_BASE has a different value and `('Z' << 8)` is wrong. 
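For reference, 'Z' is ASCII 0x5a, so the three bases in this enum evaluate as below; a quick shell check of the arithmetic:

    printf '0x%x\n' $(( 0x5a << 8 ))           # ZFS_IOC_BASE     = 0x5a00
    printf '0x%x\n' $(( (0x5a << 8) + 0x80 ))  # LINUX_IOC_BASE   = 0x5a80
    printf '0x%x\n' $(( (0x5a << 8) + 0xC0 ))  # FREEBSD_IOC_BASE = 0x5ac0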
Reviewed-by: Chris Dunlop Reviewed-by: Brian Behlendorf Signed-off-by: Ryan Moeller Closes #9188 --- tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c b/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c index bf57518c7d87..652c6d9a1895 100644 --- a/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c +++ b/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c @@ -851,8 +851,8 @@ zfs_ioc_input_tests(const char *pool) enum zfs_ioc_ref { ZFS_IOC_BASE = ('Z' << 8), - LINUX_IOC_BASE = ('Z' << 8) + 0x80, - FREEBSD_IOC_BASE = ('Z' << 8) + 0xC0, + LINUX_IOC_BASE = ZFS_IOC_BASE + 0x80, + FREEBSD_IOC_BASE = ZFS_IOC_BASE + 0xC0, }; /* From 2f74950c5e0f0693448a4c8bb75389313f00c996 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Thu, 22 Aug 2019 13:48:48 -0300 Subject: [PATCH 19/68] Document ZFS_DKMS_ENABLE_DEBUGINFO in userland configuration Document the ZFS_DKMS_ENABLE_DEBUGINFO option in the userland configuration file, as done with the other ZFS_DKMS_* options. It has been introduced with commit e45c1734a665 ("dkms: Enable debuginfo option to be set with zfs sysconfig file") but isn't mentioned anywhere other than the 'dkms.conf' file (generated). Reviewed-by: Brian Behlendorf Signed-off-by: Mauricio Faria de Oliveira Closes #9191 --- etc/init.d/zfs.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/etc/init.d/zfs.in b/etc/init.d/zfs.in index 7998569b2c2d..d4ad1beaaa27 100644 --- a/etc/init.d/zfs.in +++ b/etc/init.d/zfs.in @@ -91,6 +91,10 @@ MOUNT_EXTRA_OPTIONS="" # Only applicable for Debian GNU/Linux {dkms,initramfs}. ZFS_DKMS_ENABLE_DEBUG='no' +# Build kernel modules with the --enable-debuginfo switch? +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_DKMS_ENABLE_DEBUGINFO='no' + # Keep debugging symbols in kernel modules? # Only applicable for Debian GNU/Linux {dkms,initramfs}. ZFS_DKMS_DISABLE_STRIP='no' From 31b548ffb91f1475f5154df9d26e8917e96f81fb Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Thu, 22 Aug 2019 10:36:57 -0700 Subject: [PATCH 20/68] ZTS: Use decimal values when setting tunables The mdb_set_uint32 function requires that the values passed in be decimal. This was overlooked initially because the matching Linux function accepts both decimal and hexadecimal values. 
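Concretely, the Linux module-parameter parser takes either base while the mdb-backed path does not; an illustrative comparison, using the tunable exercised by the tests below:

    # Linux: both of these forms are accepted
    echo 20   > /sys/module/zfs/parameters/zfs_livelist_max_entries
    echo 0x14 > /sys/module/zfs/parameters/zfs_livelist_max_entries
    # illumos mdb-backed helper: only the decimal form is valid
    set_tunable64 zfs_livelist_max_entries 20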
Reviewed-by: John Kennedy Reviewed by: Sara Hartse Reviewed-by: Brian Behlendorf Signed-off-by: Igor Kozhukhov Closes #9125 Closes #9195 --- .../zfs_clone_livelist_condense_and_disable.ksh | 8 ++++---- .../zfs_destroy/zfs_clone_livelist_condense_races.ksh | 2 +- .../cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh | 2 +- .../zfs_destroy/zfs_destroy_dev_removal_condense.ksh | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh index b9ac87238894..7faf304db1d5 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh @@ -58,9 +58,9 @@ function test_condense { # set the max livelist entries to a small value to more easily # trigger a condense - set_tunable64 zfs_livelist_max_entries 0x14 + set_tunable64 zfs_livelist_max_entries 20 # set a small percent shared threshold so the livelist is not disabled - set_tunable32 zfs_livelist_min_percent_shared 0xa + set_tunable32 zfs_livelist_min_percent_shared 10 clone_dataset $TESTFS1 snap $TESTCLONE # sync between each write to make sure a new entry is created @@ -86,7 +86,7 @@ function test_condense function test_deactivated { # Threshold set to 50 percent - set_tunable32 zfs_livelist_min_percent_shared 0x32 + set_tunable32 zfs_livelist_min_percent_shared 50 clone_dataset $TESTFS1 snap $TESTCLONE log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0 @@ -97,7 +97,7 @@ function test_deactivated log_must zfs destroy -R $TESTPOOL/$TESTCLONE # Threshold set to 20 percent - set_tunable32 zfs_livelist_min_percent_shared 0x14 + set_tunable32 zfs_livelist_min_percent_shared 20 clone_dataset $TESTFS1 snap $TESTCLONE log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0 diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh index 037983ba7736..85692f889568 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh @@ -98,7 +98,7 @@ log_must zpool sync $TESTPOOL log_must zfs snapshot $TESTPOOL/$TESTFS1@snap # Reduce livelist size to trigger condense more easily -set_tunable64 zfs_livelist_max_entries 0x14 +set_tunable64 zfs_livelist_max_entries 20 # Test cancellation path in the zthr set_tunable32 zfs_livelist_condense_zthr_pause 1 diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh index 1dd01151969c..6669426b0c7b 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh @@ -128,7 +128,7 @@ log_must mkfile 20m /$TESTPOOL/$TESTFS1/atestfile log_must zfs snapshot $TESTPOOL/$TESTFS1@snap # set a small livelist entry size to more easily test multiple entry livelists -set_tunable64 zfs_livelist_max_entries 0x14 +set_tunable64 zfs_livelist_max_entries 20 test_one_empty test_one diff --git 
a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh index da5f314ef151..30c4b2ddac0d 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh @@ -45,7 +45,7 @@ function cleanup log_onexit cleanup ORIGINAL_MAX=$(get_tunable zfs_livelist_max_entries) -set_tunable64 zfs_livelist_max_entries 0x14 +set_tunable64 zfs_livelist_max_entries 20 VIRTUAL_DISK1=/var/tmp/disk1 VIRTUAL_DISK2=/var/tmp/disk2 From d1484fb1898043adc543f19d909c9f02d15b73f7 Mon Sep 17 00:00:00 2001 From: Paul Dagnelie Date: Thu, 22 Aug 2019 12:01:41 -0700 Subject: [PATCH 21/68] Fix install error introduced by #9089 Signed-off-by: Paul Dagnelie --- etc/systemd/system/Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/systemd/system/Makefile.am b/etc/systemd/system/Makefile.am index ba73f558a8a0..130c6c757a59 100644 --- a/etc/systemd/system/Makefile.am +++ b/etc/systemd/system/Makefile.am @@ -33,7 +33,7 @@ $(systemdunit_DATA) $(systemdpreset_DATA):%:%.in install-data-hook: $(MKDIR_P) "$(DESTDIR)$(systemdunitdir)" - ln -s /dev/null "$(DESTDIR)$(systemdunitdir)/zfs-import.service" + ln -sf /dev/null "$(DESTDIR)$(systemdunitdir)/zfs-import.service" distclean-local:: -$(RM) $(systemdunit_DATA) $(systemdpreset_DATA) From 19d61d63faa359afee5b3d7eb233bb786bc0de1c Mon Sep 17 00:00:00 2001 From: yshui Date: Fri, 23 Aug 2019 01:11:17 +0100 Subject: [PATCH 22/68] zfs-mount-generator: dependencies should be space-separated Reviewed-by: Antonio Russo Reviewed-by: Richard Laager Signed-off-by: Yuxuan Shui Closes #9174 --- etc/systemd/system-generators/zfs-mount-generator.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/systemd/system-generators/zfs-mount-generator.in b/etc/systemd/system-generators/zfs-mount-generator.in index ae208c965f97..3e529cb67bb3 100755 --- a/etc/systemd/system-generators/zfs-mount-generator.in +++ b/etc/systemd/system-generators/zfs-mount-generator.in @@ -215,7 +215,7 @@ EOF fi # Update the dependencies for the mount file to require the # key-loading unit. - wants="${wants},${keyloadunit}" + wants="${wants} ${keyloadunit}" fi # If the mountpoint has already been created, give it precedence. From 97c54ea818ac60b914d1591e17ab175d89410b1b Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Thu, 22 Aug 2019 20:26:51 -0400 Subject: [PATCH 23/68] Make slog test setup more robust The slog tests fail when attempting to create pools using file vdevs that already exist from previous test runs. Remove these files in the setup for the test.
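The underlying failure mode: a file vdev left behind by an earlier run still carries a ZFS label, so the next pool creation on it is refused unless forced. An illustrative sequence (paths made up):

    truncate -s 128M /var/tmp/vdev1
    zpool create testpool /var/tmp/vdev1
    # ...run aborts before "zpool destroy testpool" can run...
    zpool create testpool /var/tmp/vdev1   # fails: file still carries the old label
    # Recreating the file, as the new setup function does, clears the stale label:
    rm -f /var/tmp/vdev1; truncate -s 128M /var/tmp/vdev1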
Reviewed-by: Igor Kozhukhov Reviewed-by: Brian Behlendorf Reviewed-by: John Kennedy Signed-off-by: Ryan Moeller Closes #9194 --- tests/zfs-tests/tests/functional/slog/setup.ksh | 9 --------- tests/zfs-tests/tests/functional/slog/slog.kshlib | 11 ++++++++++- .../zfs-tests/tests/functional/slog/slog_001_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_002_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_003_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_004_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_005_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_006_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_007_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_008_neg.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_009_neg.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_010_neg.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_011_neg.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_012_neg.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_013_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_014_pos.ksh | 1 + .../zfs-tests/tests/functional/slog/slog_015_neg.ksh | 1 + .../tests/functional/slog/slog_replay_fs.ksh | 1 + .../tests/functional/slog/slog_replay_volume.ksh | 1 + 19 files changed, 27 insertions(+), 10 deletions(-) diff --git a/tests/zfs-tests/tests/functional/slog/setup.ksh b/tests/zfs-tests/tests/functional/slog/setup.ksh index f30824d3ee90..8e8d214d823c 100755 --- a/tests/zfs-tests/tests/functional/slog/setup.ksh +++ b/tests/zfs-tests/tests/functional/slog/setup.ksh @@ -38,13 +38,4 @@ if ! verify_slog_support ; then log_unsupported "This system doesn't support separate intent logs" fi -if [[ -d $VDEV ]]; then - log_must rm -rf $VDIR -fi -if [[ -d $VDEV2 ]]; then - log_must rm -rf $VDIR2 -fi -log_must mkdir -p $VDIR $VDIR2 -log_must truncate -s $MINVDEVSIZE $VDEV $SDEV $LDEV $VDEV2 $SDEV2 $LDEV2 - log_pass diff --git a/tests/zfs-tests/tests/functional/slog/slog.kshlib b/tests/zfs-tests/tests/functional/slog/slog.kshlib index 6ed7e4e0502f..75cfec2d832d 100644 --- a/tests/zfs-tests/tests/functional/slog/slog.kshlib +++ b/tests/zfs-tests/tests/functional/slog/slog.kshlib @@ -31,11 +31,20 @@ . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/slog/slog.cfg +function setup +{ + log_must rm -rf $VDIR $VDIR2 + log_must mkdir -p $VDIR $VDIR2 + log_must truncate -s $MINVDEVSIZE $VDEV $SDEV $LDEV $VDEV2 $SDEV2 $LDEV2 + + return 0 +} + function cleanup { poolexists $TESTPOOL && destroy_pool $TESTPOOL poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2 - rm -rf $TESTDIR + rm -rf $TESTDIR $VDIR $VDIR2 } # diff --git a/tests/zfs-tests/tests/functional/slog/slog_001_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_001_pos.ksh index 3d3daf5f9ccc..a4c35ed9e98e 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_001_pos.ksh @@ -45,6 +45,7 @@ verify_runnable "global" log_assert "Creating a pool with a log device succeeds." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_002_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_002_pos.ksh index b056f19cdb80..91904aa612d1 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_002_pos.ksh @@ -46,6 +46,7 @@ verify_runnable "global" log_assert "Adding a log device to normal pool works." 
log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_003_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_003_pos.ksh index c647b8f54b75..0b4d6ede3e13 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_003_pos.ksh @@ -46,6 +46,7 @@ verify_runnable "global" log_assert "Adding an extra log device works." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_004_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_004_pos.ksh index 4b0b3439a2e3..10f28dcc000b 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_004_pos.ksh @@ -46,6 +46,7 @@ verify_runnable "global" log_assert "Attaching a log device passes." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_005_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_005_pos.ksh index cbbb9486913a..4836f6f27937 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_005_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_005_pos.ksh @@ -46,6 +46,7 @@ verify_runnable "global" log_assert "Detaching a log device passes." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_006_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_006_pos.ksh index 53e8c67ca005..24143196fd2e 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_006_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_006_pos.ksh @@ -46,6 +46,7 @@ verify_runnable "global" log_assert "Replacing a log device passes." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_007_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_007_pos.ksh index 4926fb7b3192..27ac38606c29 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_007_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_007_pos.ksh @@ -48,6 +48,7 @@ verify_runnable "global" log_assert "Exporting and importing pool with log devices passes." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_008_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_008_neg.ksh index 587e0e321222..54587a0c61a7 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_008_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_008_neg.ksh @@ -44,6 +44,7 @@ verify_runnable "global" log_assert "A raidz/raidz2 log is not supported." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_009_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_009_neg.ksh index e7091f17b759..222f71a99928 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_009_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_009_neg.ksh @@ -45,6 +45,7 @@ verify_runnable "global" log_assert "A raidz/raidz2 log can not be added to existed pool." 
log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_010_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_010_neg.ksh index 8fe248ffbcba..edd9abea0930 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_010_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_010_neg.ksh @@ -46,6 +46,7 @@ verify_runnable "global" log_assert "Slog device can not be replaced with spare device." log_onexit cleanup +log_must setup log_must zpool create $TESTPOOL $VDEV spare $SDEV log $LDEV sdev=$(random_get $SDEV) diff --git a/tests/zfs-tests/tests/functional/slog/slog_011_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_011_neg.ksh index 2dad200b31c1..3bebc8201713 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_011_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_011_neg.ksh @@ -46,6 +46,7 @@ verify_runnable "global" log_assert "Offline and online a log device passes." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh index 45566d427f1d..8d6fb2bffb7f 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh @@ -45,6 +45,7 @@ verify_runnable "global" log_assert "Pool can survive when one of mirror log device get corrupted." log_onexit cleanup +log_must setup for type in "" "mirror" "raidz" "raidz2" do diff --git a/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh index bbe5adc24174..d6917065ddbf 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh @@ -60,6 +60,7 @@ log_assert "Verify slog device can be disk, file, lofi device or any device " \ "that presents a block interface." verify_disk_count "$DISKS" 2 log_onexit cleanup_testenv +log_must setup dsk1=${DISKS%% *} log_must zpool create $TESTPOOL ${DISKS#$dsk1} diff --git a/tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh index 0ec96ae1e6f7..e8ea29f1ffa3 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh @@ -44,6 +44,7 @@ verify_runnable "global" log_assert "log device can survive when one of the pool device get corrupted." +log_must setup for type in "mirror" "raidz" "raidz2"; do for spare in "" "spare"; do diff --git a/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh index 37821888ea00..fa6105116574 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh @@ -47,6 +47,7 @@ function cleanup ORIG_TIMEOUT=$(get_tunable zfs_commit_timeout_pct | tail -1 | awk '{print $NF}') log_onexit cleanup +log_must setup for PCT in 0 1 2 4 8 16 32 64 128 256 512 1024; do log_must set_tunable64 zfs_commit_timeout_pct $PCT diff --git a/tests/zfs-tests/tests/functional/slog/slog_replay_fs.ksh b/tests/zfs-tests/tests/functional/slog/slog_replay_fs.ksh index ea3f8451b9e3..3e5bccd2ef18 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_replay_fs.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_replay_fs.ksh @@ -66,6 +66,7 @@ function cleanup_fs log_assert "Replay of intent log succeeds." log_onexit cleanup_fs +log_must setup # # 1. 
Create an empty file system (TESTFS) diff --git a/tests/zfs-tests/tests/functional/slog/slog_replay_volume.ksh b/tests/zfs-tests/tests/functional/slog/slog_replay_volume.ksh index c8a3cbbf43c4..a72c83b5bfc6 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_replay_volume.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_replay_volume.ksh @@ -76,6 +76,7 @@ function cleanup_volume log_assert "Replay of intent log succeeds." log_onexit cleanup_volume +log_must setup # # 1. Create an empty volume (TESTVOL), set sync=always, and format From 4302698be16e88419b8b22194685372e012fa333 Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Thu, 22 Aug 2019 17:37:48 -0700 Subject: [PATCH 24/68] ZTS: Fix in-tree dbufstats test case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit a887d653 updated the dbufstats such that escalated privileges are required. Since all tests under cli_user are run with normal privileges, move this test case to a location where it will be run with the required privileges. Reviewed-by: John Kennedy Reviewed-by: Ryan Moeller Reviewed-by: Michael Niewöhner Signed-off-by: Brian Behlendorf Closes #9118 Closes #9196 --- tests/runfiles/linux.run | 4 ++-- tests/zfs-tests/tests/functional/arc/Makefile.am | 3 ++- .../misc/dbufstat_001_pos.ksh => arc/dbufstats_003_pos.ksh} | 4 ++-- tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am | 3 +-- 4 files changed, 7 insertions(+), 7 deletions(-) rename tests/zfs-tests/tests/functional/{cli_user/misc/dbufstat_001_pos.ksh => arc/dbufstats_003_pos.ksh} (90%) diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index f0e468a689fb..848c19e3ecdd 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -33,7 +33,7 @@ tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos', tags = ['functional', 'alloc_class'] [tests/functional/arc] -tests = ['dbufstats_001_pos', 'dbufstats_002_pos'] +tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos'] tags = ['functional', 'arc'] [tests/functional/atime] @@ -503,7 +503,7 @@ tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg', 'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg', 'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg', 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos', - 'arc_summary_001_pos', 'arc_summary_002_neg', 'dbufstat_001_pos'] + 'arc_summary_001_pos', 'arc_summary_002_neg'] user = tags = ['functional', 'cli_user', 'misc'] diff --git a/tests/zfs-tests/tests/functional/arc/Makefile.am b/tests/zfs-tests/tests/functional/arc/Makefile.am index dc57ebc86275..22704fa5181f 100644 --- a/tests/zfs-tests/tests/functional/arc/Makefile.am +++ b/tests/zfs-tests/tests/functional/arc/Makefile.am @@ -3,4 +3,5 @@ dist_pkgdata_SCRIPTS = \ cleanup.ksh \ setup.ksh \ dbufstats_001_pos.ksh \ - dbufstats_002_pos.ksh + dbufstats_002_pos.ksh \ + dbufstats_003_pos.ksh diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_003_pos.ksh similarity index 90% rename from tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh rename to tests/zfs-tests/tests/functional/arc/dbufstats_003_pos.ksh index 0e187015f8d6..91cec74881a6 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/dbufstat_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/arc/dbufstats_003_pos.ksh @@ -33,11 +33,11 @@ log_assert "dbufstat generates output and doesn't return an error code"
typeset -i i=0 while [[ $i -lt ${#args[*]} ]]; do - log_must eval "sudo dbufstat ${args[i]} > /dev/null" + log_must eval "dbufstat ${args[i]} >/dev/null" ((i = i + 1)) done # A simple test of dbufstat filter functionality -log_must eval "sudo dbufstat -F object=10,dbc=1,pool=$TESTPOOL > /dev/null" +log_must eval "dbufstat -F object=10,dbc=1,pool=$TESTPOOL >/dev/null" log_pass "dbufstat generates output and doesn't return an error code" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am b/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am index 29c03429091b..49138d927e06 100644 --- a/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am +++ b/tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am @@ -45,8 +45,7 @@ dist_pkgdata_SCRIPTS = \ zpool_upgrade_001_neg.ksh \ arcstat_001_pos.ksh \ arc_summary_001_pos.ksh \ - arc_summary_002_neg.ksh \ - dbufstat_001_pos.ksh + arc_summary_002_neg.ksh dist_pkgdata_DATA = \ misc.cfg From a18f8bce5c6acb9c3990b2917efa96d1828e541c Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Sun, 25 Aug 2019 21:30:39 -0400 Subject: [PATCH 25/68] Split argument list, satisfy shellcheck SC2086 Split the arguments for ${TEST_RUNNER} across multiple lines for clarity. Also added quotes in the message to match the invoked command. Unquoted variables in argument lists are subject to splitting. In this particular case we can't quote the variable because it is an optional argument. Use the method suggested in the description linked below, instead. The technique is to use an unquoted variable with an alternate value. https://github.com/koalaman/shellcheck/wiki/SC2086 Reviewed-by: Brian Behlendorf Reviewed-by: Giuseppe Di Natale Reviewed-by: John Kennedy Signed-off-by: Ryan Moeller Closes #9212 --- scripts/zfs-tests.sh | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh index 7c5286ba70ff..ce766e239823 100755 --- a/scripts/zfs-tests.sh +++ b/scripts/zfs-tests.sh @@ -31,7 +31,7 @@ fi PROG=zfs-tests.sh VERBOSE="no" -QUIET= +QUIET="" CLEANUP="yes" CLEANUPALL="no" LOOPBACK="yes" @@ -307,7 +307,7 @@ while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do VERBOSE="yes" ;; q) - QUIET="-q" + QUIET="yes" ;; x) CLEANUPALL="yes" @@ -602,10 +602,17 @@ REPORT_FILE=$(mktemp -u -t zts-report.XXXX -p "$FILEDIR") # # Run all the tests as specified. # -msg "${TEST_RUNNER} ${QUIET} -c ${RUNFILE} -T ${TAGS} -i ${STF_SUITE}" \ - "-I ${ITERATIONS}" -${TEST_RUNNER} ${QUIET} -c "${RUNFILE}" -T "${TAGS}" -i "${STF_SUITE}" \ - -I "${ITERATIONS}" 2>&1 | tee "$RESULTS_FILE" +msg "${TEST_RUNNER} ${QUIET:+-q}" \ + "-c \"${RUNFILE}\"" \ + "-T \"${TAGS}\"" \ + "-i \"${STF_SUITE}\"" \ + "-I \"${ITERATIONS}\"" +${TEST_RUNNER} ${QUIET:+-q} \ + -c "${RUNFILE}" \ + -T "${TAGS}" \ + -i "${STF_SUITE}" \ + -I "${ITERATIONS}" \ + 2>&1 | tee "$RESULTS_FILE" # # Analyze the results. From 95f0144675dd0e18617355443be0fbc7171af76b Mon Sep 17 00:00:00 2001 From: Paul Dagnelie Date: Sun, 25 Aug 2019 18:33:03 -0700 Subject: [PATCH 26/68] Add regression test for "zpool list -p" Other than this test, zpool list -p is not well tested by any of the automated tests. Add a test for zpool list -p. 
Reviewed-by: Prakash Surya Reviewed-by: Serapheim Dimitropoulos Reviewed-by: Brian Behlendorf Signed-off-by: Paul Dagnelie Closes #9134 --- tests/runfiles/linux.run | 2 +- .../functional/cli_root/zpool_get/Makefile.am | 5 +- .../cli_root/zpool_get/zpool_get_005_pos.ksh | 78 +++++++++++++++++++ .../cli_root/zpool_get/zpool_get_parsable.cfg | 33 ++++++++ 4 files changed, 115 insertions(+), 3 deletions(-) create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_005_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_parsable.cfg diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index 848c19e3ecdd..80b48ee1d48e 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -361,7 +361,7 @@ tags = ['functional', 'cli_root', 'zpool_export'] [tests/functional/cli_root/zpool_get] tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos', - 'zpool_get_004_neg'] + 'zpool_get_004_neg', 'zpool_get_005_pos'] tags = ['functional', 'cli_root', 'zpool_get'] [tests/functional/cli_root/zpool_history] diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile.am b/tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile.am index 36a7f23126a4..0c87c9b37763 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile.am +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile.am @@ -5,7 +5,8 @@ dist_pkgdata_SCRIPTS = \ zpool_get_001_pos.ksh \ zpool_get_002_pos.ksh \ zpool_get_003_pos.ksh \ - zpool_get_004_neg.ksh + zpool_get_004_neg.ksh \ + zpool_get_005_pos.ksh dist_pkgdata_DATA = \ - zpool_get.cfg + zpool_get.cfg zpool_get_parsable.cfg diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_005_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_005_pos.ksh new file mode 100755 index 000000000000..ad27d180fdb1 --- /dev/null +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_005_pos.ksh @@ -0,0 +1,78 @@ +#!/bin/ksh -p +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# + +# +# Copyright (c) 2014 by Delphix. All rights reserved. +# + +. $STF_SUITE/include/libtest.shlib +. $STF_SUITE/tests/functional/cli_root/zpool_get/zpool_get_parsable.cfg + +# +# DESCRIPTION: +# +# Zpool get returns parsable values for all known parsable properties +# +# STRATEGY: +# 1. For all parsable properties, verify zpool get -p returns a parsable value +# + +if ! 
is_global_zone ; then + TESTPOOL=${TESTPOOL%%/*} +fi + +typeset -i i=0 + +while [[ $i -lt "${#properties[@]}" ]]; do + log_note "Checking for parsable ${properties[$i]} property" + log_must eval "zpool get -p ${properties[$i]} $TESTPOOL >/tmp/value.$$" + grep "${properties[$i]}" /tmp/value.$$ >/dev/null 2>&1 + if [[ $? -ne 0 ]]; then + log_fail "${properties[$i]} not seen in output" + fi + + typeset v=$(grep "${properties[$i]}" /tmp/value.$$ | awk '{print $3}') + + log_note "${properties[$i]} has a value of $v" + + # Determine if this value is a valid number, result in return code + log_must test -n "$v" + expr $v + 0 >/dev/null 2>&1 + + # All properties must be positive integers in order to be + # parsable (i.e. a return code of 0 or 1 from expr above). + # The only exception is "expandsize", which may be "-". + if [[ ! ($? -eq 0 || $? -eq 1 || \ + ("${properties[$i]}" = "expandsize" && "$v" = "-")) ]]; then + log_fail "${properties[$i]} is not parsable" + fi + + i=$(( $i + 1 )) +done + +rm /tmp/value.$$ +log_pass "Zpool get returns parsable values for all known parsable properties" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_parsable.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_parsable.cfg new file mode 100644 index 000000000000..e7b95a47223b --- /dev/null +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_parsable.cfg @@ -0,0 +1,33 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2009 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# + +# +# Copyright (c) 2013, 2014 by Delphix. All rights reserved. +# + +# Set the expected properties of zpool +typeset -a properties=("allocated" "capacity" "expandsize" "free" "freeing" + "leaked" "size") From 142f84dd19f20b47157bbbf45aaba489b6577c88 Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Mon, 26 Aug 2019 14:48:31 -0400 Subject: [PATCH 27/68] Restore :: in Makefile.am The double-colon looked like a typo, but it's actually an obscure feature. Rules with :: may appear multiple times and are run independently of one another in the order they appear. The use of :: for distclean-local was conventional, not accidental. Add comments to indicate the intentional use of double-colon rules. 
Reviewed-by: Brian Behlendorf Signed-off-by: Ryan Moeller Closes #9210 --- Makefile.am | 3 ++- contrib/dracut/02zfsexpandknowledge/Makefile.am | 2 ++ contrib/dracut/90zfs/Makefile.am | 1 + contrib/initramfs/hooks/Makefile.am | 2 ++ contrib/initramfs/scripts/Makefile.am | 2 ++ etc/init.d/Makefile.am | 1 + etc/modules-load.d/Makefile.am | 1 + etc/systemd/system-generators/Makefile.am | 1 + etc/systemd/system/Makefile.am | 1 + tests/zfs-tests/include/Makefile.am | 1 + tests/zfs-tests/tests/functional/pyzfs/Makefile.am | 1 + udev/rules.d/Makefile.am | 1 + 12 files changed, 16 insertions(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index da4f6407d18b..3992fe9adb22 100644 --- a/Makefile.am +++ b/Makefile.am @@ -44,7 +44,8 @@ gitrev: BUILT_SOURCES = gitrev -distclean-local: +# Double-colon rules are allowed; there are multiple independent definitions. +distclean-local:: -$(RM) -R autom4te*.cache -find . \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \ -o -name .pc -o -name .hg -o -name .git \) -prune -o \ diff --git a/contrib/dracut/02zfsexpandknowledge/Makefile.am b/contrib/dracut/02zfsexpandknowledge/Makefile.am index a5c567c161c8..6e553e8d456f 100644 --- a/contrib/dracut/02zfsexpandknowledge/Makefile.am +++ b/contrib/dracut/02zfsexpandknowledge/Makefile.am @@ -15,8 +15,10 @@ $(pkgdracut_SCRIPTS):%:%.in -e 's,@sysconfdir\@,$(sysconfdir),g' \ $< >'$@' +# Double-colon rules are allowed; there are multiple independent definitions. clean-local:: -$(RM) $(pkgdracut_SCRIPTS) +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(pkgdracut_SCRIPTS) diff --git a/contrib/dracut/90zfs/Makefile.am b/contrib/dracut/90zfs/Makefile.am index 0a557f57f256..1680230fa34e 100644 --- a/contrib/dracut/90zfs/Makefile.am +++ b/contrib/dracut/90zfs/Makefile.am @@ -33,5 +33,6 @@ $(pkgdracut_SCRIPTS) $(pkgdracut_DATA) :%:%.in -e 's,@mounthelperdir\@,$(mounthelperdir),g' \ $< >'$@' +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(pkgdracut_SCRIPTS) $(pkgdracut_DATA) diff --git a/contrib/initramfs/hooks/Makefile.am b/contrib/initramfs/hooks/Makefile.am index 1735872c29b7..3d8ef627ed47 100644 --- a/contrib/initramfs/hooks/Makefile.am +++ b/contrib/initramfs/hooks/Makefile.am @@ -14,8 +14,10 @@ $(hooks_SCRIPTS):%:%.in -e 's,@mounthelperdir\@,$(mounthelperdir),g' \ $< >'$@' +# Double-colon rules are allowed; there are multiple independent definitions. clean-local:: -$(RM) $(hooks_SCRIPTS) +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(hooks_SCRIPTS) diff --git a/contrib/initramfs/scripts/Makefile.am b/contrib/initramfs/scripts/Makefile.am index 12c2641b80cc..3ab18ba2cbce 100644 --- a/contrib/initramfs/scripts/Makefile.am +++ b/contrib/initramfs/scripts/Makefile.am @@ -13,8 +13,10 @@ $(scripts_DATA):%:%.in -e 's,@sysconfdir\@,$(sysconfdir),g' \ $< >'$@' +# Double-colon rules are allowed; there are multiple independent definitions. clean-local:: -$(RM) $(scripts_SCRIPTS) +# Double-colon rules are allowed; there are multiple independent definitions. 
distclean-local:: -$(RM) $(scripts_SCRIPTS) diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am index 93432386a2c4..8b1a7cf9629b 100644 --- a/etc/init.d/Makefile.am +++ b/etc/init.d/Makefile.am @@ -40,5 +40,6 @@ $(init_SCRIPTS) $(initconf_SCRIPTS) $(initcommon_SCRIPTS):%:%.in [ '$@' = 'zfs-functions' -o '$@' = 'zfs' ] || \ chmod +x '$@') +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(init_SCRIPTS) $(initcommon_SCRIPTS) $(initconf_SCRIPTS) diff --git a/etc/modules-load.d/Makefile.am b/etc/modules-load.d/Makefile.am index 58c7acd44e7c..47762b7d0657 100644 --- a/etc/modules-load.d/Makefile.am +++ b/etc/modules-load.d/Makefile.am @@ -9,5 +9,6 @@ $(modulesload_DATA):%:%.in -e '' \ $< >'$@' +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(modulesload_DATA) diff --git a/etc/systemd/system-generators/Makefile.am b/etc/systemd/system-generators/Makefile.am index c730982a5152..b4df01322211 100644 --- a/etc/systemd/system-generators/Makefile.am +++ b/etc/systemd/system-generators/Makefile.am @@ -11,5 +11,6 @@ $(systemdgenerator_SCRIPTS): %: %.in -e 's,@sysconfdir\@,$(sysconfdir),g' \ $< >'$@' +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(systemdgenerator_SCRIPTS) diff --git a/etc/systemd/system/Makefile.am b/etc/systemd/system/Makefile.am index 130c6c757a59..4e14467a044f 100644 --- a/etc/systemd/system/Makefile.am +++ b/etc/systemd/system/Makefile.am @@ -35,5 +35,6 @@ install-data-hook: $(MKDIR_P) "$(DESTDIR)$(systemdunitdir)" ln -sf /dev/null "$(DESTDIR)$(systemdunitdir)/zfs-import.service" +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(systemdunit_DATA) $(systemdpreset_DATA) diff --git a/tests/zfs-tests/include/Makefile.am b/tests/zfs-tests/include/Makefile.am index 41e105287b48..86c387c677d7 100644 --- a/tests/zfs-tests/include/Makefile.am +++ b/tests/zfs-tests/include/Makefile.am @@ -16,5 +16,6 @@ $(nodist_pkgdata_DATA): %: %.in -e 's,@sysconfdir\@,$(sysconfdir),g' \ $< >'$@' +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) default.cfg diff --git a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am index 0a27adeccaf4..c4cd10894c3c 100644 --- a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am +++ b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am @@ -14,5 +14,6 @@ $(pkgpyzfs_SCRIPTS):%:%.in $< >'$@' -chmod 775 $@ +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(pkgpyzfs_SCRIPTS) diff --git a/udev/rules.d/Makefile.am b/udev/rules.d/Makefile.am index f79ea4b3c3e4..86c33fc697cc 100644 --- a/udev/rules.d/Makefile.am +++ b/udev/rules.d/Makefile.am @@ -16,5 +16,6 @@ $(udevrule_DATA):%:%.in -e 's,@sysconfdir\@,$(sysconfdir),g' \ $< > '$@' +# Double-colon rules are allowed; there are multiple independent definitions. distclean-local:: -$(RM) $(udevrule_DATA) From e7a2fa70c3b0d8c8cee2b484038bb5623c7c1ea9 Mon Sep 17 00:00:00 2001 From: Tom Caputi Date: Tue, 27 Aug 2019 12:55:51 -0400 Subject: [PATCH 28/68] Fix deadlock in 'zfs rollback' Currently, the 'zfs rollback' code can end up deadlocked due to the way the kernel handles unreferenced inodes on a suspended fs. Essentially, the zfs_resume_fs() code path may cause zfs to spawn new threads as it reinstantiates the suspended fs's zil. 
When a new thread is spawned, the kernel may attempt to free memory
for that thread by freeing some unreferenced inodes.  If it happens to
select inodes that are a part of the suspended fs, a deadlock will
occur because freeing inodes requires holding the fs's
z_teardown_inactive_lock which is still held from the suspend.

This patch corrects this issue by adding an additional reference to
all inodes that are still present when a suspend is initiated.  This
prevents them from being freed by the kernel for any reason.

Reviewed-by: Alek Pinchuk
Reviewed-by: Brian Behlendorf
Signed-off-by: Tom Caputi
Closes #9203
---
 include/sys/zfs_znode.h |  1 +
 module/zfs/zfs_vfsops.c | 16 +++++++++++++++-
 module/zfs/zfs_znode.c  |  1 +
 3 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/include/sys/zfs_znode.h b/include/sys/zfs_znode.h
index a0a3dd1ad1f2..acaaf28845e6 100644
--- a/include/sys/zfs_znode.h
+++ b/include/sys/zfs_znode.h
@@ -200,6 +200,7 @@ typedef struct znode {
 	boolean_t	z_is_mapped;	/* are we mmap'ed */
 	boolean_t	z_is_ctldir;	/* are we .zfs entry */
 	boolean_t	z_is_stale;	/* are we stale due to rollback? */
+	boolean_t	z_suspended;	/* extra ref from a suspend? */
 	uint_t		z_blksz;	/* block size in bytes */
 	uint_t		z_seq;		/* modification sequence number */
 	uint64_t	z_mapcnt;	/* number of pages mapped to file */
diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index af82c7bc4800..34f4842d7162 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -1737,7 +1737,12 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 	 * will fail with EIO since we have z_teardown_lock for writer (only
 	 * relevant for forced unmount).
 	 *
-	 * Release all holds on dbufs.
+	 * Release all holds on dbufs. We also grab an extra reference to all
+	 * the remaining inodes so that the kernel does not attempt to free
+	 * any inodes of a suspended fs. This can cause deadlocks since the
+	 * zfs_resume_fs() process may involve starting threads, which might
+	 * attempt to free unreferenced inodes to free up memory for the new
+	 * thread.
	 */
 	if (!unmounting) {
 		mutex_enter(&zfsvfs->z_znodes_lock);
@@ -1745,6 +1750,9 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 		    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
 			if (zp->z_sa_hdl)
 				zfs_znode_dmu_fini(zp);
+			if (igrab(ZTOI(zp)) != NULL)
+				zp->z_suspended = B_TRUE;
+
 		}
 		mutex_exit(&zfsvfs->z_znodes_lock);
 	}
@@ -2202,6 +2210,12 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
 			remove_inode_hash(ZTOI(zp));
 			zp->z_is_stale = B_TRUE;
 		}
+
+		/* see comment in zfs_suspend_fs() */
+		if (zp->z_suspended) {
+			zfs_iput_async(ZTOI(zp));
+			zp->z_suspended = B_FALSE;
+		}
 	}
 	mutex_exit(&zfsvfs->z_znodes_lock);
 
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 498547758b1b..8512db9bcb2d 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -545,6 +545,7 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 	zp->z_is_mapped = B_FALSE;
 	zp->z_is_ctldir = B_FALSE;
 	zp->z_is_stale = B_FALSE;
+	zp->z_suspended = B_FALSE;
 	zp->z_sa_hdl = NULL;
 	zp->z_mapcnt = 0;
 	zp->z_id = db->db_object;

From f335b8ffe178276c7a98bdc87965a3dc9e2b59f5 Mon Sep 17 00:00:00 2001
From: Richard Allen <33836503+belperite@users.noreply.github.com>
Date: Tue, 27 Aug 2019 21:44:02 +0100
Subject: [PATCH 29/68] Fix Plymouth passphrase prompt in initramfs script

Entering the ZFS encryption passphrase under Plymouth wasn't working
because, in the ZFS initrd script, Plymouth was calling zfs via
"--command", which didn't pass the filesystem argument through to
zfs load-key properly: the single quotes around the filesystem name,
intended to handle spaces, were passed through literally, and
zfs load-key couldn't understand them.

Reviewed-by: Richard Laager
Reviewed-by: Garrett Fields
Signed-off-by: Richard Allen
Issue #9193
Closes #9202
---
 contrib/initramfs/scripts/zfs.in | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/contrib/initramfs/scripts/zfs.in b/contrib/initramfs/scripts/zfs.in
index 9d11e1926afd..9e90d76bb114 100644
--- a/contrib/initramfs/scripts/zfs.in
+++ b/contrib/initramfs/scripts/zfs.in
@@ -411,29 +411,29 @@ decrypt_fs()
 
 		# Determine dataset that holds key for root dataset
 		ENCRYPTIONROOT=$(${ZFS} get -H -o value encryptionroot "${fs}")
-		DECRYPT_CMD="${ZFS} load-key '${ENCRYPTIONROOT}'"
 
 		# If root dataset is encrypted...
 		if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
-
+			TRY_COUNT=3
 			# Prompt with plymouth, if active
 			if [ -e /bin/plymouth ] && /bin/plymouth --ping 2>/dev/null; then
-				plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" \
-					--number-of-tries="3" \
-					--command="${DECRYPT_CMD}"
+				while [ $TRY_COUNT -gt 0 ]; do
+					plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
+						$ZFS load-key "${ENCRYPTIONROOT}" && break
+					TRY_COUNT=$((TRY_COUNT - 1))
+				done
 
 			# Prompt with systemd, if active
 			elif [ -e /run/systemd/system ]; then
-				TRY_COUNT=3
 				while [ $TRY_COUNT -gt 0 ]; do
 					systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
-						${DECRYPT_CMD} && break
+						$ZFS load-key "${ENCRYPTIONROOT}" && break
 					TRY_COUNT=$((TRY_COUNT - 1))
 				done
 
 			# Prompt with ZFS tty, otherwise
 			else
-				eval "${DECRYPT_CMD}"
+				$ZFS load-key "${ENCRYPTIONROOT}"
 			fi
 		fi
 	fi

From e6203d288a787f7196bdc206edfc9148f9a58780 Mon Sep 17 00:00:00 2001
From: Andriy Gapon
Date: Tue, 27 Aug 2019 23:45:53 +0300
Subject: [PATCH 30/68] zfs_ioc_snapshot: check user-prop permissions on
 snapshotted datasets

Previously, the permissions were checked on the pool, which was
obviously incorrect.
After this change, zfs_check_userprops() only validates the properties without any permission checks. The permissions are checked individually for each snapshotted dataset. Reviewed-by: Brian Behlendorf Reviewed-by: Matt Ahrens Signed-off-by: Andriy Gapon Closes #9179 Closes #9180 --- module/zfs/zfs_ioctl.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index 9f4991162ff4..399b15cbdef4 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -2739,10 +2739,9 @@ zfs_set_prop_nvlist(const char *dsname, zprop_source_t source, nvlist_t *nvl, * Check that all the properties are valid user properties. */ static int -zfs_check_userprops(const char *fsname, nvlist_t *nvl) +zfs_check_userprops(nvlist_t *nvl) { nvpair_t *pair = NULL; - int error = 0; while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) { const char *propname = nvpair_name(pair); @@ -2751,10 +2750,6 @@ zfs_check_userprops(const char *fsname, nvlist_t *nvl) nvpair_type(pair) != DATA_TYPE_STRING) return (SET_ERROR(EINVAL)); - if ((error = zfs_secpolicy_write_perms(fsname, - ZFS_DELEG_PERM_USERPROP, CRED()))) - return (error); - if (strlen(propname) >= ZAP_MAXNAMELEN) return (SET_ERROR(ENAMETOOLONG)); @@ -3465,19 +3460,18 @@ zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) nvpair_t *pair; (void) nvlist_lookup_nvlist(innvl, "props", &props); - if ((error = zfs_check_userprops(poolname, props)) != 0) - return (error); - if (!nvlist_empty(props) && zfs_earlier_version(poolname, SPA_VERSION_SNAP_PROPS)) return (SET_ERROR(ENOTSUP)); + if ((error = zfs_check_userprops(props)) != 0) + return (error); snaps = fnvlist_lookup_nvlist(innvl, "snaps"); poollen = strlen(poolname); for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) { const char *name = nvpair_name(pair); - const char *cp = strchr(name, '@'); + char *cp = strchr(name, '@'); /* * The snap name must contain an @, and the part after it must @@ -3494,6 +3488,18 @@ zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) (name[poollen] != '/' && name[poollen] != '@')) return (SET_ERROR(EXDEV)); + /* + * Check for permission to set the properties on the fs. + */ + if (!nvlist_empty(props)) { + *cp = '\0'; + error = zfs_secpolicy_write_perms(name, + ZFS_DELEG_PERM_USERPROP, CRED()); + *cp = '@'; + if (error != 0) + return (error); + } + /* This must be the only snap of this fs. */ for (nvpair_t *pair2 = nvlist_next_nvpair(snaps, pair); pair2 != NULL; pair2 = nvlist_next_nvpair(snaps, pair2)) { From 9c9dcd6e04ae7a868efafe4447bdbe67ae25a6da Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Wed, 28 Aug 2019 13:38:40 -0400 Subject: [PATCH 31/68] Prefer `for (;;)` to `while (TRUE)` Defining a special constant to make an infinite loop is excessive, especially when the name clashes with symbols commonly defined on some platforms (ie FreeBSD). 
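As a minimal standalone comparison (illustrative only, not code from
this patch), the two loops below behave identically; the second form
needs no helper constant that could collide with platform headers:

    #include <stdio.h>

    /*
     * The form being removed: needs a TRUE symbol, which platform
     * headers (e.g. on FreeBSD) may already define and clash with.
     */
    static const int TRUE = 1;

    int
    main(void)
    {
        int i = 0;

        while (TRUE) {          /* old style */
            if (++i == 3)
                break;
        }

        for (;;) {              /* new style: the standard C idiom */
            if (--i == 0)
                break;
        }

        (void) printf("%d\n", i);   /* prints 0 */
        return (0);
    }

Because for (;;) has an empty controlling expression, the compiler
treats it as an unconditional loop with no extra symbol required.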
Reviewed-by: Brian Behlendorf
Reviewed-by: George Melikov
Reviewed-by: John Kennedy
Closes #9219
---
 .../cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c
index 7986851efae2..e262ecefea92 100644
--- a/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c
+++ b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c
@@ -47,7 +47,6 @@
 #include
 #include
 
-static const int TRUE = 1;
 static char *filebase;
 
 static int
@@ -65,7 +64,7 @@ mover(void *a)
 
 	len = strlen(filebase) + 5;
 
-	while (TRUE) {
+	for (;;) {
 		idx = pickidx();
 		(void) snprintf(buf, len, "%s.%03d", filebase, idx);
 		ret = rename(filebase, buf);
@@ -85,7 +84,7 @@ cleaner(void *a)
 
 	len = strlen(filebase) + 5;
 
-	while (TRUE) {
+	for (;;) {
 		idx = pickidx();
 		(void) snprintf(buf, len, "%s.%03d", filebase, idx);
 		ret = remove(buf);
@@ -102,7 +101,7 @@ writer(void *a)
 	int *fd = (int *)a;
 	int ret;
 
-	while (TRUE) {
+	for (;;) {
 		if (*fd != -1)
 			(void) close (*fd);
 
@@ -143,7 +142,7 @@ main(int argc, char **argv)
 	(void) pthread_create(&tid, NULL, cleaner, NULL);
 	(void) pthread_create(&tid, NULL, writer, (void *) &fd);
 
-	while (TRUE) {
+	for (;;) {
 		int ret;
 		struct stat st;
 

From 035e96118bc9a7cbf435dd17dda507b870fcf6e6 Mon Sep 17 00:00:00 2001
From: Chunwei Chen
Date: Wed, 28 Aug 2019 10:42:02 -0700
Subject: [PATCH 32/68] Fix zil replay panic when TX_REMOVE followed by
 TX_CREATE

If TX_REMOVE is followed by TX_CREATE on the same object id, we need to
make sure the object removal is completely finished before creation.
The current implementation relies on dnode_hold_impl with
DNODE_MUST_BE_ALLOCATED returning ENOENT.  While this check seemed to
work fine before, in the current version it does not guarantee that
the object removal has completed.  We fix this by checking whether
DNODE_MUST_BE_FREE returns successfully instead.  Also add a test and
remove dead code in dnode_hold_impl.

Reviewed-by: Brian Behlendorf
Reviewed-by: Tom Caputi
Signed-off-by: Chunwei Chen
Closes #7151
Closes #8910
Closes #9123
Closes #9145
---
 include/sys/dnode.h                           |   7 +-
 module/zfs/dnode.c                            |  49 +++++--
 module/zfs/zfs_replay.c                       |   8 +-
 tests/runfiles/linux.run                      |   4 +-
 .../tests/functional/slog/Makefile.am         |   3 +-
 ...g_replay_fs.ksh => slog_replay_fs_001.ksh} |   0
 .../functional/slog/slog_replay_fs_002.ksh    | 137 ++++++++++++++++++
 7 files changed, 184 insertions(+), 24 deletions(-)
 rename tests/zfs-tests/tests/functional/slog/{slog_replay_fs.ksh => slog_replay_fs_001.ksh} (100%)
 create mode 100755 tests/zfs-tests/tests/functional/slog/slog_replay_fs_002.ksh

diff --git a/include/sys/dnode.h b/include/sys/dnode.h
index f18b5dd5cdbc..5e9d070e2eb9 100644
--- a/include/sys/dnode.h
+++ b/include/sys/dnode.h
@@ -46,6 +46,7 @@ extern "C" {
  */
 #define	DNODE_MUST_BE_ALLOCATED	1
 #define	DNODE_MUST_BE_FREE	2
+#define	DNODE_DRY_RUN		4
 
 /*
  * dnode_next_offset() flags.
@@ -415,6 +416,7 @@ int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots, boolean_t dnode_add_ref(dnode_t *dn, void *ref); void dnode_rele(dnode_t *dn, void *ref); void dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting); +int dnode_try_claim(objset_t *os, uint64_t object, int slots); void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx); void dnode_sync(dnode_t *dn, dmu_tx_t *tx); void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs, @@ -531,11 +533,6 @@ typedef struct dnode_stats { * a range of dnode slots which would overflow the dnode_phys_t. */ kstat_named_t dnode_hold_free_overflow; - /* - * Number of times a dnode_hold(...) was attempted on a dnode - * which had already been unlinked in an earlier txg. - */ - kstat_named_t dnode_hold_free_txg; /* * Number of times dnode_free_interior_slots() needed to retry * acquiring a slot zrl lock due to contention. diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c index ef62d394a919..108bf171420c 100644 --- a/module/zfs/dnode.c +++ b/module/zfs/dnode.c @@ -55,7 +55,6 @@ dnode_stats_t dnode_stats = { { "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 }, { "dnode_hold_free_overflow", KSTAT_DATA_UINT64 }, { "dnode_hold_free_refcount", KSTAT_DATA_UINT64 }, - { "dnode_hold_free_txg", KSTAT_DATA_UINT64 }, { "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 }, { "dnode_allocate", KSTAT_DATA_UINT64 }, { "dnode_reallocate", KSTAT_DATA_UINT64 }, @@ -1263,6 +1262,10 @@ dnode_buf_evict_async(void *dbu) * as an extra dnode slot by an large dnode, in which case it returns * ENOENT. * + * If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just + * return whether the hold would succeed or not. tag and dnp should set to + * NULL in this case. + * * errors: * EINVAL - Invalid object number or flags. 
* ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE) @@ -1291,6 +1294,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0)); ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0)); + IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL)); /* * If you are holding the spa config lock as writer, you shouldn't @@ -1320,8 +1324,11 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE) return (SET_ERROR(EEXIST)); DNODE_VERIFY(dn); - (void) zfs_refcount_add(&dn->dn_holds, tag); - *dnp = dn; + /* Don't actually hold if dry run, just return 0 */ + if (!(flag & DNODE_DRY_RUN)) { + (void) zfs_refcount_add(&dn->dn_holds, tag); + *dnp = dn; + } return (0); } @@ -1462,6 +1469,14 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, return (SET_ERROR(ENOENT)); } + /* Don't actually hold if dry run, just return 0 */ + if (flag & DNODE_DRY_RUN) { + mutex_exit(&dn->dn_mtx); + dnode_slots_rele(dnc, idx, slots); + dbuf_rele(db, FTAG); + return (0); + } + DNODE_STAT_BUMP(dnode_hold_alloc_hits); } else if (flag & DNODE_MUST_BE_FREE) { @@ -1519,6 +1534,14 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, return (SET_ERROR(EEXIST)); } + /* Don't actually hold if dry run, just return 0 */ + if (flag & DNODE_DRY_RUN) { + mutex_exit(&dn->dn_mtx); + dnode_slots_rele(dnc, idx, slots); + dbuf_rele(db, FTAG); + return (0); + } + dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR); DNODE_STAT_BUMP(dnode_hold_free_hits); } else { @@ -1526,15 +1549,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, return (SET_ERROR(EINVAL)); } - if (dn->dn_free_txg) { - DNODE_STAT_BUMP(dnode_hold_free_txg); - type = dn->dn_type; - mutex_exit(&dn->dn_mtx); - dnode_slots_rele(dnc, idx, slots); - dbuf_rele(db, FTAG); - return (SET_ERROR((flag & DNODE_MUST_BE_ALLOCATED) ? - ENOENT : EEXIST)); - } + ASSERT0(dn->dn_free_txg); if (zfs_refcount_add(&dn->dn_holds, tag) == 1) dbuf_add_ref(db, dnh); @@ -1625,6 +1640,16 @@ dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting) } } +/* + * Test whether we can create a dnode at the specified location. 
+ */ +int +dnode_try_claim(objset_t *os, uint64_t object, int slots) +{ + return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN, + slots, NULL, NULL)); +} + void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx) { diff --git a/module/zfs/zfs_replay.c b/module/zfs/zfs_replay.c index 144381769059..7dea85bb6614 100644 --- a/module/zfs/zfs_replay.c +++ b/module/zfs/zfs_replay.c @@ -337,8 +337,8 @@ zfs_replay_create_acl(void *arg1, void *arg2, boolean_t byteswap) xva.xva_vattr.va_nblocks = lr->lr_gen; xva.xva_vattr.va_fsid = dnodesize; - error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL); - if (error != ENOENT) + error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT); + if (error) goto bail; if (lr->lr_common.lrc_txtype & TX_CI) @@ -473,8 +473,8 @@ zfs_replay_create(void *arg1, void *arg2, boolean_t byteswap) xva.xva_vattr.va_nblocks = lr->lr_gen; xva.xva_vattr.va_fsid = dnodesize; - error = dmu_object_info(zfsvfs->z_os, objid, NULL); - if (error != ENOENT) + error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT); + if (error) goto out; if (lr->lr_common.lrc_txtype & TX_CI) diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index 80b48ee1d48e..d05ff61202ab 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -835,8 +835,8 @@ tags = ['functional', 'scrub_mirror'] tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos', 'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg', 'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg', - 'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs', - 'slog_replay_volume'] + 'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001', + 'slog_replay_fs_002', 'slog_replay_volume'] tags = ['functional', 'slog'] [tests/functional/snapshot] diff --git a/tests/zfs-tests/tests/functional/slog/Makefile.am b/tests/zfs-tests/tests/functional/slog/Makefile.am index 4548ce63b40c..33e3a6d3a496 100644 --- a/tests/zfs-tests/tests/functional/slog/Makefile.am +++ b/tests/zfs-tests/tests/functional/slog/Makefile.am @@ -17,7 +17,8 @@ dist_pkgdata_SCRIPTS = \ slog_013_pos.ksh \ slog_014_pos.ksh \ slog_015_neg.ksh \ - slog_replay_fs.ksh \ + slog_replay_fs_001.ksh \ + slog_replay_fs_002.ksh \ slog_replay_volume.ksh dist_pkgdata_DATA = \ diff --git a/tests/zfs-tests/tests/functional/slog/slog_replay_fs.ksh b/tests/zfs-tests/tests/functional/slog/slog_replay_fs_001.ksh similarity index 100% rename from tests/zfs-tests/tests/functional/slog/slog_replay_fs.ksh rename to tests/zfs-tests/tests/functional/slog/slog_replay_fs_001.ksh diff --git a/tests/zfs-tests/tests/functional/slog/slog_replay_fs_002.ksh b/tests/zfs-tests/tests/functional/slog/slog_replay_fs_002.ksh new file mode 100755 index 000000000000..3c3ccdf4ad23 --- /dev/null +++ b/tests/zfs-tests/tests/functional/slog/slog_replay_fs_002.ksh @@ -0,0 +1,137 @@ +#!/bin/ksh -p +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2007 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# + +. $STF_SUITE/tests/functional/slog/slog.kshlib + +# +# DESCRIPTION: +# Verify slog replay correctly when TX_REMOVEs are followed by +# TX_CREATEs. +# +# STRATEGY: +# 1. Create a file system (TESTFS) with a lot of files +# 2. Freeze TESTFS +# 3. Remove all files then create a lot of files +# 4. Copy TESTFS to temporary location (TESTDIR/copy) +# 5. Unmount filesystem +# +# 6. Remount TESTFS +# 7. Compare TESTFS against the TESTDIR/copy +# + +verify_runnable "global" + +function cleanup_fs +{ + cleanup +} + +log_assert "Replay of intent log succeeds." +log_onexit cleanup_fs +log_must setup + +# +# 1. Create a file system (TESTFS) with a lot of files +# +log_must zpool create $TESTPOOL $VDEV log mirror $LDEV +log_must zfs set compression=on $TESTPOOL +log_must zfs create $TESTPOOL/$TESTFS + +# Prep for the test of TX_REMOVE followed by TX_CREATE +dnsize=(legacy auto 1k 2k 4k 8k 16k) +NFILES=200 +log_must mkdir /$TESTPOOL/$TESTFS/dir0 +log_must eval 'for i in $(seq $NFILES); do zfs set dnodesize=${dnsize[$RANDOM % ${#dnsize[@]}]} $TESTPOOL/$TESTFS; touch /$TESTPOOL/$TESTFS/dir0/file.$i; done' + +# +# Reimport to reset dnode allocation pointer. +# This is to make sure we will have TX_REMOVE and TX_CREATE on same id +# +log_must zpool export $TESTPOOL +log_must zpool import -f -d $VDIR $TESTPOOL + +# +# This dd command works around an issue where ZIL records aren't created +# after freezing the pool unless a ZIL header already exists. Create a file +# synchronously to force ZFS to write one out. +# +log_must dd if=/dev/zero of=/$TESTPOOL/$TESTFS/sync \ + conv=fdatasync,fsync bs=1 count=1 + +# +# 2. Freeze TESTFS +# +log_must zpool freeze $TESTPOOL + +# +# 3. Remove all files then create a lot of files +# +# TX_REMOVE followed by TX_CREATE +log_must eval 'rm -f /$TESTPOOL/$TESTFS/dir0/*' +log_must eval 'for i in $(seq $NFILES); do zfs set dnodesize=${dnsize[$RANDOM % ${#dnsize[@]}]} $TESTPOOL/$TESTFS; touch /$TESTPOOL/$TESTFS/dir0/file.$i; done' + +# +# 4. Copy TESTFS to temporary location (TESTDIR/copy) +# +log_must mkdir -p $TESTDIR/copy +log_must cp -a /$TESTPOOL/$TESTFS/* $TESTDIR/copy/ + +# +# 5. Unmount filesystem and export the pool +# +# At this stage TESTFS is empty again and frozen, the intent log contains +# a complete set of deltas to replay. +# +log_must zfs unmount /$TESTPOOL/$TESTFS + +log_note "Verify transactions to replay:" +log_must zdb -iv $TESTPOOL/$TESTFS + +log_must zpool export $TESTPOOL + +# +# 6. Remount TESTFS +# +# Import the pool to unfreeze it and claim log blocks. It has to be +# `zpool import -f` because we can't write a frozen pool's labels! +# +log_must zpool import -f -d $VDIR $TESTPOOL + +# +# 7. Compare TESTFS against the TESTDIR/copy +# +log_note "Verify current block usage:" +log_must zdb -bcv $TESTPOOL + +log_note "Verify number of files" +log_must test "$(ls /$TESTPOOL/$TESTFS/dir0 | wc -l)" -eq $NFILES + +log_note "Verify working set diff:" +log_must diff -r /$TESTPOOL/$TESTFS $TESTDIR/copy + +log_pass "Replay of intent log succeeds." 
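The dnode_try_claim() helper added above gives ZIL replay a direct way
to ask whether an object number is genuinely free.  As a rough sketch
of the resulting replay logic (illustrative only -- it assumes the
kernel-internal types above and mirrors the zfs_replay.c hunks; it is
not additional patch content):

    /*
     * Replaying a TX_CREATE: instead of inferring "slot is free" from
     * dmu_object_info() returning ENOENT (which a still-pending removal
     * can defeat), ask whether a dnode could be claimed right now.
     */
    static int
    replay_create_sketch(objset_t *os, uint64_t objid, uint64_t dnodesize)
    {
        int error;

        /* Internally DNODE_MUST_BE_FREE | DNODE_DRY_RUN; no hold taken. */
        error = dnode_try_claim(os, objid, dnodesize >> DNODE_SHIFT);
        if (error != 0)
            return (error); /* removal of the old object not yet synced */

        /* ... safe to recreate the object at objid ... */
        return (0);
    }

Because DNODE_DRY_RUN takes no hold, the caller passes NULL for both
tag and dnp and only inspects the return value.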
From 28c91ab66d4607795f1e7ae3a2a82f3690ea6c4f Mon Sep 17 00:00:00 2001
From: Don Brady
Date: Wed, 28 Aug 2019 11:44:46 -0600
Subject: [PATCH 33/68] Tag ABD pages for exclusion in kernel crash dumps

Tag the ABD data pages so that they can be identified for exclusion
from kernel crash dumps.  Eliminating the zfs file data allows for
significantly smaller crash dump files.  Note that ZFS in illumos has
always excluded the zfs data pages from a kernel crash dump.

This change tags ARC scatter data pages so they can be identified by
the makedumpfile(8) command.  That command is used to create smaller
dump files by ignoring some memory regions and using compression.  It
already filters file data from the VFS page cache and will now be able
to exclude ZFS file data pages from the dump file.

A corresponding change to makedumpfile(8) is required to identify ZFS
data pages.

Reviewed-by: Brian Behlendorf
Reviewed-by: Paul Dagnelie
Signed-off-by: Don Brady
Closes #8899
---
 module/zfs/abd.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/module/zfs/abd.c b/module/zfs/abd.c
index 8b2514404a86..ac6b0b742733 100644
--- a/module/zfs/abd.c
+++ b/module/zfs/abd.c
@@ -245,6 +245,32 @@ abd_chunkcnt_for_bytes(size_t size)
 }
 
 #ifdef _KERNEL
+/*
+ * Mark zfs data pages so they can be excluded from kernel crash dumps
+ */
+#ifdef _LP64
+#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E
+
+static inline void
+abd_mark_zfs_page(struct page *page)
+{
+	get_page(page);
+	SetPagePrivate(page);
+	set_page_private(page, ABD_FILE_CACHE_PAGE);
+}
+
+static inline void
+abd_unmark_zfs_page(struct page *page)
+{
+	set_page_private(page, 0UL);
+	ClearPagePrivate(page);
+	put_page(page);
+}
+#else
+#define	abd_mark_zfs_page(page)
+#define	abd_unmark_zfs_page(page)
+#endif /* _LP64 */
+
 #ifndef CONFIG_HIGHMEM
 
 #ifndef __GFP_RECLAIM
@@ -318,6 +344,7 @@ abd_alloc_pages(abd_t *abd, size_t size)
 		size_t sg_size = MIN(PAGESIZE << compound_order(page),
 		    remaining_size);
 		sg_set_page(sg, page, sg_size, 0);
+		abd_mark_zfs_page(page);
 		remaining_size -= sg_size;
 
 		sg = sg_next(sg);
@@ -404,6 +431,7 @@ abd_alloc_pages(abd_t *abd, size_t size)
 			ABDSTAT_BUMP(abdstat_scatter_orders[0]);
 
 		sg_set_page(sg, page, PAGESIZE, 0);
+		abd_mark_zfs_page(page);
 	}
 
 	if (nr_pages > 1) {
@@ -430,6 +458,7 @@ abd_free_pages(abd_t *abd)
 
 	abd_for_each_sg(abd, sg, nr_pages, i) {
 		page = sg_page(sg);
+		abd_unmark_zfs_page(page);
 		order = compound_order(page);
 		__free_pages(page, order);
 		ASSERT3U(sg->length, <=, PAGE_SIZE << order);

From 07a328dde4937a49aa975e8dffea2e6f8754a097 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Wed, 28 Aug 2019 14:52:08 -0700
Subject: [PATCH 34/68] ZTS: Temporarily disable several upgrade tests

Until issues #9185 and #9186 have been resolved the following
zpool upgrade tests are being disabled to prevent CI failures.
  zpool_upgrade_002_pos, zpool_upgrade_003_pos, zpool_upgrade_004_pos,
  zpool_upgrade_007_pos, zpool_upgrade_008_pos

Reviewed-by: Paul Dagnelie
Reviewed-by: Matthew Ahrens
Signed-off-by: Brian Behlendorf
Issue #9185
Issue #9186
Closes #9225
---
 tests/runfiles/linux.run | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index d05ff61202ab..a9ef628bc863 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -481,13 +481,15 @@ tests = ['zpool_trim_attach_detach_add_remove',
 tags = ['functional', 'zpool_trim']
 
 [tests/functional/cli_root/zpool_upgrade]
-tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
-    'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
+tests = ['zpool_upgrade_001_pos',
     'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
-    'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
     'zpool_upgrade_009_neg']
 tags = ['functional', 'cli_root', 'zpool_upgrade']
 
+# Disabled pending resolution of #9185 and #9186.
+# 'zpool_upgrade_002_pos', 'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
+# 'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
+
 [tests/functional/cli_user/misc]
 tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
     'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',

From 8d042842815f33d2e4ab919a695139b11b7ed0c2 Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Wed, 28 Aug 2019 15:56:54 -0600
Subject: [PATCH 35/68] Use smaller default slack/delta value for
 schedule_hrtimeout_range()

For interrupt coalescing, cv_timedwait_hires() uses a 100us slack/delta
for calls to schedule_hrtimeout_range().  This 100us slack can be
costly for small writes.

This change improves small write performance by passing the resolution
`res` parameter to schedule_hrtimeout_range() to be used as the
delta/slack.  A new tunable `spl_schedule_hrtimeout_slack_us` is added
to preserve the old behavior when desired.

Performance observations on an 8K recordsize filesystem:
- 8K random writes at 1-64 threads, up to 60% improvement for one
  thread and smaller gains as thread count increases.  At >64 threads,
  a 2-5% decrease in performance was observed.
- 8K sequential writes, similar 60% improvement for one thread and
  leveling out around 64 threads.  At >64 threads, a 5-10% decrease in
  performance was observed.
- 128K sequential writes see a 1-5% improvement.  No regression was
  observed at high thread count.

Testing done on Ubuntu 18.04 with 4.15 kernel, 8vCPUs and SSD storage
on VMware ESX.
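To see the new slack computation in isolation before the diff, the
standalone program below restates the clamping logic from the
spl-condvar.c hunk that follows (the inputs are made-up sample values):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_USEC           1000ULL
    #define MAX_HRTIMEOUT_SLACK_US  1000ULL

    #define MIN(a, b)   ((a) < (b) ? (a) : (b))
    #define MAX(a, b)   ((a) > (b) ? (a) : (b))

    /*
     * Slack is the larger of the caller-supplied resolution and the
     * module tunable, capped at 1000us.  With both at 0 (the new
     * defaults), small writes no longer pay a fixed 100us penalty.
     */
    static uint64_t
    slack_ns(uint64_t res_ns, uint64_t tunable_us)
    {
        return (MIN(MAX(res_ns, tunable_us * NSEC_PER_USEC),
            MAX_HRTIMEOUT_SLACK_US * NSEC_PER_USEC));
    }

    int
    main(void)
    {
        /* res=1000ns, tunable=0 -> 1000ns (old code always used 100000) */
        printf("%llu\n", (unsigned long long)slack_ns(1000, 0));
        /* tunable=100us restores roughly the old coalescing behavior */
        printf("%llu\n", (unsigned long long)slack_ns(1000, 100));
        return (0);
    }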
Reviewed-by: Richard Elling Reviewed-by: Brian Behlendorf Reviewed-by: Matt Ahrens Signed-off-by: Tony Nguyen Closes #9217 --- module/spl/spl-condvar.c | 54 +++++++++++++++++++++++++++------------- module/zfs/mmp.c | 2 +- 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/module/spl/spl-condvar.c b/module/spl/spl-condvar.c index 19c575f770b8..664fae1e7199 100644 --- a/module/spl/spl-condvar.c +++ b/module/spl/spl-condvar.c @@ -26,8 +26,10 @@ #include #include +#include #include #include +#include #include @@ -35,6 +37,34 @@ #include #endif +#define MAX_HRTIMEOUT_SLACK_US 1000 +unsigned int spl_schedule_hrtimeout_slack_us = 0; + +static int +param_set_hrtimeout_slack(const char *buf, zfs_kernel_param_t *kp) +{ + unsigned long val; + int error; + + error = kstrtoul(buf, 0, &val); + if (error) + return (error); + + if (val > MAX_HRTIMEOUT_SLACK_US) + return (-EINVAL); + + error = param_set_uint(buf, kp); + if (error < 0) + return (error); + + return (0); +} + +module_param_call(spl_schedule_hrtimeout_slack_us, param_set_hrtimeout_slack, + param_get_uint, &spl_schedule_hrtimeout_slack_us, 0644); +MODULE_PARM_DESC(spl_schedule_hrtimeout_slack_us, + "schedule_hrtimeout_range() delta/slack value in us, default(0)"); + void __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg) { @@ -304,12 +334,13 @@ EXPORT_SYMBOL(__cv_timedwait_sig); */ static clock_t __cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time, - int state) + hrtime_t res, int state) { DEFINE_WAIT(wait); kmutex_t *m; hrtime_t time_left; ktime_t ktime_left; + u64 slack = 0; ASSERT(cvp); ASSERT(mp); @@ -336,13 +367,11 @@ __cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time, * race where 'cvp->cv_waiters > 0' but the list is empty. */ mutex_exit(mp); - /* - * Allow a 100 us range to give kernel an opportunity to coalesce - * interrupts - */ + ktime_left = ktime_set(0, time_left); - schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC, - HRTIMER_MODE_REL); + slack = MIN(MAX(res, spl_schedule_hrtimeout_slack_us * NSEC_PER_USEC), + MAX_HRTIMEOUT_SLACK_US * NSEC_PER_USEC); + schedule_hrtimeout_range(&ktime_left, slack, HRTIMER_MODE_REL); /* No more waiters a different mutex could be used */ if (atomic_dec_and_test(&cvp->cv_waiters)) { @@ -369,19 +398,10 @@ static clock_t cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res, int flag, int state) { - if (res > 1) { - /* - * Align expiration to the specified resolution. - */ - if (flag & CALLOUT_FLAG_ROUNDUP) - tim += res - 1; - tim = (tim / res) * res; - } - if (!(flag & CALLOUT_FLAG_ABSOLUTE)) tim += gethrtime(); - return (__cv_timedwait_hires(cvp, mp, tim, state)); + return (__cv_timedwait_hires(cvp, mp, tim, res, state)); } clock_t diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c index cd5603a1a5cd..1ffd862da126 100644 --- a/module/zfs/mmp.c +++ b/module/zfs/mmp.c @@ -672,7 +672,7 @@ mmp_thread(void *arg) CALLB_CPR_SAFE_BEGIN(&cpr); (void) cv_timedwait_sig_hires(&mmp->mmp_thread_cv, - &mmp->mmp_thread_lock, next_time, USEC2NSEC(1), + &mmp->mmp_thread_lock, next_time, USEC2NSEC(100), CALLOUT_FLAG_ABSOLUTE); CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock); } From e6cebbf86e769eba7c0e7b8834985682d1b38e7e Mon Sep 17 00:00:00 2001 From: Pavel Zakharov Date: Wed, 28 Aug 2019 18:02:58 -0400 Subject: [PATCH 36/68] zfs_handle used after being closed/freed in change_one callback This is a typical case of use after free. 
We would call zfs_close(zhp) which would free the handle, and then call zfs_iter_children() on that handle later. This change ensures that the zfs_handle is only closed when we are ready to return. Running `zfs inherit -r sharenfs pool` was failing with an error code without any error messages. After some debugging I've pinpointed the issue to be memory corruption, which would cause zfs to try to issue an ioctl to the wrong device and receive ENOTTY. Reviewed-by: Paul Dagnelie Reviewed-by: George Wilson Reviewed-by: Sebastien Roy Reviewed-by: Brian Behlendorf Reviewed-by: Alek Pinchuk Signed-off-by: Pavel Zakharov Issue #7967 Closes #9165 --- lib/libzfs/libzfs_changelist.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/lib/libzfs/libzfs_changelist.c b/lib/libzfs/libzfs_changelist.c index 3101febc1605..72f641056edc 100644 --- a/lib/libzfs/libzfs_changelist.c +++ b/lib/libzfs/libzfs_changelist.c @@ -475,9 +475,10 @@ change_one(zfs_handle_t *zhp, void *data) prop_changelist_t *clp = data; char property[ZFS_MAXPROPLEN]; char where[64]; - prop_changenode_t *cn; + prop_changenode_t *cn = NULL; zprop_source_t sourcetype = ZPROP_SRC_NONE; zprop_source_t share_sourcetype = ZPROP_SRC_NONE; + int ret = 0; /* * We only want to unmount/unshare those filesystems that may inherit @@ -493,8 +494,7 @@ change_one(zfs_handle_t *zhp, void *data) zfs_prop_get(zhp, clp->cl_prop, property, sizeof (property), &sourcetype, where, sizeof (where), B_FALSE) != 0) { - zfs_close(zhp); - return (0); + goto out; } /* @@ -506,8 +506,7 @@ change_one(zfs_handle_t *zhp, void *data) zfs_prop_get(zhp, clp->cl_shareprop, property, sizeof (property), &share_sourcetype, where, sizeof (where), B_FALSE) != 0) { - zfs_close(zhp); - return (0); + goto out; } if (clp->cl_alldependents || clp->cl_allchildren || @@ -518,8 +517,8 @@ change_one(zfs_handle_t *zhp, void *data) share_sourcetype == ZPROP_SRC_INHERITED))) { if ((cn = zfs_alloc(zfs_get_handle(zhp), sizeof (prop_changenode_t))) == NULL) { - zfs_close(zhp); - return (-1); + ret = -1; + goto out; } cn->cn_handle = zhp; @@ -541,16 +540,23 @@ change_one(zfs_handle_t *zhp, void *data) uu_avl_insert(clp->cl_tree, cn, idx); } else { free(cn); - zfs_close(zhp); + cn = NULL; } if (!clp->cl_alldependents) - return (zfs_iter_children(zhp, change_one, data)); - } else { - zfs_close(zhp); + ret = zfs_iter_children(zhp, change_one, data); + + /* + * If we added the handle to the changelist, we will re-use it + * later so return without closing it. + */ + if (cn != NULL) + return (ret); } - return (0); +out: + zfs_close(zhp); + return (ret); } static int From eef0f4d84ec8e33b25792485f1f915efeb95af77 Mon Sep 17 00:00:00 2001 From: Paul Dagnelie Date: Thu, 29 Aug 2019 10:20:36 -0700 Subject: [PATCH 37/68] Keep more metaslabs loaded With the other metaslab changes loaded onto a system, we can significantly reduce the memory usage of each loaded metaslab and unload them on demand if there is memory pressure. However, none of those changes actually result in us keeping more metaslabs loaded. If we don't keep more metaslabs loaded, we will still have to wait for demand-loading to finish when no loaded metaslab can satisfy our allocation, which can cause ZIL performance issues. In addition, performance is traditionally measured by IOs per unit time, while unloading is currently done on a txg-count basis. Txgs can take a widely varying range of times, from tenths of a second to several seconds. This can result in confusing, hard to predict behavior. 
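The fix described next gates eviction on both a txg count and
wall-clock time.  As a simplified preview (an illustrative sketch, not
the actual metaslab code; the real check lives in
metaslab_class_evict_old() in the diff below):

    #include <stdbool.h>
    #include <stdint.h>

    /* Defaults matching the patch's tunables. */
    static int metaslab_unload_delay = 32;                  /* txgs */
    static int metaslab_unload_delay_ms = 10 * 60 * 1000;   /* ten minutes */

    #define MSEC2NSEC(ms)   ((uint64_t)(ms) * 1000000ULL)

    /*
     * Evict only once BOTH limits have passed: enough txgs (which may
     * be fast or slow) and enough wall-clock time.  This keeps
     * unloading predictable regardless of how long each txg takes.
     */
    static bool
    should_evict(uint64_t txg, uint64_t selected_txg,
        uint64_t now_ns, uint64_t selected_time_ns)
    {
        return (txg > selected_txg + metaslab_unload_delay &&
            now_ns > selected_time_ns +
            MSEC2NSEC(metaslab_unload_delay_ms));
    }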
This change simply adds a time-based component to metaslab unloading.
A metaslab will remain loaded for ten minutes and 32 txgs (by default)
after it was last used, unless it is evicted due to memory pressure.

Reviewed-by: Brian Behlendorf
Reviewed-by: Matt Ahrens
Signed-off-by: Paul Dagnelie
External-issue: DLPX-65016
External-issue: DLPX-65047
Closes #9197
---
 include/sys/metaslab_impl.h      |  1 +
 man/man5/zfs-module-parameters.5 | 30 +++++++++++++-
 module/zfs/metaslab.c            | 69 ++++++++++++++++++--------------
 3 files changed, 69 insertions(+), 31 deletions(-)

diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index 07f07c02d1a8..3ce39183eca4 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -489,6 +489,7 @@ struct metaslab {
 	 */
 	hrtime_t	ms_load_time;	/* time last loaded */
 	hrtime_t	ms_unload_time;	/* time last unloaded */
+	hrtime_t	ms_selected_time; /* time last allocated from */
 	uint64_t	ms_alloc_txg;	/* last successful alloc (debug only) */
 	uint64_t	ms_max_size;	/* maximum allocatable size	*/
 
diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index 8a1048bee43e..f9ae2e7813ba 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -398,7 +398,7 @@ the least recently used metaslab to prevent the system from clogging all of
 its memory with range trees. This tunable sets the percentage of total system
 memory that is the threshold.
 .sp
-Default value: \fB75 percent\fR
+Default value: \fB25 percent\fR
 .RE
 
 .sp
@@ -469,6 +469,34 @@ angular velocity disk drive.
 Use \fB1\fR for yes (default) and \fB0\fR for no.
 .RE
 
+.sp
+.ne 2
+.na
+\fBmetaslab_unload_delay\fR (int)
+.ad
+.RS 12n
+After a metaslab is used, we keep it loaded for this many txgs, to attempt to
+reduce unnecessary reloading. Note that both this many txgs and
+\fBmetaslab_unload_delay_ms\fR milliseconds must pass before unloading will
+occur.
+.sp
+Default value: \fB32\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBmetaslab_unload_delay_ms\fR (int)
+.ad
+.RS 12n
+After a metaslab is used, we keep it loaded for this many milliseconds, to
+attempt to reduce unnecessary reloading. Note that both this many
+milliseconds and \fBmetaslab_unload_delay\fR txgs must pass before unloading
+will occur.
+.sp
+Default value: \fB600000\fR (ten minutes).
+.RE
+
 .sp
 .ne 2
 .na
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 2f92fffa4ec0..00af4a21bd19 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -198,16 +198,20 @@ int metaslab_df_use_largest_segment = B_FALSE;
 int metaslab_load_pct = 50;
 
 /*
- * Determines how many txgs a metaslab may remain loaded without having any
- * allocations from it. As long as a metaslab continues to be used we will
- * keep it loaded.
+ * These tunables control how long a metaslab will remain loaded after the
+ * last allocation from it.  A metaslab can't be unloaded until at least
+ * metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
+ * have elapsed.  However, zfs_metaslab_mem_limit may cause it to be
+ * unloaded sooner.  These settings are intended to be generous -- to keep
+ * metaslabs loaded for a long time, reducing the rate of metaslab loading.
  */
-int metaslab_unload_delay = TXG_SIZE * 2;
+int metaslab_unload_delay = 32;
+int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
 
 /*
  * Max number of metaslabs per group to preload.
  */
-int metaslab_preload_limit = SPA_DVAS_PER_BP;
+int metaslab_preload_limit = 10;
 
 /*
  * Enable/disable preloading of metaslab.
@@ -272,18 +276,18 @@ uint64_t metaslab_trace_max_entries = 5000; */ int max_disabled_ms = 3; -/* - * Time (in seconds) to respect ms_max_size when the metaslab is not loaded. - * To avoid 64-bit overflow, don't set above UINT32_MAX. - */ -unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */ - /* * Maximum percentage of memory to use on storing loaded metaslabs. If loading * a metaslab would take it over this percentage, the oldest selected metaslab * is automatically unloaded. */ -int zfs_metaslab_mem_limit = 75; +int zfs_metaslab_mem_limit = 25; + +/* + * Time (in seconds) to respect ms_max_size when the metaslab is not loaded. + * To avoid 64-bit overflow, don't set above UINT32_MAX. + */ +unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */ static uint64_t metaslab_weight(metaslab_t *); static void metaslab_set_fragmentation(metaslab_t *); @@ -539,15 +543,6 @@ metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg) multilist_sublist_unlock(mls); while (msp != NULL) { mutex_enter(&msp->ms_lock); - /* - * Once we've hit a metaslab selected too recently to - * evict, we're done evicting for now. - */ - if (msp->ms_selected_txg + metaslab_unload_delay >= - txg) { - mutex_exit(&msp->ms_lock); - break; - } /* * If the metaslab has been removed from the list @@ -563,7 +558,20 @@ metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg) mls = multilist_sublist_lock(ml, i); metaslab_t *next_msp = multilist_sublist_next(mls, msp); multilist_sublist_unlock(mls); - metaslab_evict(msp, txg); + if (txg > + msp->ms_selected_txg + metaslab_unload_delay && + gethrtime() > msp->ms_selected_time + + (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) { + metaslab_evict(msp, txg); + } else { + /* + * Once we've hit a metaslab selected too + * recently to evict, we're done evicting for + * now. + */ + mutex_exit(&msp->ms_lock); + break; + } mutex_exit(&msp->ms_lock); msp = next_msp; } @@ -2248,6 +2256,7 @@ metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) if (multilist_link_active(&msp->ms_class_txg_node)) multilist_sublist_remove(mls, msp); msp->ms_selected_txg = txg; + msp->ms_selected_time = gethrtime(); multilist_sublist_insert_tail(mls, msp); multilist_sublist_unlock(mls); } @@ -2573,7 +2582,6 @@ metaslab_space_weight(metaslab_t *msp) uint64_t weight, space; ASSERT(MUTEX_HELD(&msp->ms_lock)); - ASSERT(!vd->vdev_removing); /* * The baseline weight is the metaslab's free space. @@ -2832,13 +2840,6 @@ metaslab_weight(metaslab_t *msp) ASSERT(MUTEX_HELD(&msp->ms_lock)); - /* - * If this vdev is in the process of being removed, there is nothing - * for us to do here. 
- */ - if (vd->vdev_removing) - return (0); - metaslab_set_fragmentation(msp); /* @@ -5869,6 +5870,14 @@ module_param(metaslab_preload_enabled, int, 0644); MODULE_PARM_DESC(metaslab_preload_enabled, "preload potential metaslabs during reassessment"); +module_param(metaslab_unload_delay, int, 0644); +MODULE_PARM_DESC(metaslab_unload_delay, + "delay in txgs after metaslab was last used before unloading"); + +module_param(metaslab_unload_delay_ms, int, 0644); +MODULE_PARM_DESC(metaslab_unload_delay_ms, + "delay in milliseconds after metaslab was last used before unloading"); + module_param(zfs_mg_noalloc_threshold, int, 0644); MODULE_PARM_DESC(zfs_mg_noalloc_threshold, "percentage of free space for metaslab group to allow allocation"); From f66ad580cca5493b6f4b7acb23aa7d82f3b82755 Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Thu, 29 Aug 2019 14:03:09 -0400 Subject: [PATCH 38/68] Use compatible arg order in tests BSD getopt() and getopt_long() want options before arguments. Reorder arguments to zfs/zpool in tests to put all the options first. Reviewed-by: Igor Kozhukhov Reviewed-by: Brian Behlendorf Signed-off-by: Ryan Moeller Closes #9228 --- .../functional/alloc_class/alloc_class_004_pos.ksh | 2 +- .../functional/alloc_class/alloc_class_005_pos.ksh | 4 ++-- .../cli_root/zfs_program/zfs_program_json.ksh | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/zfs-tests/tests/functional/alloc_class/alloc_class_004_pos.ksh b/tests/zfs-tests/tests/functional/alloc_class/alloc_class_004_pos.ksh index dcc6f7607c9b..79ac9364c257 100755 --- a/tests/zfs-tests/tests/functional/alloc_class/alloc_class_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/alloc_class/alloc_class_004_pos.ksh @@ -52,7 +52,7 @@ do log_must zpool create $TESTPOOL $type $ZPOOL_DISKS \ special $stype $sdisks - ac_value="$(zpool get all -H -o property,value | \ + ac_value="$(zpool get -H -o property,value all | \ egrep allocation_classes | nawk '{print $2}')" if [ "$ac_value" = "active" ]; then log_note "feature@allocation_classes is active" diff --git a/tests/zfs-tests/tests/functional/alloc_class/alloc_class_005_pos.ksh b/tests/zfs-tests/tests/functional/alloc_class/alloc_class_005_pos.ksh index 417c68aa739b..337114cdb59e 100755 --- a/tests/zfs-tests/tests/functional/alloc_class/alloc_class_005_pos.ksh +++ b/tests/zfs-tests/tests/functional/alloc_class/alloc_class_005_pos.ksh @@ -41,7 +41,7 @@ do else log_must zpool create $TESTPOOL $type $ZPOOL_DISKS fi - ac_value="$(zpool get all -H -o property,value | \ + ac_value="$(zpool get -H -o property,value all | \ egrep allocation_classes | awk '{print $2}')" if [ "$ac_value" = "enabled" ]; then log_note "feature@allocation_classes is enabled" @@ -56,7 +56,7 @@ do log_must zpool add $TESTPOOL special mirror \ $CLASS_DISK0 $CLASS_DISK1 fi - ac_value="$(zpool get all -H -o property,value | \ + ac_value="$(zpool get -H -o property,value all | \ egrep allocation_classes | awk '{print $2}')" if [ "$ac_value" = "active" ]; then log_note "feature@allocation_classes is active" diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh index 1d769096b4fb..3d59f784a488 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh @@ -95,10 +95,10 @@ typeset -i cnt=0 typeset cmd for cmd in ${pos_cmds[@]}; do log_must zfs program $TESTPOOL $TESTZCP 
$TESTDS $cmd 2>&1 - log_must zfs program $TESTPOOL -j $TESTZCP $TESTDS $cmd 2>&1 + log_must zfs program -j $TESTPOOL $TESTZCP $TESTDS $cmd 2>&1 # json.tool is needed to guarantee consistent ordering of fields # sed is needed to trim trailing space in CentOS 6's json.tool output - OUTPUT=$(zfs program $TESTPOOL -j $TESTZCP $TESTDS $cmd 2>&1 | python -m json.tool | sed 's/[[:space:]]*$//') + OUTPUT=$(zfs program -j $TESTPOOL $TESTZCP $TESTDS $cmd 2>&1 | python -m json.tool | sed 's/[[:space:]]*$//') if [ "$OUTPUT" != "${pos_cmds_out[$cnt]}" ]; then log_note "Got :$OUTPUT" log_note "Expected:${pos_cmds_out[$cnt]}" @@ -120,9 +120,9 @@ For the property list, run: zfs set|get For the delegated permission list, run: zfs allow|unallow") cnt=0 for cmd in ${neg_cmds[@]}; do - log_mustnot zfs program $TESTPOOL $TESTZCP $TESTDS $cmd 2>&1 - log_mustnot zfs program $TESTPOOL -j $TESTZCP $TESTDS $cmd 2>&1 - OUTPUT=$(zfs program $TESTPOOL -j $TESTZCP $TESTDS $cmd 2>&1) + log_mustnot zfs program $cmd $TESTPOOL $TESTZCP $TESTDS 2>&1 + log_mustnot zfs program -j $cmd $TESTPOOL $TESTZCP $TESTDS 2>&1 + OUTPUT=$(zfs program -j $cmd $TESTPOOL $TESTZCP $TESTDS 2>&1) if [ "$OUTPUT" != "${neg_cmds_out[$cnt]}" ]; then log_note "Got :$OUTPUT" log_note "Expected:${neg_cmds_out[$cnt]}" From 815a6456928b56b98bea3c9508f4037b6d6f4759 Mon Sep 17 00:00:00 2001 From: Ryan Moeller Date: Thu, 29 Aug 2019 16:11:29 -0400 Subject: [PATCH 39/68] Simplify deleting partitions in libtest Eliminate unnecessary code duplication. We can use a for-loop instead of a while-loop. There is no need to echo $DISKSARRAY in a subshell or return 0. Declare all variables with typeset. Reviewed-by: Brian Behlendorf Reviewed-by: John Kennedy Signed-off-by: Ryan Moeller Closes #9224 --- tests/zfs-tests/include/libtest.shlib | 55 +++++---------------------- 1 file changed, 10 insertions(+), 45 deletions(-) diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index 5d536fda39dd..cda4b04cddf7 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -966,61 +966,26 @@ function set_partition # function delete_partitions { - typeset -i j=1 + typeset disk - if [[ -z $DISK_ARRAY_NUM ]]; then - DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}') - fi if [[ -z $DISKSARRAY ]]; then DISKSARRAY=$DISKS fi if is_linux; then - if (( $DISK_ARRAY_NUM == 1 )); then - while ((j < MAX_PARTITIONS)); do - parted $DEV_DSKDIR/$DISK -s rm $j \ - > /dev/null 2>&1 - if (( $? == 1 )); then - lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null - if (( $? == 1 )); then - log_note "Partitions for $DISK should be deleted" - else - log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted" - fi - return 0 + typeset -i part + for disk in $DISKSARRAY; do + for (( part = 1; part < MAX_PARTITIONS; part++ )); do + typeset partition=${disk}${SLICE_PREFIX}${part} + parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1 + if lsblk | grep -qF ${partition}; then + log_fail "Partition ${partition} not deleted" else - lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null - if (( $? == 0 )); then - log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted" - fi + log_note "Partition ${partition} deleted" fi - ((j = j+1)) - done - else - for disk in `echo $DISKSARRAY`; do - while ((j < MAX_PARTITIONS)); do - parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1 - if (( $? == 1 )); then - lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null - if (( $? 
== 1 )); then - log_note "Partitions for $disk should be deleted" - else - log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted" - fi - j=7 - else - lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null - if (( $? == 0 )); then - log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted" - fi - fi - ((j = j+1)) - done - j=1 done - fi + done fi - return 0 } # From e2fcfa70e36a9f7c059ec64d787f37c6bd9ae48c Mon Sep 17 00:00:00 2001 From: Georgy Yakovlev <168902+gyakovlev@users.noreply.github.com> Date: Thu, 29 Aug 2019 12:14:48 -0800 Subject: [PATCH 40/68] etc/init.d/zfs-functions.in: remove arch warning Remove the x86_64 warning, it's no longer the case that this is the only supported architecture. Reviewed-by: Brian Behlendorf Signed-off-by: Georgy Yakovlev Closes: #9177 --- etc/init.d/zfs-functions.in | 7 ------- 1 file changed, 7 deletions(-) diff --git a/etc/init.d/zfs-functions.in b/etc/init.d/zfs-functions.in index 14667b4e9fd3..d65c79dcfd36 100644 --- a/etc/init.d/zfs-functions.in +++ b/etc/init.d/zfs-functions.in @@ -294,13 +294,6 @@ checksystem() # Just make sure that /dev/zfs is created. udev_trigger - if ! [ "$(uname -m)" = "x86_64" ]; then - echo "Warning: You're not running 64bit. Currently native zfs in"; - echo " Linux is only supported and tested on 64bit."; - # should we break here? People doing this should know what they - # do, thus i'm not breaking here. - fi - return 0 } From 475aa97cab771b3b2b9ddab03f5c14a1d4e985da Mon Sep 17 00:00:00 2001 From: Paul Dagnelie Date: Fri, 30 Aug 2019 09:28:31 -0700 Subject: [PATCH 41/68] Prevent metaslab_sync panic due to spa_final_dirty_txg If a pool enables the SPACEMAP_HISTOGRAM feature shortly before being exported, we can enter a situation that causes a kernel panic. Any metaslabs that are loaded during the final dirty txg and haven't already been condensed will cause metaslab_sync to proceed after the final dirty txg so that the condense can be performed, which there are assertions to prevent. Because of the nature of this issue, there are a number of ways we can enter this state. Rather than try to prevent each of them one by one, potentially missing some edge cases, we instead cut it off at the point of intersection; by preventing metaslab_sync from proceeding if it would only do so to perform a condense and we're past the final dirty txg, we preserve the utility of the existing asserts while preventing this particular issue. Reviewed-by: Matt Ahrens Reviewed-by: Brian Behlendorf Signed-off-by: Paul Dagnelie Closes #9185 Closes #9186 Closes #9231 Closes #9253 --- module/zfs/metaslab.c | 11 +++++++++-- tests/runfiles/linux.run | 8 +++----- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 00af4a21bd19..11b9ba8e9326 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -3553,12 +3553,19 @@ metaslab_sync(metaslab_t *msp, uint64_t txg) /* * Normally, we don't want to process a metaslab if there are no * allocations or frees to perform. However, if the metaslab is being - * forced to condense and it's loaded, we need to let it through. + * forced to condense, it's loaded and we're not beyond the final + * dirty txg, we need to let it through. Not condensing beyond the + * final dirty txg prevents an issue where metaslabs that need to be + * condensed but were loaded for other reasons could cause a panic + * here. By only checking the txg in that branch of the conditional, + * we preserve the utility of the VERIFY statements in all other + * cases. 
*/ if (range_tree_is_empty(alloctree) && range_tree_is_empty(msp->ms_freeing) && range_tree_is_empty(msp->ms_checkpointing) && - !(msp->ms_loaded && msp->ms_condense_wanted)) + !(msp->ms_loaded && msp->ms_condense_wanted && + txg <= spa_final_dirty_txg(spa))) return; diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index a9ef628bc863..d05ff61202ab 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -481,15 +481,13 @@ tests = ['zpool_trim_attach_detach_add_remove', tags = ['functional', 'zpool_trim'] [tests/functional/cli_root/zpool_upgrade] -tests = ['zpool_upgrade_001_pos', +tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos', + 'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos', 'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg', + 'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos', 'zpool_upgrade_009_neg'] tags = ['functional', 'cli_root', 'zpool_upgrade'] -# Disabled pending resolution of #9185 and #9186. -# 'zpool_upgrade_002_pos', 'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos', -# 'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos', - [tests/functional/cli_user/misc] tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg', 'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg', From d39c71d3654f6546dc3374c7833610f51d036109 Mon Sep 17 00:00:00 2001 From: Igor K Date: Fri, 30 Aug 2019 19:32:25 +0300 Subject: [PATCH 42/68] Fix refquota_007_neg.ksh Must use 'zfs' instead of '$ZFS' which is undefined. Reviewed-by: John Kennedy Reviewed-by: Brian Behlendorf Signed-off-by: Igor Kozhukhov Closes #9257 --- .../tests/functional/refquota/refquota_007_neg.ksh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh b/tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh index e2141c7d7f3b..4f0393883b6a 100755 --- a/tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh +++ b/tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh @@ -38,9 +38,9 @@ verify_runnable "both" function cleanup { - log_must $ZFS destroy -rf $TESTPOOL/$TESTFS - log_must $ZFS create $TESTPOOL/$TESTFS - log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS + log_must zfs destroy -rf $TESTPOOL/$TESTFS + log_must zfs create $TESTPOOL/$TESTFS + log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS } log_onexit cleanup From 2b96f774236fa8793f9e5c387abf10d780ffca38 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 18:40:30 +0200 Subject: [PATCH 43/68] Fix typos in config/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9232 --- config/ax_code_coverage.m4 | 2 +- config/kernel-dentry-operations.m4 | 2 +- config/kernel-mkdir-umode-t.m4 | 2 +- config/kernel-timer.m4 | 2 +- config/lib-link.m4 | 2 +- config/pkg.m4 | 2 +- config/user.m4 | 2 +- config/zfs-build.m4 | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/config/ax_code_coverage.m4 b/config/ax_code_coverage.m4 index 4417d4444a96..5cdfe14562aa 100644 --- a/config/ax_code_coverage.m4 +++ b/config/ax_code_coverage.m4 @@ -50,7 +50,7 @@ # CODE_COVERAGE_LIBS is preferred for clarity; CODE_COVERAGE_LDFLAGS is # deprecated. They have the same value. # -# This code was derived from Makefile.decl in GLib, originally licenced +# This code was derived from Makefile.decl in GLib, originally licensed # under LGPLv2.1+. 
# # LICENSE diff --git a/config/kernel-dentry-operations.m4 b/config/kernel-dentry-operations.m4 index 61f5a27af5a7..2cd2553010d7 100644 --- a/config/kernel-dentry-operations.m4 +++ b/config/kernel-dentry-operations.m4 @@ -69,7 +69,7 @@ AC_DEFUN([ZFS_AC_KERNEL_D_SET_D_OP], ]) dnl # -dnl # 2.6.38 API chage +dnl # 2.6.38 API change dnl # Added sb->s_d_op default dentry_operations member dnl # AC_DEFUN([ZFS_AC_KERNEL_S_D_OP], diff --git a/config/kernel-mkdir-umode-t.m4 b/config/kernel-mkdir-umode-t.m4 index ebc21be9ec55..1c9fa9be3ce7 100644 --- a/config/kernel-mkdir-umode-t.m4 +++ b/config/kernel-mkdir-umode-t.m4 @@ -4,7 +4,7 @@ dnl # The VFS .create, .mkdir and .mknod callbacks were updated to take a dnl # umode_t type rather than an int. The expectation is that any backport dnl # would also change all three prototypes. However, if it turns out that dnl # some distribution doesn't backport the whole thing this could be -dnl # broken apart in to three separate checks. +dnl # broken apart into three separate checks. dnl # AC_DEFUN([ZFS_AC_KERNEL_MKDIR_UMODE_T], [ AC_MSG_CHECKING([whether iops->create()/mkdir()/mknod() take umode_t]) diff --git a/config/kernel-timer.m4 b/config/kernel-timer.m4 index b0e1afa153ab..d90642043073 100644 --- a/config/kernel-timer.m4 +++ b/config/kernel-timer.m4 @@ -6,7 +6,7 @@ dnl # (older kernels). Also sanity check the from_timer() and timer_setup() dnl # macros are available as well, since they will be used in the same newer dnl # kernels that support the new timer_list.func signature. dnl # -dnl # Also check for the existance of flags in struct timer_list, they were +dnl # Also check for the existence of flags in struct timer_list, they were dnl # added in 4.1-rc8 via 0eeda71bc30d. AC_DEFUN([ZFS_AC_KERNEL_TIMER_SETUP], [ diff --git a/config/lib-link.m4 b/config/lib-link.m4 index 0ff10731facd..01766c315c97 100644 --- a/config/lib-link.m4 +++ b/config/lib-link.m4 @@ -216,7 +216,7 @@ AC_DEFUN([AC_LIB_LINKFLAGS_BODY], fi ]) dnl Search the library and its dependencies in $additional_libdir and - dnl $LDFLAGS. Using breadth-first-seach. + dnl $LDFLAGS. Using breadth-first-search. LIB[]NAME= LTLIB[]NAME= INC[]NAME= diff --git a/config/pkg.m4 b/config/pkg.m4 index 13a889017866..f9075e56c87a 100644 --- a/config/pkg.m4 +++ b/config/pkg.m4 @@ -86,7 +86,7 @@ dnl Check to see whether a particular set of modules exists. Similar to dnl PKG_CHECK_MODULES(), but does not set variables or print errors. dnl dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -dnl only at the first occurence in configure.ac, so if the first place +dnl only at the first occurrence in configure.ac, so if the first place dnl it's called might be skipped (such as if it is within an "if", you dnl have to call PKG_CHECK_EXISTS manually AC_DEFUN([PKG_CHECK_EXISTS], diff --git a/config/user.m4 b/config/user.m4 index 1ee9dbe263bc..3d97e9a418c3 100644 --- a/config/user.m4 +++ b/config/user.m4 @@ -27,7 +27,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [ dnl # dnl # Setup the environment for the ZFS Test Suite. Currently only -dnl # Linux sytle systems are supported but this infrastructure can +dnl # Linux style systems are supported but this infrastructure can dnl # be extended to support other platforms if needed. 
dnl # AC_DEFUN([ZFS_AC_TEST_FRAMEWORK], [ diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index 8e221f2d7d40..c2e5bb25fe2e 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -461,7 +461,7 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [ AC_MSG_RESULT([$DEFAULT_INIT_SCRIPT]) AC_SUBST(DEFAULT_INIT_SCRIPT) - AC_MSG_CHECKING([default init config direectory]) + AC_MSG_CHECKING([default init config directory]) case "$VENDOR" in alpine) DEFAULT_INITCONF_DIR=/etc/conf.d ;; gentoo) DEFAULT_INITCONF_DIR=/etc/conf.d ;; From ac3d4d0cf6b3d06cfd90636b4bc8a2043176b725 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 18:41:35 +0200 Subject: [PATCH 44/68] Fix typos in man/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9233 --- man/man1/cstyle.1 | 2 +- man/man1/ztest.1 | 2 +- man/man5/vdev_id.conf.5 | 2 +- man/man5/zfs-events.5 | 4 ++-- man/man5/zfs-module-parameters.5 | 4 ++-- man/man5/zpool-features.5 | 2 +- man/man8/zdb.8 | 2 +- man/man8/zfs.8 | 14 +++++++------- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/man/man1/cstyle.1 b/man/man1/cstyle.1 index f2b637d4c36b..f77d534507a4 100644 --- a/man/man1/cstyle.1 +++ b/man/man1/cstyle.1 @@ -31,7 +31,7 @@ .IX "OS-Net build tools" "cstyle" "" "\fBcstyle\fP" .LP .I cstyle -inspects C source files (*.c and *.h) for common sylistic errors. It +inspects C source files (*.c and *.h) for common stylistic errors. It attempts to check for the cstyle documented in \fIhttp://www.cis.upenn.edu/~lee/06cse480/data/cstyle.ms.pdf\fP. Note that there is much in that document that diff --git a/man/man1/ztest.1 b/man/man1/ztest.1 index b8cb0d45d92c..84e56c822d13 100644 --- a/man/man1/ztest.1 +++ b/man/man1/ztest.1 @@ -175,5 +175,5 @@ By default the stack size is limited to 256K. .BR "zfs (1)" "," .BR "zdb (1)" "," .SH "AUTHOR" -This manual page was transvered to asciidoc by Michael Gebetsroither +This manual page was transferred to asciidoc by Michael Gebetsroither from http://opensolaris.org/os/community/zfs/ztest/ diff --git a/man/man5/vdev_id.conf.5 b/man/man5/vdev_id.conf.5 index 5b7fbf0cad49..89c5ee961094 100644 --- a/man/man5/vdev_id.conf.5 +++ b/man/man5/vdev_id.conf.5 @@ -41,7 +41,7 @@ disk enclosure). .TP \fIenclosure_symlinks\fR Additionally create /dev/by-enclosure symlinks to the disk enclosure -sg devices using the naming scheme from from vdev_id.conf. +sg devices using the naming scheme from vdev_id.conf. \fIenclosure_symlinks\fR is only allowed for sas_direct mode. .TP \fIenclosure_symlinks_prefix\fR diff --git a/man/man5/zfs-events.5 b/man/man5/zfs-events.5 index 7e9bbedafdad..4a28be71e685 100644 --- a/man/man5/zfs-events.5 +++ b/man/man5/zfs-events.5 @@ -557,7 +557,7 @@ How many write errors that have been detected on the vdev. \fBvdev_cksum_errors\fR .ad .RS 12n -How many checkum errors that have been detected on the vdev. +How many checksum errors that have been detected on the vdev. .RE .sp @@ -858,7 +858,7 @@ such as IDE or parallel SCSI. .RS 12n If this field exists, it is an array of counters. Each entry counts bit clears in a particular bit of a big-endian uint64 type. The first entry counts bits -clears of the the high-order bit of the first byte, the 9th byte, etc, and the +clears of the high-order bit of the first byte, the 9th byte, etc, and the last entry counts clears of the low-order bit of the 8th byte, the 16th byte, etc. 
This information is useful for observing a stuck bit in a parallel data path, such as IDE or parallel SCSI. diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5 index f9ae2e7813ba..22e6ab44ec2c 100644 --- a/man/man5/zfs-module-parameters.5 +++ b/man/man5/zfs-module-parameters.5 @@ -936,7 +936,7 @@ Default value: \fB0\fR. .ad .RS 12n Minimum time "prescient prefetched" blocks are locked in the ARC, specified -in ms. These blocks are meant to be prefetched fairly aggresively ahead of +in ms. These blocks are meant to be prefetched fairly aggressively ahead of the code that may use them. A value of \fB0\fR will default to 6000 ms. .sp Default value: \fB0\fR. @@ -2701,7 +2701,7 @@ Default value: \fB20\fR which is 5% of RAM (1/20). .ad .RS 12n The fraction of the hard limit used to determined the soft limit for I/O sorting -by the sequential scan algorithm. When we cross this limit from bellow no action +by the sequential scan algorithm. When we cross this limit from below no action is taken. When we cross this limit from above it is because we are issuing verification I/O. In this case (unless the metadata scan is done) we stop issuing verification I/O and start scanning metadata again until we get to the diff --git a/man/man5/zpool-features.5 b/man/man5/zpool-features.5 index 6c03f9049066..1adc76b7f26d 100644 --- a/man/man5/zpool-features.5 +++ b/man/man5/zpool-features.5 @@ -248,7 +248,7 @@ DEPENDENCIES bookmark, extensible_dataset, bookmark_v2 .TE This feature enables additional bookmark accounting fields, enabling the -written# prperty (space written since a bookmark) and estimates of +written# property (space written since a bookmark) and estimates of send stream sizes for incrementals from bookmarks. This feature becomes \fBactive\fR when a bookmark is created and will be diff --git a/man/man8/zdb.8 b/man/man8/zdb.8 index edbf28bf39ab..50f600252cf6 100644 --- a/man/man8/zdb.8 +++ b/man/man8/zdb.8 @@ -251,7 +251,7 @@ and, optionally, Print block pointer .It Sy d Decompress the block. Set environment variable -.Nm ZBD_NO_ZLE +.Nm ZDB_NO_ZLE to skip zle when guessing. .It Sy e Byte swap the block diff --git a/man/man8/zfs.8 b/man/man8/zfs.8 index cbbd76503ab6..09fa2831be96 100644 --- a/man/man8/zfs.8 +++ b/man/man8/zfs.8 @@ -737,7 +737,7 @@ of a dataset is not transferred to other pools when the snapshot is copied with a send/receive operation. The .Sy objsetid -can be reused (for a new datatset) after the dataset is deleted. +can be reused (for a new dataset) after the dataset is deleted. .It Sy origin For cloned file systems or volumes, the snapshot from which the clone was created. @@ -1396,7 +1396,7 @@ has the same effect as the setting .Pp If set to .Sy verify , -ZFS will do a byte-to-byte comparsion in case of two blocks having the same +ZFS will do a byte-to-byte comparison in case of two blocks having the same signature to make sure the block contents are identical. Specifying .Sy verify is mandatory for the @@ -2145,7 +2145,7 @@ Setting it to hides its partitions. Volumes with property set to .Sy none -are not exposed outside ZFS, but can be snapshoted, cloned, replicated, etc, +are not exposed outside ZFS, but can be snapshotted, cloned, replicated, etc, that can be suitable for backup purposes. Value .Sy default @@ -3416,7 +3416,7 @@ for types. List project identifier (ID) and inherit flag of file(s) or directories. .Bl -tag -width "-d" .It Fl d -Show the directory project ID and inherit flag, not its childrens. 
It will +Show the directory project ID and inherit flag, not its children. It will overwrite the former specified .Fl r option. @@ -3459,7 +3459,7 @@ option) value or the target directory's project ID. Print file name with a trailing NUL instead of newline (by default), like "find -print0". .It Fl d -Check the directory project ID and inherit flag, not its childrens. It will +Check the directory project ID and inherit flag, not its children. It will overwrite the former specified .Fl r option. @@ -4022,7 +4022,7 @@ This will either produce a normal snapshot or a redacted one, depending on whether the new send stream is redacted. .sp 4. To receive an incremental send from a redacted version of the initial -snapshot that is redacted with respect to a subect of the set of snapshots the +snapshot that is redacted with respect to a subject of the set of snapshots the initial snapshot was created with respect to. A send stream from a compatible redacted dataset will contain all of the blocks necessary to fill in any redacted data. @@ -4037,7 +4037,7 @@ on whether the full send stream was redacted. .sp These restrictions are detected and enforced by \fBzfs receive\fR; a redacted send stream will contain the list of snapshots that the stream is -redacted with respsect to. +redacted with respect to. These are stored with the redacted snapshot, and are used to detect and correctly handle the cases above. Note that for technical reasons, raw sends and redacted sends cannot be combined at this time. From ad0b23b14ab37a54764122fe8341e62f10245e15 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 18:43:30 +0200 Subject: [PATCH 45/68] Fix typos in cmd/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9234 --- cmd/arc_summary/arc_summary3 | 14 +++++++------- cmd/arcstat/Makefile.am | 2 +- cmd/dbufstat/Makefile.am | 2 +- cmd/fsck_zfs/fsck.zfs | 2 +- cmd/vdev_id/vdev_id | 2 +- cmd/zdb/zdb.c | 2 +- cmd/zed/agents/fmd_api.c | 2 +- cmd/zed/agents/fmd_serd.c | 2 +- cmd/zed/agents/zfs_mod.c | 6 +++--- cmd/zed/zed.d/statechange-led.sh | 4 ++-- cmd/zfs/zfs_main.c | 2 +- cmd/zinject/translate.c | 4 ++-- cmd/zpool/zpool_vdev.c | 4 ++-- cmd/zstreamdump/zstreamdump.c | 2 +- cmd/ztest/ztest.c | 2 +- 15 files changed, 26 insertions(+), 26 deletions(-) diff --git a/cmd/arc_summary/arc_summary3 b/cmd/arc_summary/arc_summary3 index fc5e1e4b64c1..d33271438498 100755 --- a/cmd/arc_summary/arc_summary3 +++ b/cmd/arc_summary/arc_summary3 @@ -43,7 +43,7 @@ import subprocess import sys import time -DECRIPTION = 'Print ARC and other statistics for ZFS on Linux' +DESCRIPTION = 'Print ARC and other statistics for ZFS on Linux' INDENT = ' '*8 LINE_LENGTH = 72 PROC_PATH = '/proc/spl/kstat/zfs/' @@ -65,7 +65,7 @@ SECTION_PATHS = {'arc': 'arcstats', 'zfetch': 'zfetchstats', 'zil': 'zil'} -parser = argparse.ArgumentParser(description=DECRIPTION) +parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('-a', '--alternate', action='store_true', default=False, help='use alternate formatting for tunables and SPL', dest='alt') @@ -284,7 +284,7 @@ def get_spl_tunables(PATH): def get_descriptions(request): - """Get the decriptions of the Solaris Porting Layer (SPL) or the + """Get the descriptions of the Solaris Porting Layer (SPL) or the tunables, return with minimal formatting. 
""" @@ -708,7 +708,7 @@ def section_l2arc(kstats_dict): def section_spl(*_): """Print the SPL parameters, if requested with alternative format - and/or decriptions. This does not use kstats. + and/or descriptions. This does not use kstats. """ spls = get_spl_tunables(SPL_PATH) @@ -725,7 +725,7 @@ def section_spl(*_): try: print(INDENT+'#', descriptions[key]) except KeyError: - print(INDENT+'# (No decription found)') # paranoid + print(INDENT+'# (No description found)') # paranoid print(format_raw_line(key, value)) @@ -734,7 +734,7 @@ def section_spl(*_): def section_tunables(*_): """Print the tunables, if requested with alternative format and/or - decriptions. This does not use kstasts. + descriptions. This does not use kstasts. """ tunables = get_spl_tunables(TUNABLES_PATH) @@ -751,7 +751,7 @@ def section_tunables(*_): try: print(INDENT+'#', descriptions[key]) except KeyError: - print(INDENT+'# (No decription found)') # paranoid + print(INDENT+'# (No description found)') # paranoid print(format_raw_line(key, value)) diff --git a/cmd/arcstat/Makefile.am b/cmd/arcstat/Makefile.am index 462e9a6197a8..2d59faa9c87d 100644 --- a/cmd/arcstat/Makefile.am +++ b/cmd/arcstat/Makefile.am @@ -1,7 +1,7 @@ dist_bin_SCRIPTS = arcstat # -# The arcstat script is compatibile with both Python 2.6 and 3.4. +# The arcstat script is compatible with both Python 2.6 and 3.4. # As such the python 3 shebang can be replaced at install time when # targeting a python 2 system. This allows us to maintain a single # version of the source. diff --git a/cmd/dbufstat/Makefile.am b/cmd/dbufstat/Makefile.am index 968a7607797f..06923d38b2e8 100644 --- a/cmd/dbufstat/Makefile.am +++ b/cmd/dbufstat/Makefile.am @@ -1,7 +1,7 @@ dist_bin_SCRIPTS = dbufstat # -# The dbufstat script is compatibile with both Python 2.6 and 3.4. +# The dbufstat script is compatible with both Python 2.6 and 3.4. # As such the python 3 shebang can be replaced at install time when # targeting a python 2 system. This allows us to maintain a single # version of the source. diff --git a/cmd/fsck_zfs/fsck.zfs b/cmd/fsck_zfs/fsck.zfs index f1685db6527b..129a7f39c388 100755 --- a/cmd/fsck_zfs/fsck.zfs +++ b/cmd/fsck_zfs/fsck.zfs @@ -1,6 +1,6 @@ #!/bin/sh # -# fsck.zfs: A fsck helper to accomidate distributions that expect +# fsck.zfs: A fsck helper to accommodate distributions that expect # to be able to execute a fsck on all filesystem types. Currently # this script does nothing but it could be extended to act as a # compatibility wrapper for 'zpool scrub'. diff --git a/cmd/vdev_id/vdev_id b/cmd/vdev_id/vdev_id index 3796ab4885d8..a79aed3b5d82 100755 --- a/cmd/vdev_id/vdev_id +++ b/cmd/vdev_id/vdev_id @@ -102,7 +102,7 @@ Usage: vdev_id [-h] vdev_id <-d device> [-c config_file] [-p phys_per_port] [-g sas_direct|sas_switch|scsi] [-m] - -c specify name of alernate config file [default=$CONFIG] + -c specify name of an alternative config file [default=$CONFIG] -d specify basename of device (i.e. sda) -e Create enclose device symlinks only (/dev/by-enclosure) -g Storage network topology [default="$TOPOLOGY"] diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c index e05323f0aa54..a6368c67d4d7 100644 --- a/cmd/zdb/zdb.c +++ b/cmd/zdb/zdb.c @@ -5391,7 +5391,7 @@ zdb_set_skip_mmp(char *target) * the name of the target pool. * * Note that the checkpointed state's pool name will be the name of - * the original pool with the above suffix appened to it. In addition, + * the original pool with the above suffix appended to it. In addition, * if the target is not a pool name (e.g. 
a path to a dataset) then * the new_path parameter is populated with the updated path to * reflect the fact that we are looking into the checkpointed state. diff --git a/cmd/zed/agents/fmd_api.c b/cmd/zed/agents/fmd_api.c index ae90a322cf90..607b387ca3a8 100644 --- a/cmd/zed/agents/fmd_api.c +++ b/cmd/zed/agents/fmd_api.c @@ -25,7 +25,7 @@ */ /* - * This file imlements the minimal FMD module API required to support the + * This file implements the minimal FMD module API required to support the * fault logic modules in ZED. This support includes module registration, * memory allocation, module property accessors, basic case management, * one-shot timers and SERD engines. diff --git a/cmd/zed/agents/fmd_serd.c b/cmd/zed/agents/fmd_serd.c index 043552862e82..d4ec37fb7691 100644 --- a/cmd/zed/agents/fmd_serd.c +++ b/cmd/zed/agents/fmd_serd.c @@ -281,7 +281,7 @@ fmd_serd_eng_empty(fmd_serd_eng_t *sgp) void fmd_serd_eng_reset(fmd_serd_eng_t *sgp) { - serd_log_msg(" SERD Engine: reseting %s", sgp->sg_name); + serd_log_msg(" SERD Engine: resetting %s", sgp->sg_name); while (sgp->sg_count != 0) fmd_serd_eng_discard(sgp, list_head(&sgp->sg_list)); diff --git a/cmd/zed/agents/zfs_mod.c b/cmd/zed/agents/zfs_mod.c index 6d3e7cb11250..d980794d0a57 100644 --- a/cmd/zed/agents/zfs_mod.c +++ b/cmd/zed/agents/zfs_mod.c @@ -157,7 +157,7 @@ zfs_unavail_pool(zpool_handle_t *zhp, void *data) * 1. physical match with no fs, no partition * tag it top, partition disk * - * 2. physical match again, see partion and tag + * 2. physical match again, see partition and tag * */ @@ -674,7 +674,7 @@ zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi) devid, devpath ? devpath : "NULL", is_slice); /* - * Iterate over all vdevs looking for a match in the folllowing order: + * Iterate over all vdevs looking for a match in the following order: * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk) * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location). * @@ -892,7 +892,7 @@ zfs_enum_pools(void *arg) * * sent messages from zevents or udev monitor * - * For now, each agent has it's own libzfs instance + * For now, each agent has its own libzfs instance */ int zfs_slm_init() diff --git a/cmd/zed/zed.d/statechange-led.sh b/cmd/zed/zed.d/statechange-led.sh index 6484b79592aa..e656e125d378 100755 --- a/cmd/zed/zed.d/statechange-led.sh +++ b/cmd/zed/zed.d/statechange-led.sh @@ -20,7 +20,7 @@ # # Exit codes: # 0: enclosure led successfully set -# 1: enclosure leds not not available +# 1: enclosure leds not available # 2: enclosure leds administratively disabled # 3: The led sysfs path passed from ZFS does not exist # 4: $ZPOOL not set @@ -68,7 +68,7 @@ check_and_set_led() # timeout. for _ in $(seq 1 5); do # We want to check the current state first, since writing to the - # 'fault' entry always always causes a SES command, even if the + # 'fault' entry always causes a SES command, even if the # current state is already what you want. current=$(cat "${file}") diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c index 53b76e25d6a6..e6b5978259d6 100644 --- a/cmd/zfs/zfs_main.c +++ b/cmd/zfs/zfs_main.c @@ -8061,7 +8061,7 @@ zfs_do_change_key(int argc, char **argv) * 4) zfs project [-p id] [-r] [-s] * Set project ID and/or inherit flag on the file(s) or directories. * -p: Set the project ID as the given id. - * -r: Set on subdirectorie recursively. If not specify "-p" option, + * -r: Set on subdirectories recursively. 
If not specify "-p" option, * it will use top-level directory's project ID as the given id, * then set both project ID and inherit flag on all descendants * of the top-level directory. diff --git a/cmd/zinject/translate.c b/cmd/zinject/translate.c index 700961b06a3c..d4795d07110f 100644 --- a/cmd/zinject/translate.c +++ b/cmd/zinject/translate.c @@ -176,7 +176,7 @@ object_from_path(const char *dataset, uint64_t object, zinject_record_t *record) } /* - * Intialize the range based on the type, level, and range given. + * Initialize the range based on the type, level, and range given. */ static int initialize_range(err_type_t type, int level, char *range, @@ -310,7 +310,7 @@ translate_record(err_type_t type, const char *object, const char *range, ziprintf("raw object: %llu\n", record->zi_object); /* - * For the given object, intialize the range in bytes + * For the given object, initialize the range in bytes */ if (initialize_range(type, level, (char *)range, record) != 0) goto err; diff --git a/cmd/zpool/zpool_vdev.c b/cmd/zpool/zpool_vdev.c index 52c696816f73..ef2a30996e53 100644 --- a/cmd/zpool/zpool_vdev.c +++ b/cmd/zpool/zpool_vdev.c @@ -438,7 +438,7 @@ check_disk(const char *path, blkid_cache cache, int force, } /* - * Expected to fail for non-EFI labled disks. Just check the device + * Expected to fail for non-EFI labeled disks. Just check the device * as given and do not attempt to detect and scan partitions. */ err = efi_alloc_and_read(fd, &vtoc); @@ -1867,7 +1867,7 @@ make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force, int check_rep, } /* - * Validate each device to make sure that its not shared with another + * Validate each device to make sure that it's not shared with another * subsystem. We do this even if 'force' is set, because there are some * uses (such as a dedicated dump device) that even '-f' cannot * override. diff --git a/cmd/zstreamdump/zstreamdump.c b/cmd/zstreamdump/zstreamdump.c index 5f2813ce603f..4c996ae554c4 100644 --- a/cmd/zstreamdump/zstreamdump.c +++ b/cmd/zstreamdump/zstreamdump.c @@ -197,7 +197,7 @@ print_block(char *buf, int length) } /* - * Print an array of bytes to stdout as hexidecimal characters. str must + * Print an array of bytes to stdout as hexadecimal characters. str must * have buf_len * 2 + 1 bytes of space. */ static void diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c index 01421e85bf0a..24ea49c10adc 100644 --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@ -74,7 +74,7 @@ * * To turn this into an overnight stress test, use -T to specify run time. * - * You can ask more more vdevs [-v], datasets [-d], or threads [-t] + * You can ask more vdevs [-v], datasets [-d], or threads [-t] * to increase the pool capacity, fanout, and overall stress level. * * Use the -k option to set the desired frequency of kills. 
From cd6b910b64a999f7943020786892e6a688f980d7 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 18:44:43 +0200 Subject: [PATCH 46/68] Fix typos in contrib/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9235 --- contrib/initramfs/scripts/zfs.in | 12 ++++++------ contrib/pyzfs/docs/source/conf.py | 2 +- contrib/pyzfs/libzfs_core/_libzfs_core.py | 14 +++++++------- contrib/pyzfs/libzfs_core/_nvlist.py | 2 +- contrib/pyzfs/libzfs_core/exceptions.py | 4 ++-- contrib/pyzfs/libzfs_core/test/test_libzfs_core.py | 8 ++++---- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/contrib/initramfs/scripts/zfs.in b/contrib/initramfs/scripts/zfs.in index 9e90d76bb114..c82b210d7e95 100644 --- a/contrib/initramfs/scripts/zfs.in +++ b/contrib/initramfs/scripts/zfs.in @@ -78,7 +78,7 @@ find_rootfs() { local pool="$1" - # If 'POOL_IMPORTED' isn't set, no pool imported and therefor + # If 'POOL_IMPORTED' isn't set, no pool imported and therefore # we won't be able to find a root fs. [ -z "${POOL_IMPORTED}" ] && return 1 @@ -135,7 +135,7 @@ get_pools() # Get the base list of available pools. available_pools=$(find_pools "$ZPOOL" import) - # Just in case - seen it happen (that a pool isn't visable/found + # Just in case - seen it happen (that a pool isn't visible/found # with a simple "zpool import" but only when using the "-d" # option or setting ZPOOL_IMPORT_PATH). if [ -d "/dev/disk/by-id" ] @@ -401,7 +401,7 @@ mount_fs() return 0 } -# Unlock a ZFS native crypted filesystem. +# Unlock a ZFS native encrypted filesystem. decrypt_fs() { local fs="$1" @@ -606,7 +606,7 @@ setup_snapshot_booting() if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline then # If the destination dataset for the clone - # already exists, destroy it. Recursivly + # already exists, destroy it. Recursively if [ $(get_fs_value "${rootfs}_${snapname}" type) ]; then filesystems=$("${ZFS}" list -oname -tfilesystem -H \ -r -Sname "${ZFS_BOOTFS}") @@ -616,7 +616,7 @@ setup_snapshot_booting() fi fi - # Get all snapshots, recursivly (might need to clone /usr, /var etc + # Get all snapshots, recursively (might need to clone /usr, /var etc # as well). for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \ grep "${snapname}") @@ -843,7 +843,7 @@ mountroot() # Strip 'zfs:' and 'ZFS='. ZFS_BOOTFS="${ROOT#*[:=]}" - # Stip everything after the first slash. + # Strip everything after the first slash. ZFS_RPOOL="${ZFS_BOOTFS%%/*}" fi diff --git a/contrib/pyzfs/docs/source/conf.py b/contrib/pyzfs/docs/source/conf.py index 4ffd7c93e5bd..4bbb938b6296 100644 --- a/contrib/pyzfs/docs/source/conf.py +++ b/contrib/pyzfs/docs/source/conf.py @@ -291,7 +291,7 @@ ####################### # Neutralize effects of function wrapping on documented signatures. -# The affected signatures could be explcitly placed into the +# The affected signatures could be explicitly placed into the # documentation (either in .rst files or as a first line of a # docstring). import functools diff --git a/contrib/pyzfs/libzfs_core/_libzfs_core.py b/contrib/pyzfs/libzfs_core/_libzfs_core.py index 5a4843943940..06797b0f36d5 100644 --- a/contrib/pyzfs/libzfs_core/_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/_libzfs_core.py @@ -300,7 +300,7 @@ def lzc_destroy_snaps(snaps, defer): Typical error is :exc:`SnapshotIsCloned` if `defer` is `False`. The snapshot names are validated quite loosely and invalid names are - typically ignored as nonexisiting snapshots. 
+ typically ignored as nonexisting snapshots. A snapshot name referring to a filesystem that doesn't exist is ignored. @@ -470,7 +470,7 @@ def lzc_hold(holds, fd=None): Holds for snapshots which don't exist will be skipped and have an entry added to the return value, but will not cause an overall failure. No exceptions is raised if all holds, for snapshots that existed, were - succesfully created. + successfully created. Otherwise :exc:`.HoldFailure` exception is raised and no holds will be created. :attr:`.HoldFailure.errors` may contain a single element for an error that @@ -654,7 +654,7 @@ def lzc_send_space(snapname, fromsnap=None, flags=None): should be done. :param fromsnap: the optional starting snapshot name. If not `None` then an incremental stream size is estimated, otherwise - a full stream is esimated. + a full stream is estimated. :type fromsnap: `bytes` or `None` :param flags: the flags that control what enhanced features can be used in the stream. @@ -1178,11 +1178,11 @@ def receive_header(fd): the type of the dataset for which the stream has been created (volume, filesystem) ''' - # read sizeof(dmu_replay_record_t) bytes directly into the memort backing + # read sizeof(dmu_replay_record_t) bytes directly into the memory backing # 'record' record = _ffi.new("dmu_replay_record_t *") _ffi.buffer(record)[:] = os.read(fd, _ffi.sizeof(record[0])) - # get drr_begin member and its representation as a Pythn dict + # get drr_begin member and its representation as a Python dict drr_begin = record.drr_u.drr_begin header = {} for field, descr in _ffi.typeof(drr_begin).fields: @@ -1688,7 +1688,7 @@ def lzc_set_props(name, prop, val): # As the extended API is not committed yet, the names of the new interfaces # are not settled down yet. # It's not clear if atomically setting multiple properties is an achievable -# goal and an interface acting on mutiple entities must do so atomically +# goal and an interface acting on multiple entities must do so atomically # by convention. # Being able to set a single property at a time is sufficient for ClusterHQ. lzc_set_prop = lzc_set_props @@ -1725,7 +1725,7 @@ def lzc_list(name, options): Absence of this option implies all types. The first of the returned file descriptors can be used to - read the listing in a binary encounded format. The data is + read the listing in a binary encoded format. The data is a series of variable sized records each starting with a fixed size header, the header is followed by a serialized ``nvlist``. Each record describes a single element and contains the element's diff --git a/contrib/pyzfs/libzfs_core/_nvlist.py b/contrib/pyzfs/libzfs_core/_nvlist.py index fe4239a3c06e..dc6d820bdea3 100644 --- a/contrib/pyzfs/libzfs_core/_nvlist.py +++ b/contrib/pyzfs/libzfs_core/_nvlist.py @@ -113,7 +113,7 @@ def packed_nvlist_out(packed_nvlist, packed_size): :param bytes packed_nvlist: packed nvlist_t. :param int packed_size: nvlist_t packed size. - :return: an `dict` of values representing the data containted by nvlist_t. + :return: an `dict` of values representing the data contained by nvlist_t. 
:rtype: dict """ props = {} diff --git a/contrib/pyzfs/libzfs_core/exceptions.py b/contrib/pyzfs/libzfs_core/exceptions.py index f465cd3d9309..f8a775433b3c 100644 --- a/contrib/pyzfs/libzfs_core/exceptions.py +++ b/contrib/pyzfs/libzfs_core/exceptions.py @@ -77,7 +77,7 @@ def __str__(self): ZFSError.__str__(self), len(self.errors), self.suppressed_count) def __repr__(self): - return "%s(%r, %r, errors=%r, supressed=%r)" % ( + return "%s(%r, %r, errors=%r, suppressed=%r)" % ( self.__class__.__name__, self.errno, self.message, self.errors, self.suppressed_count) @@ -372,7 +372,7 @@ def __init__(self, name): class QuotaExceeded(ZFSError): errno = errno.EDQUOT - message = "Quouta exceeded" + message = "Quota exceeded" def __init__(self, name): self.name = name diff --git a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py index 18306f88e4e5..613d5eccd0c2 100644 --- a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py @@ -1913,7 +1913,7 @@ def test_recv_incremental(self): filecmp.cmp( os.path.join(mnt1, name), os.path.join(mnt2, name), False)) - # This test case fails unless unless a patch from + # This test case fails unless a patch from # https://clusterhq.atlassian.net/browse/ZFS-20 # is applied to libzfs_core, otherwise it succeeds. @unittest.skip("fails with unpatched libzfs_core") @@ -2160,7 +2160,7 @@ def test_recv_incremental_non_clone_but_set_origin(self): with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) lzc.lzc_snapshot([dst_snap]) - # becase cannot receive incremental and set origin on a non-clone + # because cannot receive incremental and set origin on a non-clone with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive(dst2, incr.fileno(), origin=dst1) @@ -2375,7 +2375,7 @@ def test_force_recv_full_existing_modified_mounted_fs(self): for i in range(1024): f.write(b'x' * 1024) lzc.lzc_receive(dst, stream.fileno(), force=True) - # The temporary file dissappears and any access, even close(), + # The temporary file disappears and any access, even close(), # results in EIO. self.assertFalse(os.path.exists(f.name)) with self.assertRaises(IOError): @@ -2462,7 +2462,7 @@ def test_force_recv_incremental_modified_mounted_fs(self): for i in range(1024): f.write(b'x' * 1024) lzc.lzc_receive(dst2, incr.fileno(), force=True) - # The temporary file dissappears and any access, even close(), + # The temporary file disappears and any access, even close(), # results in EIO. self.assertFalse(os.path.exists(f.name)) with self.assertRaises(IOError): From 0463c955011fc3913f9aaad6c686f48f118d2fef Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 18:46:52 +0200 Subject: [PATCH 47/68] Fix typos in etc/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9236 --- etc/init.d/zfs-functions.in | 4 ++-- etc/init.d/zfs-import.in | 4 ++-- etc/zfs/vdev_id.conf.sas_direct.example | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/etc/init.d/zfs-functions.in b/etc/init.d/zfs-functions.in index d65c79dcfd36..043f1b07398e 100644 --- a/etc/init.d/zfs-functions.in +++ b/etc/init.d/zfs-functions.in @@ -72,7 +72,7 @@ elif type einfo > /dev/null 2>&1 ; then # zfs_log_progress_msg() { echo -n "$1"; } zfs_log_progress_msg() { echo -n; } else - # Unknown - simple substitues. + # Unknown - simple substitutes. 
zfs_log_begin_msg() { echo -n "$1"; } zfs_log_end_msg() { ret=$1 @@ -283,7 +283,7 @@ checksystem() # Called with zfs=(off|no|0) - bail because we don't # want anything import, mounted or shared. # HOWEVER, only do this if we're called at the boot up - # (from init), not if we're running interactivly (as in + # (from init), not if we're running interactively (as in # from the shell - we know what we're doing). [ -n "$init" ] && exit 3 fi diff --git a/etc/init.d/zfs-import.in b/etc/init.d/zfs-import.in index 420d2e8a7a4e..47c957baac4b 100644 --- a/etc/init.d/zfs-import.in +++ b/etc/init.d/zfs-import.in @@ -90,7 +90,7 @@ do_import_all_visible() already_imported=$(find_pools "$ZPOOL" list -H -oname) available_pools=$(find_pools "$ZPOOL" import) - # Just in case - seen it happen (that a pool isn't visable/found + # Just in case - seen it happen (that a pool isn't visible/found # with a simple "zpool import" but only when using the "-d" # option or setting ZPOOL_IMPORT_PATH). if [ -d "/dev/disk/by-id" ] @@ -187,7 +187,7 @@ do_import_all_visible() # Needs to be exported for "zpool" to catch it. [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH - # Mount all availible pools (except those set in ZFS_POOL_EXCEPTIONS. + # Mount all available pools (except those set in ZFS_POOL_EXCEPTIONS. # # If not interactive (run from init - variable init='/sbin/init') # we get ONE line for all pools being imported, with just a dot diff --git a/etc/zfs/vdev_id.conf.sas_direct.example b/etc/zfs/vdev_id.conf.sas_direct.example index 0a6f130cb2d9..d17ed149d89b 100644 --- a/etc/zfs/vdev_id.conf.sas_direct.example +++ b/etc/zfs/vdev_id.conf.sas_direct.example @@ -2,7 +2,7 @@ multipath no topology sas_direct phys_per_port 4 -# Additionally create /dev/by-enclousure/ symlinks for enclosure devices +# Additionally create /dev/by-enclosure/ symlinks for enclosure devices enclosure_symlinks yes # PCI_ID HBA PORT CHANNEL NAME From cf7c5a030e505451e0cbadc49e33e41f5219c44b Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 18:53:15 +0200 Subject: [PATCH 48/68] Fix typos in include/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9238 --- include/linux/vfs_compat.h | 2 +- include/spl/sys/kmem_cache.h | 2 +- include/sys/arc.h | 2 +- include/sys/arc_impl.h | 2 +- include/sys/avl.h | 4 ++-- include/sys/dmu.h | 2 +- include/sys/efi_partition.h | 4 ++-- include/sys/fs/zfs.h | 4 ++-- include/sys/lua/luaconf.h | 2 +- include/sys/sa.h | 2 +- include/sys/trace.h | 2 +- include/sys/trace_vdev.h | 2 +- include/sys/txg_impl.h | 4 ++-- include/sys/vdev_raidz_impl.h | 2 +- include/sys/zcp.h | 2 +- include/sys/zfs_acl.h | 2 +- include/sys/zfs_vfsops.h | 4 ++-- include/sys/zil.h | 4 ++-- include/sys/zio_crypt.h | 6 +++--- include/sys/zio_impl.h | 2 +- 20 files changed, 28 insertions(+), 28 deletions(-) diff --git a/include/linux/vfs_compat.h b/include/linux/vfs_compat.h index 04a2c2b879fe..28b454133c6e 100644 --- a/include/linux/vfs_compat.h +++ b/include/linux/vfs_compat.h @@ -36,7 +36,7 @@ * 2.6.28 API change, * Added insert_inode_locked() helper function, prior to this most callers * used insert_inode_hash(). The older method doesn't check for collisions - * in the inode_hashtable but it still acceptible for use. + * in the inode_hashtable but it still acceptable for use. 
*/ #ifndef HAVE_INSERT_INODE_LOCKED static inline int diff --git a/include/spl/sys/kmem_cache.h b/include/spl/sys/kmem_cache.h index bb413207deff..4ee7bcae07e7 100644 --- a/include/spl/sys/kmem_cache.h +++ b/include/spl/sys/kmem_cache.h @@ -30,7 +30,7 @@ /* * Slab allocation interfaces. The SPL slab differs from the standard * Linux SLAB or SLUB primarily in that each cache may be backed by slabs - * allocated from the physical or virtal memory address space. The virtual + * allocated from the physical or virtual memory address space. The virtual * slabs allow for good behavior when allocation large objects of identical * size. This slab implementation also supports both constructors and * destructors which the Linux slab does not. diff --git a/include/sys/arc.h b/include/sys/arc.h index 59c0bea92022..f6dea3fbd045 100644 --- a/include/sys/arc.h +++ b/include/sys/arc.h @@ -187,7 +187,7 @@ typedef enum arc_buf_contents { } arc_buf_contents_t; /* - * The following breakdows of arc_size exist for kstat only. + * The following breakdowns of arc_size exist for kstat only. */ typedef enum arc_space_type { ARC_SPACE_DATA, diff --git a/include/sys/arc_impl.h b/include/sys/arc_impl.h index cd42c0c01a20..c8f551db731d 100644 --- a/include/sys/arc_impl.h +++ b/include/sys/arc_impl.h @@ -39,7 +39,7 @@ extern "C" { * Note that buffers can be in one of 6 states: * ARC_anon - anonymous (discussed below) * ARC_mru - recently used, currently cached - * ARC_mru_ghost - recentely used, no longer in cache + * ARC_mru_ghost - recently used, no longer in cache * ARC_mfu - frequently used, currently cached * ARC_mfu_ghost - frequently used, no longer in cache * ARC_l2c_only - exists in L2ARC but not other states diff --git a/include/sys/avl.h b/include/sys/avl.h index 206b539fab54..962e8b1cfb6f 100644 --- a/include/sys/avl.h +++ b/include/sys/avl.h @@ -97,7 +97,7 @@ extern "C" { * * 3. Use avl_destroy_nodes() to quickly process/free up any remaining nodes. * Note that once you use avl_destroy_nodes(), you can no longer - * use any routine except avl_destroy_nodes() and avl_destoy(). + * use any routine except avl_destroy_nodes() and avl_destroy(). * * 4. Use avl_destroy() to destroy the AVL tree itself. * @@ -144,7 +144,7 @@ typedef uintptr_t avl_index_t; * user data structure which must contain a field of type avl_node_t. * * Also assume the user data structures looks like: - * stuct my_type { + * struct my_type { * ... * avl_node_t my_link; * ... diff --git a/include/sys/dmu.h b/include/sys/dmu.h index 62de1eaf5857..36eff4572db7 100644 --- a/include/sys/dmu.h +++ b/include/sys/dmu.h @@ -466,7 +466,7 @@ int dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, /* * Set the data blocksize for an object. * - * The object cannot have any blocks allcated beyond the first. If + * The object cannot have any blocks allocated beyond the first. If * the first block is allocated already, the new size must be greater * than the current block size. If these conditions are not met, * ENOTSUP will be returned. diff --git a/include/sys/efi_partition.h b/include/sys/efi_partition.h index 684b3e588a16..88bdfd2b1ca3 100644 --- a/include/sys/efi_partition.h +++ b/include/sys/efi_partition.h @@ -297,11 +297,11 @@ typedef struct efi_gpe { * checksums, and perform any necessary byte-swapping to the on-disk * format. 
*/ -/* Solaris library abstraction for EFI partitons */ +/* Solaris library abstraction for EFI partitions */ typedef struct dk_part { diskaddr_t p_start; /* starting LBA */ diskaddr_t p_size; /* size in blocks */ - struct uuid p_guid; /* partion type GUID */ + struct uuid p_guid; /* partition type GUID */ ushort_t p_tag; /* converted to part'n type GUID */ ushort_t p_flag; /* attributes */ char p_name[EFI_PART_NAME_LEN]; /* partition name */ diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h index b4f3ede9b481..eb970b2cdfcf 100644 --- a/include/sys/fs/zfs.h +++ b/include/sys/fs/zfs.h @@ -957,7 +957,7 @@ typedef struct pool_scan_stat { /* values not stored on disk */ uint64_t pss_pass_exam; /* examined bytes per scan pass */ uint64_t pss_pass_start; /* start time of a scan pass */ - uint64_t pss_pass_scrub_pause; /* pause time of a scurb pass */ + uint64_t pss_pass_scrub_pause; /* pause time of a scrub pass */ /* cumulative time scrub spent paused, needed for rate calculation */ uint64_t pss_pass_scrub_spent_paused; uint64_t pss_pass_issued; /* issued bytes per scan pass */ @@ -1031,7 +1031,7 @@ typedef struct vdev_stat { uint64_t vs_fragmentation; /* device fragmentation */ uint64_t vs_initialize_bytes_done; /* bytes initialized */ uint64_t vs_initialize_bytes_est; /* total bytes to initialize */ - uint64_t vs_initialize_state; /* vdev_initialzing_state_t */ + uint64_t vs_initialize_state; /* vdev_initializing_state_t */ uint64_t vs_initialize_action_time; /* time_t */ uint64_t vs_checkpoint_space; /* checkpoint-consumed space */ uint64_t vs_resilver_deferred; /* resilver deferred */ diff --git a/include/sys/lua/luaconf.h b/include/sys/lua/luaconf.h index 302c57a8c4b3..fa7861336fc0 100644 --- a/include/sys/lua/luaconf.h +++ b/include/sys/lua/luaconf.h @@ -495,7 +495,7 @@ extern int64_t lcompat_pow(int64_t, int64_t); ** a single double value, using NaN values to represent non-number ** values. The trick only works on 32-bit machines (ints and pointers ** are 32-bit values) with numbers represented as IEEE 754-2008 doubles -** with conventional endianess (12345678 or 87654321), in CPUs that do +** with conventional endianness (12345678 or 87654321), in CPUs that do ** not produce signaling NaN values (all NaNs are quiet). */ diff --git a/include/sys/sa.h b/include/sys/sa.h index 50b90622164b..432e0bc415c9 100644 --- a/include/sys/sa.h +++ b/include/sys/sa.h @@ -51,7 +51,7 @@ typedef uint16_t sa_attr_type_t; typedef struct sa_attr_reg { char *sa_name; /* attribute name */ uint16_t sa_length; - sa_bswap_type_t sa_byteswap; /* bswap functon enum */ + sa_bswap_type_t sa_byteswap; /* bswap function enum */ sa_attr_type_t sa_attr; /* filled in during registration */ } sa_attr_reg_t; diff --git a/include/sys/trace.h b/include/sys/trace.h index 48497cc35f1e..e2cd634b4a2a 100644 --- a/include/sys/trace.h +++ b/include/sys/trace.h @@ -43,7 +43,7 @@ * the DEFINE_DTRACE_PROBE macros. * * When adding new DTRACE_PROBEs to zfs source, both a tracepoint event - * class defintition and a DEFINE_DTRACE_PROBE definition are needed to + * class definition and a DEFINE_DTRACE_PROBE definition are needed to * avoid undefined function errors. */ diff --git a/include/sys/trace_vdev.h b/include/sys/trace_vdev.h index 289aca69eb7c..13688a99e437 100644 --- a/include/sys/trace_vdev.h +++ b/include/sys/trace_vdev.h @@ -128,7 +128,7 @@ DEFINE_REMOVE_FREE_EVENT_TXG(zfs_remove__free__inflight); /* * When tracepoints are not available, a DEFINE_DTRACE_PROBE* macro is * needed for each DTRACE_PROBE. 
These will be used to generate stub - * tracing functions and protoypes for those functions. See + * tracing functions and prototypes for those functions. See * include/sys/trace.h. */ diff --git a/include/sys/txg_impl.h b/include/sys/txg_impl.h index 4e05214919d7..047d51b94c66 100644 --- a/include/sys/txg_impl.h +++ b/include/sys/txg_impl.h @@ -43,7 +43,7 @@ extern "C" { * the number of active transaction holds (tc_count). As transactions * are assigned into a transaction group the appropriate tc_count is * incremented to indicate that there are pending changes that have yet - * to quiesce. Consumers evenutally call txg_rele_to_sync() to decrement + * to quiesce. Consumers eventually call txg_rele_to_sync() to decrement * the tc_count. A transaction group is not considered quiesced until all * tx_cpu structures have reached a tc_count of zero. * @@ -78,7 +78,7 @@ struct tx_cpu { /* * The tx_state structure maintains the state information about the different - * stages of the pool's transcation groups. A per pool tx_state structure + * stages of the pool's transaction groups. A per pool tx_state structure * is used to track this information. The tx_state structure also points to * an array of tx_cpu structures (described above). Although the tx_sync_lock * is used to protect the members of this structure, it is not used to diff --git a/include/sys/vdev_raidz_impl.h b/include/sys/vdev_raidz_impl.h index 4969d110b858..2e38962cc317 100644 --- a/include/sys/vdev_raidz_impl.h +++ b/include/sys/vdev_raidz_impl.h @@ -158,7 +158,7 @@ extern const raidz_impl_ops_t vdev_raidz_aarch64_neonx2_impl; * * raidz_parity Returns parity of the RAIDZ block * raidz_ncols Returns number of columns the block spans - * raidz_nbigcols Returns number of big columns columns + * raidz_nbigcols Returns number of big columns * raidz_col_p Returns pointer to a column * raidz_col_size Returns size of a column * raidz_big_size Returns size of big columns diff --git a/include/sys/zcp.h b/include/sys/zcp.h index b720d863779c..5cc520da5c56 100644 --- a/include/sys/zcp.h +++ b/include/sys/zcp.h @@ -149,7 +149,7 @@ typedef struct zcp_arg { /* * The name of this argument. For keyword arguments this is the name * functions will use to set the argument. For positional arguments - * the name has no programatic meaning, but will appear in error + * the name has no programmatic meaning, but will appear in error * messages and help output. */ const char *za_name; diff --git a/include/sys/zfs_acl.h b/include/sys/zfs_acl.h index 6d3db5041608..747f4e57e2a0 100644 --- a/include/sys/zfs_acl.h +++ b/include/sys/zfs_acl.h @@ -62,7 +62,7 @@ struct znode_phys; /* * All ACEs have a common hdr. For * owner@, group@, and everyone@ this is all - * thats needed. + * that's needed. */ typedef struct zfs_ace_hdr { uint16_t z_type; diff --git a/include/sys/zfs_vfsops.h b/include/sys/zfs_vfsops.h index c6ab353f7c77..2886d9e25638 100644 --- a/include/sys/zfs_vfsops.h +++ b/include/sys/zfs_vfsops.h @@ -47,7 +47,7 @@ struct znode; /* * This structure emulates the vfs_t from other platforms. It's purpose - * is to faciliate the handling of mount options and minimize structural + * is to facilitate the handling of mount options and minimize structural * differences between the platforms. 
*/ typedef struct vfs { @@ -106,7 +106,7 @@ struct zfsvfs { list_t z_all_znodes; /* all znodes in the fs */ uint64_t z_nr_znodes; /* number of znodes in the fs */ unsigned long z_rollback_time; /* last online rollback time */ - unsigned long z_snap_defer_time; /* last snapshot unmount deferal */ + unsigned long z_snap_defer_time; /* last snapshot unmount deferral */ kmutex_t z_znodes_lock; /* lock for z_all_znodes */ arc_prune_t *z_arc_prune; /* called by ARC to prune caches */ struct inode *z_ctldir; /* .zfs directory inode */ diff --git a/include/sys/zil.h b/include/sys/zil.h index cfa5e3995505..6b038a9dd228 100644 --- a/include/sys/zil.h +++ b/include/sys/zil.h @@ -80,7 +80,7 @@ typedef struct zil_header { * Log blocks are chained together. Originally they were chained at the * end of the block. For performance reasons the chain was moved to the * beginning of the block which allows writes for only the data being used. - * The older position is supported for backwards compatability. + * The older position is supported for backwards compatibility. * * The zio_eck_t contains a zec_cksum which for the intent log is * the sequence number of this log block. A seq of 0 is invalid. @@ -421,7 +421,7 @@ typedef struct zil_stats { /* * Number of transactions (reads, writes, renames, etc.) - * that have been commited. + * that have been committed. */ kstat_named_t zil_itx_count; diff --git a/include/sys/zio_crypt.h b/include/sys/zio_crypt.h index d54e2fe192fa..a029127914b2 100644 --- a/include/sys/zio_crypt.h +++ b/include/sys/zio_crypt.h @@ -55,7 +55,7 @@ typedef struct zio_crypt_info { /* length of the encryption key */ size_t ci_keylen; - /* human-readable name of the encryption alforithm */ + /* human-readable name of the encryption algorithm */ char *ci_name; } zio_crypt_info_t; @@ -78,7 +78,7 @@ typedef struct zio_crypt_key { /* buffer for hmac key */ uint8_t zk_hmac_keydata[SHA512_HMAC_KEYLEN]; - /* buffer for currrent encryption key derived from master key */ + /* buffer for current encryption key derived from master key */ uint8_t zk_current_keydata[MASTER_KEY_MAX_LEN]; /* current 64 bit salt for deriving an encryption key */ @@ -99,7 +99,7 @@ typedef struct zio_crypt_key { /* template of hmac key for illumos crypto api */ crypto_ctx_template_t zk_hmac_tmpl; - /* lock for changing the salt and dependant values */ + /* lock for changing the salt and dependent values */ krwlock_t zk_salt_lock; } zio_crypt_key_t; diff --git a/include/sys/zio_impl.h b/include/sys/zio_impl.h index fbbe06eb04f8..8ca12463176d 100644 --- a/include/sys/zio_impl.h +++ b/include/sys/zio_impl.h @@ -87,7 +87,7 @@ extern "C" { * * NOP Write: * The NOP write feature is performed by the ZIO_STAGE_NOP_WRITE stage - * and is added to an existing write pipeline if a crypographically + * and is added to an existing write pipeline if a cryptographically * secure checksum (i.e. SHA256) is enabled and compression is turned on. * The NOP write stage will compare the checksums of the current data * on-disk (level-0 blocks only) and the data that is currently being written. 
From 9d40bdf414e9baa8fcaf60188e2067e7ec3c1f0f Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 23:26:07 +0200 Subject: [PATCH 49/68] Fix typos in modules/icp/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9239 --- module/icp/algs/skein/skein_block.c | 6 +++--- module/icp/api/kcf_ctxops.c | 4 ++-- module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl | 2 +- module/icp/asm-x86_64/aes/aesopt.h | 2 +- module/icp/core/kcf_mech_tabs.c | 2 +- module/icp/core/kcf_sched.c | 12 ++++++------ module/icp/illumos-crypto.c | 2 +- module/icp/include/sys/crypto/impl.h | 2 +- module/icp/include/sys/crypto/sched_impl.h | 6 +++--- module/icp/include/sys/crypto/spi.h | 2 +- module/icp/os/modhash.c | 2 +- module/icp/spi/kcf_spi.c | 2 +- 12 files changed, 22 insertions(+), 22 deletions(-) diff --git a/module/icp/algs/skein/skein_block.c b/module/icp/algs/skein/skein_block.c index 6d85cb7d9e98..7ba165a48511 100644 --- a/module/icp/algs/skein/skein_block.c +++ b/module/icp/algs/skein/skein_block.c @@ -159,7 +159,7 @@ Skein_256_Process_Block(Skein_256_Ctxt_t *ctx, const uint8_t *blkPtr, ts[r + (R) + 2] = ts[r + (R) - 1]; \ Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); - /* loop thru it */ + /* loop through it */ for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_256) #endif { @@ -385,7 +385,7 @@ Skein_512_Process_Block(Skein_512_Ctxt_t *ctx, const uint8_t *blkPtr, ts[r + (R)+2] = ts[r + (R) - 1]; \ Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); - /* loop thru it */ + /* loop through it */ for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_512) #endif /* end of looped code definitions */ { @@ -667,7 +667,7 @@ Skein1024_Process_Block(Skein1024_Ctxt_t *ctx, const uint8_t *blkPtr, ts[r + (R) + 2] = ts[r + (R) - 1]; \ Skein_Show_R_Ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, Xptr); - /* loop thru it */ + /* loop through it */ for (r = 1; r <= 2 * RCNT; r += 2 * SKEIN_UNROLL_1024) #endif { diff --git a/module/icp/api/kcf_ctxops.c b/module/icp/api/kcf_ctxops.c index b9b9cb74e04f..21b0977d3634 100644 --- a/module/icp/api/kcf_ctxops.c +++ b/module/icp/api/kcf_ctxops.c @@ -63,7 +63,7 @@ * * Returns: * CRYPTO_SUCCESS when the context template is successfully created. - * CRYPTO_HOST_MEMEORY: mem alloc failure + * CRYPTO_HOST_MEMORY: mem alloc failure * CRYPTO_ARGUMENTS_BAD: NULL storage for the ctx template. * RYPTO_MECHANISM_INVALID: invalid mechanism 'mech'. */ @@ -123,7 +123,7 @@ crypto_create_ctx_template(crypto_mechanism_t *mech, crypto_key_t *key, * crypto_create_ctx_template() * * Description: - * Frees the inbedded crypto_spi_ctx_template_t, then the + * Frees the embedded crypto_spi_ctx_template_t, then the * kcf_ctx_template_t. * * Context: diff --git a/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl index a2c4adcbe6a5..92c9e196a318 100644 --- a/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl +++ b/module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl @@ -101,7 +101,7 @@ * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library + * The word 'cryptographic' can be left out if the routines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: diff --git a/module/icp/asm-x86_64/aes/aesopt.h b/module/icp/asm-x86_64/aes/aesopt.h index 6aa61db8275a..472111f96e59 100644 --- a/module/icp/asm-x86_64/aes/aesopt.h +++ b/module/icp/asm-x86_64/aes/aesopt.h @@ -327,7 +327,7 @@ extern "C" { * On some systems speed will be improved by aligning the AES large lookup * tables on particular boundaries. This define should be set to a power of * two giving the desired alignment. It can be left undefined if alignment - * is not needed. This option is specific to the Micrsoft VC++ compiler - + * is not needed. This option is specific to the Microsoft VC++ compiler - * it seems to sometimes cause trouble for the VC++ version 6 compiler. */ diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c index 741dae7a748e..2642b317d698 100644 --- a/module/icp/core/kcf_mech_tabs.c +++ b/module/icp/core/kcf_mech_tabs.c @@ -103,7 +103,7 @@ kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = { * Per-algorithm internal thresholds for the minimum input size of before * offloading to hardware provider. * Dispatching a crypto operation to a hardware provider entails paying the - * cost of an additional context switch. Measurments with Sun Accelerator 4000 + * cost of an additional context switch. Measurements with Sun Accelerator 4000 * shows that 512-byte jobs or smaller are better handled in software. * There is room for refinement here. * diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c index da2346f7ec21..c8c2bbd42b9a 100644 --- a/module/icp/core/kcf_sched.c +++ b/module/icp/core/kcf_sched.c @@ -182,7 +182,7 @@ kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx, * reached, signal the creator thread for more threads. * * If the two conditions above are not met, we don't need to do - * any thing. The request will be picked up by one of the + * anything. The request will be picked up by one of the * worker threads when it becomes available. */ static int @@ -1182,7 +1182,7 @@ kcf_aop_done(kcf_areq_node_t *areq, int error) /* * Handle recoverable errors. This has to be done first - * before doing any thing else in this routine so that + * before doing anything else in this routine so that * we do not change the state of the request. */ if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) { @@ -1432,7 +1432,7 @@ crypto_cancel_req(crypto_req_id_t id) /* * There is no interface to remove an entry * once it is on the taskq. So, we do not do - * any thing for a hardware provider. + * anything for a hardware provider. */ break; default: @@ -1535,7 +1535,7 @@ kcf_misc_kstat_update(kstat_t *ksp, int rw) } /* - * Allocate and initiatize a kcf_dual_req, used for saving the arguments of + * Allocate and initialize a kcf_dual_req, used for saving the arguments of * a dual operation or an atomic operation that has to be internally * simulated with multiple single steps. * crq determines the memory allocation flags. 
@@ -1551,7 +1551,7 @@ kcf_alloc_req(crypto_call_req_t *crq) if (kcr == NULL) return (NULL); - /* Copy the whole crypto_call_req struct, as it isn't persistant */ + /* Copy the whole crypto_call_req struct, as it isn't persistent */ if (crq != NULL) kcr->kr_callreq = *crq; else @@ -1579,7 +1579,7 @@ kcf_next_req(void *next_req_arg, int status) kcf_provider_desc_t *pd = NULL; crypto_dual_data_t *ct = NULL; - /* Stop the processing if an error occured at this step */ + /* Stop the processing if an error occurred at this step */ if (error != CRYPTO_SUCCESS) { out: areq->an_reqarg = next_req->kr_callreq; diff --git a/module/icp/illumos-crypto.c b/module/icp/illumos-crypto.c index c2fcf1ff729c..3c5ef4393940 100644 --- a/module/icp/illumos-crypto.c +++ b/module/icp/illumos-crypto.c @@ -93,7 +93,7 @@ * will use the generic implementation. * * 7) Removing sha384 and sha512 code: The sha code was actually very - * wasy to port. However, the generic sha384 and sha512 code actually + * easy to port. However, the generic sha384 and sha512 code actually * exceeds the stack size on arm and powerpc architectures. In an effort * to remove warnings, this code was removed. * diff --git a/module/icp/include/sys/crypto/impl.h b/module/icp/include/sys/crypto/impl.h index 258cb5fedcd0..0f37f3f63532 100644 --- a/module/icp/include/sys/crypto/impl.h +++ b/module/icp/include/sys/crypto/impl.h @@ -237,7 +237,7 @@ typedef struct kcf_provider_list { struct kcf_provider_desc *pl_provider; } kcf_provider_list_t; -/* atomic operations in linux implictly form a memory barrier */ +/* atomic operations in linux implicitly form a memory barrier */ #define membar_exit() /* diff --git a/module/icp/include/sys/crypto/sched_impl.h b/module/icp/include/sys/crypto/sched_impl.h index 32ffa774957b..85ea0ba1d092 100644 --- a/module/icp/include/sys/crypto/sched_impl.h +++ b/module/icp/include/sys/crypto/sched_impl.h @@ -381,7 +381,7 @@ typedef struct kcf_pool { /* * cv & lock for the condition where more threads need to be - * created. kp_user_lock also protects the three fileds above. + * created. kp_user_lock also protects the three fields above. */ kcondvar_t kp_user_cv; /* Creator cond. variable */ kmutex_t kp_user_lock; /* Creator lock */ @@ -448,13 +448,13 @@ typedef struct kcf_ntfy_elem { * The following values are based on the assumption that it would * take around eight cpus to load a hardware provider (This is true for * at least one product) and a kernel client may come from different - * low-priority interrupt levels. We will have CYRPTO_TASKQ_MIN number + * low-priority interrupt levels. We will have CRYPTO_TASKQ_MIN number * of cached taskq entries. The CRYPTO_TASKQ_MAX number is based on * a throughput of 1GB/s using 512-byte buffers. These are just * reasonable estimates and might need to change in future. */ #define CRYPTO_TASKQ_THREADS 8 -#define CYRPTO_TASKQ_MIN 64 +#define CRYPTO_TASKQ_MIN 64 #define CRYPTO_TASKQ_MAX 2 * 1024 * 1024 extern int crypto_taskq_threads; diff --git a/module/icp/include/sys/crypto/spi.h b/module/icp/include/sys/crypto/spi.h index 0aae9181adc7..2c62b5706651 100644 --- a/module/icp/include/sys/crypto/spi.h +++ b/module/icp/include/sys/crypto/spi.h @@ -699,7 +699,7 @@ typedef struct crypto_provider_info { /* * Provider status passed by a provider to crypto_provider_notification(9F) - * and returned by the provider_stauts(9E) entry point. + * and returned by the provider_status(9E) entry point. 
*/ #define CRYPTO_PROVIDER_READY 0 #define CRYPTO_PROVIDER_BUSY 1 diff --git a/module/icp/os/modhash.c b/module/icp/os/modhash.c index 497e84396665..5e216ed6a04a 100644 --- a/module/icp/os/modhash.c +++ b/module/icp/os/modhash.c @@ -48,7 +48,7 @@ * The number returned need _not_ be between 0 and nchains. The mod_hash * code will take care of doing that. The second argument (after the * key) to the hashing function is a void * that represents - * hash_alg_data-- this is provided so that the hashing algrorithm can + * hash_alg_data-- this is provided so that the hashing algorithm can * maintain some state across calls, or keep algorithm-specific * constants associated with the hash table. * diff --git a/module/icp/spi/kcf_spi.c b/module/icp/spi/kcf_spi.c index 0a6e38df8625..e438b58105b6 100644 --- a/module/icp/spi/kcf_spi.c +++ b/module/icp/spi/kcf_spi.c @@ -40,7 +40,7 @@ * minalloc and maxalloc values to be used for taskq_create(). */ int crypto_taskq_threads = CRYPTO_TASKQ_THREADS; -int crypto_taskq_minalloc = CYRPTO_TASKQ_MIN; +int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN; int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX; static void remove_provider(kcf_provider_desc_t *); From 9f5c1bc60935068d947dd596a7dbefdf4d04efd7 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Fri, 30 Aug 2019 23:32:18 +0200 Subject: [PATCH 50/68] Fix typos in module/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9241 --- module/avl/avl.c | 2 +- module/lua/llimits.h | 2 +- module/nvpair/nvpair.c | 6 +++--- module/spl/spl-condvar.c | 4 ++-- module/spl/spl-generic.c | 4 ++-- module/spl/spl-kmem-cache.c | 4 ++-- module/spl/spl-tsd.c | 4 ++-- module/zcommon/zfeature_common.c | 2 +- module/zcommon/zfs_namecheck.c | 2 +- module/zcommon/zpool_prop.c | 2 +- 10 files changed, 16 insertions(+), 16 deletions(-) diff --git a/module/avl/avl.c b/module/avl/avl.c index 736dcee84579..1d2843f0e716 100644 --- a/module/avl/avl.c +++ b/module/avl/avl.c @@ -159,7 +159,7 @@ avl_walk(avl_tree_t *tree, void *oldnode, int left) node = node->avl_child[right]) ; /* - * Otherwise, return thru left children as far as we can. + * Otherwise, return through left children as far as we can. */ } else { for (;;) { diff --git a/module/lua/llimits.h b/module/lua/llimits.h index eee8f0c2d538..2126a14648dc 100644 --- a/module/lua/llimits.h +++ b/module/lua/llimits.h @@ -98,7 +98,7 @@ typedef LUAI_UACNUMBER l_uacNumber; /* ** non-return type ** -** Supress noreturn attribute in kernel builds to avoid objtool check warnings +** Suppress noreturn attribute in kernel builds to avoid objtool check warnings */ #if defined(__GNUC__) && !defined(_KERNEL) #define l_noret void __attribute__((noreturn)) diff --git a/module/nvpair/nvpair.c b/module/nvpair/nvpair.c index 5f6423ccce73..c5bd98ebd055 100644 --- a/module/nvpair/nvpair.c +++ b/module/nvpair/nvpair.c @@ -1872,7 +1872,7 @@ nvlist_lookup_pairs(nvlist_t *nvl, int flag, ...) * (given 'ret' is non-NULL). If 'sep' is specified then 'name' will penitrate * multiple levels of embedded nvlists, with 'sep' as the separator. As an * example, if sep is '.', name might look like: "a" or "a.b" or "a.c[3]" or - * "a.d[3].e[1]". This matches the C syntax for array embed (for convience, + * "a.d[3].e[1]". This matches the C syntax for array embed (for convenience, * code also supports "a.d[3]e[1]" syntax). 
* * If 'ip' is non-NULL and the last name component is an array, return the @@ -3105,7 +3105,7 @@ nvs_native(nvstream_t *nvs, nvlist_t *nvl, char *buf, size_t *buflen) * * An xdr packed nvlist is encoded as: * - * - encoding methode and host endian (4 bytes) + * - encoding method and host endian (4 bytes) * - nvl_version (4 bytes) * - nvl_nvflag (4 bytes) * @@ -3499,7 +3499,7 @@ nvs_xdr_nvp_size(nvstream_t *nvs, nvpair_t *nvp, size_t *size) * the strings. These pointers are not encoded into the packed xdr buffer. * * If the data is of type DATA_TYPE_STRING_ARRAY and all the strings are - * of length 0, then each string is endcoded in xdr format as a single word. + * of length 0, then each string is encoded in xdr format as a single word. * Therefore when expanded to an nvpair there will be 2.25 word used for * each string. (a int64_t allocated for pointer usage, and a single char * for the null termination.) diff --git a/module/spl/spl-condvar.c b/module/spl/spl-condvar.c index 664fae1e7199..3cc33da6298a 100644 --- a/module/spl/spl-condvar.c +++ b/module/spl/spl-condvar.c @@ -431,8 +431,8 @@ __cv_signal(kcondvar_t *cvp) /* * All waiters are added with WQ_FLAG_EXCLUSIVE so only one - * waiter will be set runable with each call to wake_up(). - * Additionally wake_up() holds a spin_lock assoicated with + * waiter will be set runnable with each call to wake_up(). + * Additionally wake_up() holds a spin_lock associated with * the wait queue to ensure we don't race waking up processes. */ if (atomic_read(&cvp->cv_waiters) > 0) diff --git a/module/spl/spl-generic.c b/module/spl/spl-generic.c index 3c5ef60bd1a4..1deb2f444cd1 100644 --- a/module/spl/spl-generic.c +++ b/module/spl/spl-generic.c @@ -79,7 +79,7 @@ EXPORT_SYMBOL(p0); * to generate words larger than 128 bits will paradoxically be limited to * `2^128 - 1` possibilities. This is because we have a sequence of `2^128 - 1` * 128-bit words and selecting the first will implicitly select the second. If - * a caller finds this behavior undesireable, random_get_bytes() should be used + * a caller finds this behavior undesirable, random_get_bytes() should be used * instead. * * XXX: Linux interrupt handlers that trigger within the critical section @@ -207,7 +207,7 @@ nlz64(uint64_t x) /* * Newer kernels have a div_u64() function but we define our own - * to simplify portibility between kernel versions. + * to simplify portability between kernel versions. */ static inline uint64_t __div_u64(uint64_t u, uint32_t v) diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c index 44e112cccbd9..b39867b03741 100644 --- a/module/spl/spl-kmem-cache.c +++ b/module/spl/spl-kmem-cache.c @@ -185,7 +185,7 @@ MODULE_PARM_DESC(spl_kmem_cache_kmem_threads, struct list_head spl_kmem_cache_list; /* List of caches */ struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */ -taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */ +taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */ static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj); @@ -995,7 +995,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align, #if defined(SLAB_USERCOPY) /* * Required for PAX-enabled kernels if the slab is to be - * used for coping between user and kernel space. + * used for copying between user and kernel space. 
*/ slabflags |= SLAB_USERCOPY; #endif diff --git a/module/spl/spl-tsd.c b/module/spl/spl-tsd.c index 4c800292ae77..14342d5a618b 100644 --- a/module/spl/spl-tsd.c +++ b/module/spl/spl-tsd.c @@ -42,7 +42,7 @@ * type is entry is called a 'key' entry and it is added to the hash during * tsd_create(). It is used to store the address of the destructor function * and it is used as an anchor point. All tsd entries which use the same - * key will be linked to this entry. This is used during tsd_destory() to + * key will be linked to this entry. This is used during tsd_destroy() to * quickly call the destructor function for all tsd associated with the key. * The 'key' entry may be looked up with tsd_hash_search() by passing the * key you wish to lookup and DTOR_PID constant as the pid. @@ -269,7 +269,7 @@ tsd_hash_add_key(tsd_hash_table_t *table, uint_t *keyp, dtor_func_t dtor) * @table: hash table * @pid: search pid * - * For every process these is a single entry in the hash which is used + * For every process there is a single entry in the hash which is used * as anchor. All other thread specific entries for this process are * linked to this anchor via the 'he_pid_list' list head. */ diff --git a/module/zcommon/zfeature_common.c b/module/zcommon/zfeature_common.c index 8e1aef5daa77..e5a1aff9cea8 100644 --- a/module/zcommon/zfeature_common.c +++ b/module/zcommon/zfeature_common.c @@ -551,7 +551,7 @@ zpool_feature_init(void) zfeature_register(SPA_FEATURE_RESILVER_DEFER, "com.datto:resilver_defer", "resilver_defer", - "Support for defering new resilvers when one is already running.", + "Support for deferring new resilvers when one is already running.", ZFEATURE_FLAG_READONLY_COMPAT, ZFEATURE_TYPE_BOOLEAN, NULL); } diff --git a/module/zcommon/zfs_namecheck.c b/module/zcommon/zfs_namecheck.c index 285f8b644ea8..1649fd5455b2 100644 --- a/module/zcommon/zfs_namecheck.c +++ b/module/zcommon/zfs_namecheck.c @@ -74,7 +74,7 @@ get_dataset_depth(const char *path) /* * Keep track of nesting until you hit the end of the - * path or found the snapshot/bookmark seperator. + * path or found the snapshot/bookmark separator. */ for (int i = 0; loc[i] != '\0' && loc[i] != '@' && diff --git a/module/zcommon/zpool_prop.c b/module/zcommon/zpool_prop.c index 539de5645f81..155d557f464a 100644 --- a/module/zcommon/zpool_prop.c +++ b/module/zcommon/zpool_prop.c @@ -156,7 +156,7 @@ zpool_name_to_prop(const char *propname) /* * Given a pool property ID, returns the corresponding name. - * Assuming the pool propety ID is valid. + * Assuming the pool property ID is valid. 
*/ const char * zpool_prop_to_name(zpool_prop_t prop) From b520706f29958b04146884b9554d4e81a62a31fa Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Sat, 31 Aug 2019 01:52:00 +0200 Subject: [PATCH 51/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9245 --- .../tests/functional/redacted_send/redacted_contents.ksh | 2 +- .../tests/functional/redacted_send/redacted_deleted.ksh | 2 +- .../functional/redacted_send/redacted_disabled_feature.ksh | 2 +- .../tests/functional/redundancy/redundancy_001_pos.ksh | 2 +- .../zfs-tests/tests/functional/refreserv/refreserv_003_pos.ksh | 2 +- .../tests/functional/refreserv/refreserv_multi_raidz.ksh | 2 +- tests/zfs-tests/tests/functional/removal/removal_nopwrite.ksh | 2 +- .../tests/functional/reservation/reservation_001_pos.ksh | 2 +- .../tests/functional/reservation/reservation_008_pos.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/rsend.kshlib | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted_contents.ksh b/tests/zfs-tests/tests/functional/redacted_send/redacted_contents.ksh index 58dbde7f16e0..fb12862c9531 100755 --- a/tests/zfs-tests/tests/functional/redacted_send/redacted_contents.ksh +++ b/tests/zfs-tests/tests/functional/redacted_send/redacted_contents.ksh @@ -28,7 +28,7 @@ # 4. A file moved in the clone does not redact the file. # 5. A copied, then removed file in the clone redacts the whole file. # 6. Overwriting a file with identical contents redacts the file. -# 7. A paritally modified block redacts the entire block. +# 7. A partially modified block redacts the entire block. # 8. Only overlapping areas of modified ranges are redacted. # 9. Send from the root dataset of a pool work correctly. # diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted_deleted.ksh b/tests/zfs-tests/tests/functional/redacted_send/redacted_deleted.ksh index e25b51cb9b93..3e2aeb733546 100755 --- a/tests/zfs-tests/tests/functional/redacted_send/redacted_deleted.ksh +++ b/tests/zfs-tests/tests/functional/redacted_send/redacted_deleted.ksh @@ -65,7 +65,7 @@ log_must mount_redacted -f $recvfs # # We have temporarily disabled redaction blkptrs, so this will not # fail as was originally intended. We should uncomment this line -# when we reenable redaction blkptrs. +# when we re-enable redaction blkptrs. # #log_mustnot dd if=$recv_mnt/f1 of=/dev/null bs=512 count=1 log_must diff $send_mnt/f2 $recv_mnt/f2 diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted_disabled_feature.ksh b/tests/zfs-tests/tests/functional/redacted_send/redacted_disabled_feature.ksh index 24478f1bc182..3cf73f00167e 100755 --- a/tests/zfs-tests/tests/functional/redacted_send/redacted_disabled_feature.ksh +++ b/tests/zfs-tests/tests/functional/redacted_send/redacted_disabled_feature.ksh @@ -26,7 +26,7 @@ # 1. Create a pool with all features disabled. # 2. Verify redacted send fails. # 3. Enable redaction_bookmarks and verify redacted sends works. -# 4. Verify recepit of a redacted stream fails. +# 4. Verify receipt of a redacted stream fails. # 5. Enable recacted_datasets and verify zfs receive works. 
# diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_001_pos.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_001_pos.ksh index e25a48be8df3..b5557f1f7e44 100755 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_001_pos.ksh @@ -41,7 +41,7 @@ # 3. Fill the filesystem with directories and files. # 4. Record all the files and directories checksum information. # 5. Damaged one of the virtual disk file. -# 6. Verify the data is correct to prove raidz can withstand 1 devicd is +# 6. Verify the data is correct to prove raidz can withstand 1 device is # failing. # diff --git a/tests/zfs-tests/tests/functional/refreserv/refreserv_003_pos.ksh b/tests/zfs-tests/tests/functional/refreserv/refreserv_003_pos.ksh index da36609f2c41..3e5a78cf944f 100755 --- a/tests/zfs-tests/tests/functional/refreserv/refreserv_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/refreserv/refreserv_003_pos.ksh @@ -38,7 +38,7 @@ # space outside of this refreservation. # # STRATEGY: -# 1. Setting quota and refservation +# 1. Setting quota and refreservation # 2. Verify snapshot can be created, when used =< quota - refreserv # 3. Verify failed to create snapshot, when used > quota - refreserv # diff --git a/tests/zfs-tests/tests/functional/refreserv/refreserv_multi_raidz.ksh b/tests/zfs-tests/tests/functional/refreserv/refreserv_multi_raidz.ksh index 803e391c9ce4..c904a807f17c 100755 --- a/tests/zfs-tests/tests/functional/refreserv/refreserv_multi_raidz.ksh +++ b/tests/zfs-tests/tests/functional/refreserv/refreserv_multi_raidz.ksh @@ -125,7 +125,7 @@ done log_note "sizes=$(print -C sizes)" # -# Helper furnction for checking that refreservation is calculated properly in +# Helper function for checking that refreservation is calculated properly in # multi-vdev pools. "Properly" is defined as assuming that all vdevs are as # space inefficient as the worst one. # diff --git a/tests/zfs-tests/tests/functional/removal/removal_nopwrite.ksh b/tests/zfs-tests/tests/functional/removal/removal_nopwrite.ksh index cb8bd6b810c1..e5d8261e80b6 100755 --- a/tests/zfs-tests/tests/functional/removal/removal_nopwrite.ksh +++ b/tests/zfs-tests/tests/functional/removal/removal_nopwrite.ksh @@ -64,7 +64,7 @@ log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK # # Normally, we expect nopwrites to avoid allocating new blocks, but # after a device has been removed the DVAs will get remapped when -# a L0's indirect bloock is written. This will negate the effects +# a L0's indirect block is written. This will negate the effects # of nopwrite and should result in new allocations. # diff --git a/tests/zfs-tests/tests/functional/reservation/reservation_001_pos.ksh b/tests/zfs-tests/tests/functional/reservation/reservation_001_pos.ksh index b72b8e4a388e..b8220791f1d4 100755 --- a/tests/zfs-tests/tests/functional/reservation/reservation_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/reservation/reservation_001_pos.ksh @@ -115,7 +115,7 @@ for obj in $TESTPOOL/$TESTFS $OBJ_LIST; do # # Due to the way space is consumed and released by metadata we - # can't do an exact check here, but we do do a basic sanity + # can't do an exact check here, but we do a basic sanity # check. 
# log_must within_limits $space_avail $new_space_avail $RESV_TOLERANCE diff --git a/tests/zfs-tests/tests/functional/reservation/reservation_008_pos.ksh b/tests/zfs-tests/tests/functional/reservation/reservation_008_pos.ksh index fbf4276e8bda..a0cd039b1839 100755 --- a/tests/zfs-tests/tests/functional/reservation/reservation_008_pos.ksh +++ b/tests/zfs-tests/tests/functional/reservation/reservation_008_pos.ksh @@ -85,7 +85,7 @@ resv_size_set=`expr $resv_space_avail / $num_resv_fs` # # We set the reservations now, rather than when we created the filesystems -# to allow us to take into account space used by the filsystem metadata +# to allow us to take into account space used by the filesystem metadata # # Note we don't set a reservation on the first filesystem we created, # hence num=1 rather than zero below. diff --git a/tests/zfs-tests/tests/functional/rsend/rsend.kshlib b/tests/zfs-tests/tests/functional/rsend/rsend.kshlib index 7bfbc1aef373..12af9d3fcd04 100644 --- a/tests/zfs-tests/tests/functional/rsend/rsend.kshlib +++ b/tests/zfs-tests/tests/functional/rsend/rsend.kshlib @@ -343,7 +343,7 @@ function getds_with_suffix } # -# Output inherited properties whitch is edited for file system +# Output inherited properties which is edited for file system # function fs_inherit_prop { From c9539600484d8f89d48629eb5775c8b1967fe7d7 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Sat, 31 Aug 2019 01:53:48 +0200 Subject: [PATCH 52/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9248 --- .../tests/functional/cli_root/zfs_receive/zfs_receive_raw.ksh | 4 ++-- .../cli_root/zfs_rollback/zfs_rollback_common.kshlib | 2 +- .../tests/functional/cli_root/zfs_send/zfs_send_004_neg.ksh | 2 +- .../tests/functional/cli_root/zfs_set/cache_002_neg.ksh | 2 +- .../tests/functional/cli_root/zfs_set/canmount_002_pos.ksh | 2 +- .../tests/functional/cli_root/zfs_set/mountpoint_002_pos.ksh | 2 +- .../tests/functional/cli_root/zfs_set/zfs_set_common.kshlib | 4 ++-- .../functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh | 2 +- .../functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh | 2 +- .../functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh | 4 ++-- .../functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh | 4 ++-- .../functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh | 2 +- .../functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh | 2 +- .../functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh | 2 +- .../tests/functional/cli_root/zpool/zpool_001_neg.ksh | 2 +- 15 files changed, 19 insertions(+), 19 deletions(-) diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_raw.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_raw.ksh index 2042b37a98f7..e2e2c5f010f7 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_raw.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_raw.ksh @@ -31,11 +31,11 @@ # 4. Attempt to receive a raw send stream as a child of an unencrypted dataset # 5. Verify the key is unavailable # 6. Attempt to load the key and mount the dataset -# 7. Verify the cheksum of the file is the same as the original +# 7. Verify the checksum of the file is the same as the original # 8. Attempt to receive a raw send stream as a child of an encrypted dataset # 9. Verify the key is unavailable # 10. Attempt to load the key and mount the dataset -# 11. 
Verify the cheksum of the file is the same as the original +# 11. Verify the checksum of the file is the same as the original # verify_runnable "both" diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib b/tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib index 5b157d11c15f..f69ec300ca98 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib @@ -147,7 +147,7 @@ function setup_clone_env } # -# Clean up the test environmnet +# Clean up the test environment # # $1 number of snapshot Note: Currently only support three snapshots. # diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_004_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_004_neg.ksh index da14fa2fa62c..4a9d29fce1cf 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_004_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_004_neg.ksh @@ -65,7 +65,7 @@ snap2=$fs@snap2 snap3=$fs@snap3 set -A badargs \ - "" "$TESTPOOL" "$TESTFS" "$fs" "$fs@nonexisten_snap" "?" \ + "" "$TESTPOOL" "$TESTFS" "$fs" "$fs@nonexistent_snap" "?" \ "$snap1/blah" "$snap1@blah" "-i" "-x" "-i $fs" \ "-x $snap1 $snap2" "-i $snap1" \ "-i $snap2 $snap1" "$snap1 $snap2" "-i $snap1 $snap2 $snap3" \ diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_set/cache_002_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_set/cache_002_neg.ksh index 5fbc8bf71657..caad211bcf65 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_set/cache_002_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_set/cache_002_neg.ksh @@ -64,4 +64,4 @@ do done done -log_pass "Setting invalid {primary|secondary}cache on fs or volume fail as expeced." +log_pass "Setting invalid {primary|secondary}cache on fs or volume fail as expected." diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_002_pos.ksh index 7cbcf7903e33..3b8b88e3631e 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_002_pos.ksh @@ -40,7 +40,7 @@ # # STRATEGY: # 1. Setup a pool and create fs, volume, snapshot clone within it. -# 2. Set canmount=noauto for each dataset and check the retuen value +# 2. Set canmount=noauto for each dataset and check the return value # and check if it still can be mounted by mount -a. # 3. mount each dataset(except volume) to see if it can be mounted. # diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_set/mountpoint_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_set/mountpoint_002_pos.ksh index ad33e18fbb24..48580cafdb31 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_set/mountpoint_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_set/mountpoint_002_pos.ksh @@ -34,7 +34,7 @@ # # DESCRIPTION: -# If ZFS is currently managing the file system but it is currently unmoutned, +# If ZFS is currently managing the file system but it is currently unmounted, # and the mountpoint property is changed, the file system remains unmounted. 
# # STRATEGY: diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib b/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib index 084a4a0a82ac..5e9f719dfcfe 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib @@ -156,7 +156,7 @@ function random_string } # -# Get vaild user defined property name +# Get valid user defined property name # # $1 user defined property name length # @@ -189,7 +189,7 @@ function valid_user_property } # -# Get invaild user defined property name +# Get invalid user defined property name # # $1 user defined property name length # diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh index 2efcf1cceb7e..5d8b6e2750f5 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh @@ -82,7 +82,7 @@ while (( i < ${#args[*]} )); do ((i = i + 1)) done -# Testing the invalid senario: the child volume already has an +# Testing the invalid scenario: the child volume already has an # identical name snapshot, zfs snapshot -r should fail when # creating snapshot with -r for the parent log_must zfs destroy $TESTPOOL/$TESTCTR/$TESTFS1@$TESTSNAP diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh index 377910013271..627910abd6ed 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh @@ -34,7 +34,7 @@ # STRATEGY: # 1. Create 2 separate zpools, zpool name lengths must be the same. # 2. Attempt to simultaneously create a snapshot of each pool. -# 3. Veriy the snapshot creation failed. +# 3. Verify the snapshot creation failed. # verify_runnable "both" diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh index 4cd98af0c69d..f0682b816ae8 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh @@ -22,7 +22,7 @@ # 1. Create multiple datasets # 2. Create multiple snapshots with a list of valid and invalid # snapshot names -# 3. Verify the valid snpashot creation +# 3. Verify the valid snapshot creation . 
$STF_SUITE/include/libtest.shlib @@ -86,7 +86,7 @@ for i in 1 2 3; do txg_tag=$(echo "$txg_group" | nawk -v j=$i 'FNR == j {print}') [[ $txg_tag != $(echo "$txg_group" | \ nawk -v j=$i 'FNR == j {print}') ]] \ - && log_fail "snapshots belong to differnt transaction groups" + && log_fail "snapshots belong to different transaction groups" done log_note "verify snapshot contents" for ds in $datasets; do diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh index 0ed14a99fc27..3575875c2767 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh @@ -83,7 +83,7 @@ function restore_dataset } -log_assert "zfs fource unmount and destroy in snapshot directory will not cause error." +log_assert "zfs force unmount and destroy in snapshot directory will not cause error." log_onexit cleanup for fs in $TESTPOOL/$TESTFS $TESTPOOL ; do @@ -139,4 +139,4 @@ log_must eval zpool list > /dev/null 2>&1 log_must eval zpool status > /dev/null 2>&1 zpool iostat > /dev/null 2>&1 -log_pass "zfs fource unmount and destroy in snapshot directory will not cause error." +log_pass "zfs force unmount and destroy in snapshot directory will not cause error." diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh index 7bb1cd4a37ca..ca625bd2278a 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh @@ -140,7 +140,7 @@ while (( i < ${#mntp_fs[*]} )); do ((i = i + 2)) done -log_note "Verify 'zfs unshare -a' succeds as root." +log_note "Verify 'zfs unshare -a' succeeds as root." i=0 typeset sharenfs_val diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh index e92581c7c9bf..fd916040b1bc 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh @@ -46,7 +46,7 @@ verify_runnable "global" export NONEXISTFSNAME="nonexistfs50charslong_0123456789012345678901234567" export NONEXISTMOUNTPOINT="/nonexistmountpoint_0123456789" -set -A opts "" "$TESTPOOL/$NONEXISTFSNAME" "$NONEEXISTMOUNTPOINT" "-?" "-1" \ +set -A opts "" "$TESTPOOL/$NONEXISTFSNAME" "$NONEXISTMOUNTPOINT" "-?" "-1" \ "-a blah" "$TESTPOOL/$TESTFS $TESTPOOL/$TESTFS1" \ "-f $TESTPOOL/$TESTFS $TESTPOOL/$TESTFS1" \ "$TESTPOOL/$TESTFS $TESTDIR" "-f $TESTPOOL/$TESTFS $TESTDIR" \ diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh index e37b4f81abf4..d3ed4a736cc9 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh @@ -133,7 +133,7 @@ COUNT=$( wc -l $output | awk '{print $1}' ) if (( COUNT != OLDCOUNT )); then cat $output - log_fail "Unexpect old-version filesystems print out." + log_fail "Unexpected old-version filesystems print out." fi log_pass "Executing 'zfs upgrade' command succeeds." 
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_001_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_001_neg.ksh index a3158bd57819..25decd78863b 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_001_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_001_neg.ksh @@ -37,7 +37,7 @@ # return an error. # # STRATEGY: -# 1. Create an array containg each zpool sub-command name. +# 1. Create an array containing each zpool sub-command name. # 2. For each element, execute the sub-command. # 3. Verify it returns an error. # From 7859537768e030d0151a6d72a6b031751228bc85 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 02:53:27 +0200 Subject: [PATCH 53/68] Fix typos in lib/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9237 --- lib/libefi/rdwr_efi.c | 6 +++--- lib/libshare/smb.c | 2 +- lib/libspl/asm-generic/atomic.c | 2 +- lib/libspl/include/atomic.h | 4 ++-- lib/libspl/include/sys/kstat.h | 2 +- lib/libspl/include/sys/param.h | 2 +- lib/libspl/include/sys/uio.h | 2 +- lib/libspl/include/sys/vtoc.h | 2 +- lib/libspl/mkdirp.c | 2 +- lib/libtpool/thread_pool.c | 2 +- lib/libzfs/THIRDPARTYLICENSE.openssl | 2 +- lib/libzfs/libzfs_crypto.c | 4 ++-- lib/libzfs/libzfs_dataset.c | 4 ++-- lib/libzfs/libzfs_pool.c | 6 +++--- lib/libzfs/libzfs_sendrecv.c | 2 +- lib/libzfs_core/libzfs_core.c | 2 +- lib/libzutil/zutil_import.c | 6 +++--- 17 files changed, 26 insertions(+), 26 deletions(-) diff --git a/lib/libefi/rdwr_efi.c b/lib/libefi/rdwr_efi.c index 93c79277dae5..5311059ee814 100644 --- a/lib/libefi/rdwr_efi.c +++ b/lib/libefi/rdwr_efi.c @@ -224,7 +224,7 @@ efi_get_info(int fd, struct dk_cinfo *dki_info) /* * The simplest way to get the partition number under linux is - * to parse it out of the /dev/ block device name. + * to parse it out of the /dev/ block device name. * The kernel creates this using the partition number when it * populates /dev/ so it may be trusted. The tricky bit here is * that the naming convention is based on the block device type. @@ -1198,7 +1198,7 @@ efi_use_whole_disk(int fd) * Verify that we've found the reserved partition by checking * that it looks the way it did when we created it in zpool_label_disk. * If we've found the incorrect partition, then we know that this - * device was reformatted and no longer is soley used by ZFS. + * device was reformatted and no longer is solely used by ZFS. */ if ((efi_label->efi_parts[resv_index].p_size != EFI_MIN_RESV_SIZE) || (efi_label->efi_parts[resv_index].p_tag != V_RESERVED) || @@ -1284,7 +1284,7 @@ efi_write(int fd, struct dk_gpt *vtoc) if ((rval = efi_get_info(fd, &dki_info)) != 0) return (rval); - /* check if we are dealing wih a metadevice */ + /* check if we are dealing with a metadevice */ if ((strncmp(dki_info.dki_cname, "pseudo", 7) == 0) && (strncmp(dki_info.dki_dname, "md", 3) == 0)) { md_flag = 1; diff --git a/lib/libshare/smb.c b/lib/libshare/smb.c index 4c2045dfdb4d..a95607ee0324 100644 --- a/lib/libshare/smb.c +++ b/lib/libshare/smb.c @@ -29,7 +29,7 @@ * * TESTING * Make sure that samba listens to 'localhost' (127.0.0.1) and that the options - * 'usershare max shares' and 'usershare owner only' have been rewied/set + * 'usershare max shares' and 'usershare owner only' have been reviewed/set * accordingly (see zfs(8) for information). 
* * Once configuration in samba have been done, test that this diff --git a/lib/libspl/asm-generic/atomic.c b/lib/libspl/asm-generic/atomic.c index d0023b182813..03f8ddcfa8f9 100644 --- a/lib/libspl/asm-generic/atomic.c +++ b/lib/libspl/asm-generic/atomic.c @@ -37,7 +37,7 @@ pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER; /* - * Theses are the void returning variants + * These are the void returning variants */ /* BEGIN CSTYLED */ #define ATOMIC_INC(name, type) \ diff --git a/lib/libspl/include/atomic.h b/lib/libspl/include/atomic.h index 7072a11bdb16..f8c257f9696b 100644 --- a/lib/libspl/include/atomic.h +++ b/lib/libspl/include/atomic.h @@ -79,7 +79,7 @@ extern void atomic_add_64(volatile uint64_t *, int64_t); #endif /* - * Substract delta from target + * Subtract delta from target */ extern void atomic_sub_8(volatile uint8_t *, int8_t); extern void atomic_sub_char(volatile uchar_t *, signed char); @@ -173,7 +173,7 @@ extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t); #endif /* - * Substract delta from target + * Subtract delta from target */ extern uint8_t atomic_sub_8_nv(volatile uint8_t *, int8_t); extern uchar_t atomic_sub_char_nv(volatile uchar_t *, signed char); diff --git a/lib/libspl/include/sys/kstat.h b/lib/libspl/include/sys/kstat.h index 9bd0d949d542..69fb6d401fc7 100644 --- a/lib/libspl/include/sys/kstat.h +++ b/lib/libspl/include/sys/kstat.h @@ -82,7 +82,7 @@ typedef struct kstat { void *ks_data; /* kstat type-specific data */ uint_t ks_ndata; /* # of type-specific data records */ size_t ks_data_size; /* total size of kstat data section */ - hrtime_t ks_snaptime; /* time of last data shapshot */ + hrtime_t ks_snaptime; /* time of last data snapshot */ /* * Fields relevant to kernel only */ diff --git a/lib/libspl/include/sys/param.h b/lib/libspl/include/sys/param.h index c22d508f9b07..26335187fdca 100644 --- a/lib/libspl/include/sys/param.h +++ b/lib/libspl/include/sys/param.h @@ -37,7 +37,7 @@ * with smaller units (fragments) only in the last direct block. * MAXBSIZE primarily determines the size of buffers in the buffer * pool. It may be made larger without any effect on existing - * file systems; however making it smaller make make some file + * file systems; however making it smaller may make some file * systems unmountable. * * Note that the blocked devices are assumed to have DEV_BSIZE diff --git a/lib/libspl/include/sys/uio.h b/lib/libspl/include/sys/uio.h index 97e8412ef70a..91ee3b3fd00d 100644 --- a/lib/libspl/include/sys/uio.h +++ b/lib/libspl/include/sys/uio.h @@ -75,7 +75,7 @@ typedef enum xuio_type { typedef struct uioa_page_s { /* locked uio_iov state */ int uioa_pfncnt; /* count of pfn_t(s) in *uioa_ppp */ - void **uioa_ppp; /* page_t or pfn_t arrary */ + void **uioa_ppp; /* page_t or pfn_t array */ caddr_t uioa_base; /* address base */ size_t uioa_len; /* span length */ } uioa_page_t; diff --git a/lib/libspl/include/sys/vtoc.h b/lib/libspl/include/sys/vtoc.h index 22a652b74bf1..5d8448b628dc 100644 --- a/lib/libspl/include/sys/vtoc.h +++ b/lib/libspl/include/sys/vtoc.h @@ -51,7 +51,7 @@ extern "C" { * v_sanity returned as VTOC_SANE * if Disk Label was sane * v_sectorsz returned as 512 - * v_reserved [all] retunred as zero + * v_reserved [all] returned as zero * timestamp [all] returned as zero * * See dklabel.h, read_vtoc(), and write_vtoc(). 
diff --git a/lib/libspl/mkdirp.c b/lib/libspl/mkdirp.c index 54174175200e..fce2c1c82eb7 100644 --- a/lib/libspl/mkdirp.c +++ b/lib/libspl/mkdirp.c @@ -128,7 +128,7 @@ mkdirp(const char *d, mode_t mode) * caller, or NULL is returned on error. * * The caller should handle error reporting based upon the - * returned vlaue, and should free the returned value, + * returned value, and should free the returned value, * when appropriate. */ diff --git a/lib/libtpool/thread_pool.c b/lib/libtpool/thread_pool.c index a43fdd9cd608..267fa834bd72 100644 --- a/lib/libtpool/thread_pool.c +++ b/lib/libtpool/thread_pool.c @@ -134,7 +134,7 @@ tpool_worker(void *arg) /* * This is the worker's main loop. - * It will only be left if a timeout or an error has occured. + * It will only be left if a timeout or an error has occurred. */ active.tpa_tid = pthread_self(); for (;;) { diff --git a/lib/libzfs/THIRDPARTYLICENSE.openssl b/lib/libzfs/THIRDPARTYLICENSE.openssl index a2c4adcbe6a5..92c9e196a318 100644 --- a/lib/libzfs/THIRDPARTYLICENSE.openssl +++ b/lib/libzfs/THIRDPARTYLICENSE.openssl @@ -101,7 +101,7 @@ * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library + * The word 'cryptographic' can be left out if the routines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: diff --git a/lib/libzfs/libzfs_crypto.c b/lib/libzfs/libzfs_crypto.c index d31f43b1fdf2..b7b567ef53c5 100644 --- a/lib/libzfs/libzfs_crypto.c +++ b/lib/libzfs/libzfs_crypto.c @@ -242,7 +242,7 @@ get_key_material_raw(FILE *fd, const char *fsname, zfs_keyformat_t keyformat, out: if (isatty(fileno(fd))) { - /* reset the teminal */ + /* reset the terminal */ (void) tcsetattr(fileno(fd), TCSAFLUSH, &old_term); (void) sigaction(SIGINT, &osigint, NULL); (void) sigaction(SIGTSTP, &osigtstp, NULL); @@ -1321,7 +1321,7 @@ zfs_crypto_rewrap(zfs_handle_t *zhp, nvlist_t *raw_props, boolean_t inheritkey) if (is_encroot) { /* - * If this is already an ecryption root, just keep + * If this is already an encryption root, just keep * any properties not set by the user. */ if (keyformat == ZFS_KEYFORMAT_NONE) { diff --git a/lib/libzfs/libzfs_dataset.c b/lib/libzfs/libzfs_dataset.c index 4285d1224e63..ef638a49806c 100644 --- a/lib/libzfs/libzfs_dataset.c +++ b/lib/libzfs/libzfs_dataset.c @@ -1451,7 +1451,7 @@ zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl, * There was an error in parsing so * deal with it by issuing an error * message and leaving after - * uninitializing the the libshare + * uninitializing the libshare * interface. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, @@ -1656,7 +1656,7 @@ zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl) /* * Helper for 'zfs {set|clone} refreservation=auto'. Must be called after - * zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinal value. + * zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinel value. * Return codes must match zfs_add_synthetic_resv(). 
*/ static int diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c index 653335b6e4f3..694497bc407f 100644 --- a/lib/libzfs/libzfs_pool.c +++ b/lib/libzfs/libzfs_pool.c @@ -1524,7 +1524,7 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) case EOVERFLOW: /* - * This occurrs when one of the devices is below + * This occurs when one of the devices is below * SPA_MINDEVSIZE. Unfortunately, we can't detect which * device was the problem device since there's no * reliable way to determine device size from userland. @@ -4147,7 +4147,7 @@ zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) /* * Sort the resulting bookmarks. This is a little confusing due to the * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last - * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks + * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks * _not_ copied as part of the process. So we point the start of our * array appropriate and decrement the total number of elements. */ @@ -4775,7 +4775,7 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) if (rval) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written " "EFI label on '%s' is damaged. Ensure\nthis device " - "is not in in use, and is functioning properly: %d"), + "is not in use, and is functioning properly: %d"), path, rval); return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); } diff --git a/lib/libzfs/libzfs_sendrecv.c b/lib/libzfs/libzfs_sendrecv.c index 9fdb990522d9..c178296f654f 100644 --- a/lib/libzfs/libzfs_sendrecv.c +++ b/lib/libzfs/libzfs_sendrecv.c @@ -4702,7 +4702,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap, /* * Raw sends can not be performed as an incremental on top - * of existing unencryppted datasets. zfs recv -F cant be + * of existing unencrypted datasets. zfs recv -F can't be * used to blow away an existing encrypted filesystem. This * is because it would require the dsl dir to point to the * new key (or lack of a key) and the old key at the same diff --git a/lib/libzfs_core/libzfs_core.c b/lib/libzfs_core/libzfs_core.c index d441f3655d03..a3dc70f9e486 100644 --- a/lib/libzfs_core/libzfs_core.c +++ b/lib/libzfs_core/libzfs_core.c @@ -822,7 +822,7 @@ recv_impl(const char *snapname, nvlist_t *recvdprops, nvlist_t *localprops, } /* - * All recives with a payload should use the new interface. + * All receives with a payload should use the new interface. */ if (resumable || raw || wkeydata != NULL || payload) { nvlist_t *outnvl = NULL; diff --git a/lib/libzutil/zutil_import.c b/lib/libzutil/zutil_import.c index e82744383dc0..28733cc747cc 100644 --- a/lib/libzutil/zutil_import.c +++ b/lib/libzutil/zutil_import.c @@ -1793,7 +1793,7 @@ zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock, char *dpath, *name; /* - * Seperate the directory part and last part of the + * Separate the directory part and last part of the * path. We do this so that we can get the realpath of * the directory. We don't get the realpath on the * whole path because if it's a symlink, we want the @@ -2080,8 +2080,8 @@ zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg) tpool_destroy(t); /* - * Process the cache filtering out any entries which are not - * for the specificed pool then adding matching label configs. + * Process the cache, filtering out any entries which are not + * for the specified pool then adding matching label configs. 
*/ cookie = NULL; while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) { From e1cfd73f7f91f1ccf4b19ec26adcbcd575f546c9 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 02:56:41 +0200 Subject: [PATCH 54/68] Fix typos in module/zfs/ Reviewed-by: Matt Ahrens Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9240 --- module/zfs/arc.c | 18 +++++++++--------- module/zfs/dbuf.c | 8 ++++---- module/zfs/dmu.c | 6 +++--- module/zfs/dmu_objset.c | 4 ++-- module/zfs/dmu_send.c | 4 ++-- module/zfs/dmu_zfetch.c | 2 +- module/zfs/dnode.c | 2 +- module/zfs/dsl_bookmark.c | 2 +- module/zfs/dsl_crypt.c | 6 +++--- module/zfs/dsl_dataset.c | 8 ++++---- module/zfs/dsl_destroy.c | 2 +- module/zfs/dsl_dir.c | 4 ++-- module/zfs/dsl_scan.c | 6 +++--- module/zfs/dsl_synctask.c | 2 +- module/zfs/dsl_userhold.c | 6 +++--- module/zfs/fm.c | 4 ++-- module/zfs/metaslab.c | 16 ++++++++-------- module/zfs/mmp.c | 4 ++-- module/zfs/policy.c | 2 +- module/zfs/qat.h | 6 +++--- module/zfs/sa.c | 4 ++-- module/zfs/spa.c | 12 ++++++------ module/zfs/spa_checkpoint.c | 2 +- module/zfs/spa_errlog.c | 2 +- module/zfs/spa_history.c | 2 +- module/zfs/spa_log_spacemap.c | 6 +++--- module/zfs/txg.c | 6 +++--- module/zfs/vdev.c | 6 +++--- module/zfs/vdev_cache.c | 2 +- module/zfs/vdev_initialize.c | 2 +- module/zfs/vdev_mirror.c | 2 +- module/zfs/vdev_queue.c | 2 +- module/zfs/vdev_raidz.c | 6 +++--- .../zfs/vdev_raidz_math_aarch64_neon_common.h | 2 +- module/zfs/zcp.c | 6 +++--- module/zfs/zcp_get.c | 4 ++-- module/zfs/zcp_iter.c | 2 +- module/zfs/zfs_acl.c | 2 +- module/zfs/zfs_byteswap.c | 4 ++-- module/zfs/zfs_ctldir.c | 2 +- module/zfs/zfs_dir.c | 4 ++-- module/zfs/zfs_ioctl.c | 4 ++-- module/zfs/zfs_vfsops.c | 4 ++-- module/zfs/zfs_vnops.c | 6 +++--- module/zfs/zfs_znode.c | 4 ++-- module/zfs/zil.c | 4 ++-- module/zfs/zio_checksum.c | 2 +- module/zfs/zio_compress.c | 2 +- module/zfs/zio_crypt.c | 4 ++-- module/zfs/zio_inject.c | 2 +- module/zfs/zpl_super.c | 2 +- module/zfs/zvol.c | 2 +- 52 files changed, 114 insertions(+), 114 deletions(-) diff --git a/module/zfs/arc.c b/module/zfs/arc.c index b5fca8e26313..1235074efec7 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -62,7 +62,7 @@ * elements of the cache are therefore exactly the same size. So * when adjusting the cache size following a cache miss, its simply * a matter of choosing a single page to evict. In our model, we - * have variable sized cache blocks (rangeing from 512 bytes to + * have variable sized cache blocks (ranging from 512 bytes to * 128K bytes). We therefore choose a set of blocks to evict to make * space for a cache miss that approximates as closely as possible * the space used by the new block. @@ -262,7 +262,7 @@ * The L1ARC has a slightly different system for storing encrypted data. * Raw (encrypted + possibly compressed) data has a few subtle differences from * data that is just compressed. The biggest difference is that it is not - * possible to decrypt encrypted data (or visa versa) if the keys aren't loaded. + * possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded. * The other difference is that encryption cannot be treated as a suggestion. 
* If a caller would prefer compressed data, but they actually wind up with * uncompressed data the worst thing that could happen is there might be a @@ -2151,7 +2151,7 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb, } /* - * Adjust encrypted and authenticated headers to accomodate + * Adjust encrypted and authenticated headers to accommodate * the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are * allowed to fail decryption due to keys not being loaded * without being marked as an IO error. @@ -2220,7 +2220,7 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb, if (arc_buf_is_shared(buf)) { ASSERT(ARC_BUF_COMPRESSED(buf)); - /* We need to give the buf it's own b_data */ + /* We need to give the buf its own b_data */ buf->b_flags &= ~ARC_BUF_FLAG_SHARED; buf->b_data = arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); @@ -2836,7 +2836,7 @@ arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf) * sufficient to make this guarantee, however it's possible * (specifically in the rare L2ARC write race mentioned in * arc_buf_alloc_impl()) there will be an existing uncompressed buf that - * is sharable, but wasn't at the time of its allocation. Rather than + * is shareable, but wasn't at the time of its allocation. Rather than * allow a new shared uncompressed buf to be created and then shuffle * the list around to make it the last element, this simply disallows * sharing if the new buf isn't the first to be added. @@ -2895,7 +2895,7 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb, /* * Only honor requests for compressed bufs if the hdr is actually - * compressed. This must be overriden if the buffer is encrypted since + * compressed. This must be overridden if the buffer is encrypted since * encrypted buffers cannot be decompressed. */ if (encrypted) { @@ -3199,7 +3199,7 @@ arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf) } /* - * Free up buf->b_data and pull the arc_buf_t off of the the arc_buf_hdr_t's + * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's * list and free it. */ static void @@ -3658,7 +3658,7 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt) /* * This function is used by the send / receive code to convert a newly * allocated arc_buf_t to one that is suitable for a raw encrypted write. It - * is also used to allow the root objset block to be uupdated without altering + * is also used to allow the root objset block to be updated without altering * its embedded MACs. Both block types will always be uncompressed so we do not * have to worry about compression type or psize. */ @@ -6189,7 +6189,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, /* * Determine if we have an L1 cache hit or a cache miss. For simplicity - * we maintain encrypted data seperately from compressed / uncompressed + * we maintain encrypted data separately from compressed / uncompressed * data. If the user is requesting raw encrypted data and we don't have * that in the header we will read from disk to guarantee that we can * get it even if the encryption keys aren't loaded. diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index ace862637de1..c3127ee0efcf 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -2337,7 +2337,7 @@ dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) ASSERT(!zfs_refcount_is_zero(&db->db_holds)); /* - * Quick check for dirtyness. For already dirty blocks, this + * Quick check for dirtiness. 
For already dirty blocks, this * reduces runtime of this function by >90%, and overall performance * by 50% for some workloads (e.g. file deletion with indirect blocks * cached). @@ -2892,7 +2892,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, * Hold the dn_dbufs_mtx while we get the new dbuf * in the hash table *and* added to the dbufs list. * This prevents a possible deadlock with someone - * trying to look up this dbuf before its added to the + * trying to look up this dbuf before it's added to the * dn_dbufs list. */ mutex_enter(&dn->dn_dbufs_mtx); @@ -3337,7 +3337,7 @@ dbuf_hold_impl_arg(struct dbuf_hold_arg *dh) ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf)); /* - * If this buffer is currently syncing out, and we are are + * If this buffer is currently syncing out, and we are * still referencing it from db_data, we need to make a copy * of it in case we decide we want to dirty it again in this txg. */ @@ -3812,7 +3812,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) /* * This buffer was allocated at a time when there was * no available blkptrs from the dnode, or it was - * inappropriate to hook it in (i.e., nlevels mis-match). + * inappropriate to hook it in (i.e., nlevels mismatch). */ ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); ASSERT(db->db_parent == NULL); diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index aa3ef6458d47..c7ddbcba7cf9 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -639,11 +639,11 @@ dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag) /* * Issue prefetch i/os for the given blocks. If level is greater than 0, the - * indirect blocks prefeteched will be those that point to the blocks containing + * indirect blocks prefetched will be those that point to the blocks containing * the data starting at offset, and continuing to offset + len. * * Note that if the indirect blocks above the blocks being prefetched are not - * in cache, they will be asychronously read in. + * in cache, they will be asynchronously read in. */ void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset, @@ -2176,7 +2176,7 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) * Determine dedup setting. If we are in dmu_sync(), * we won't actually dedup now because that's all * done in syncing context; but we do want to use the - * dedup checkum. If the checksum is not strong + * dedup checksum. If the checksum is not strong * enough to ensure unique signatures, force * dedup_verify. */ diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 3afafd1827ac..9350322fff4d 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -1028,7 +1028,7 @@ dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, /* * We don't want to have to increase the meta-dnode's nlevels - * later, because then we could do it in quescing context while + * later, because then we could do it in quiescing context while * we are also accessing it in open context. * * This precaution is not necessary for the MOS (ds == NULL), @@ -2648,7 +2648,7 @@ dmu_objset_find_dp_cb(void *arg) /* * We need to get a pool_config_lock here, as there are several - * asssert(pool_config_held) down the stack. Getting a lock via + * assert(pool_config_held) down the stack. Getting a lock via * dsl_pool_config_enter is risky, as it might be stalled by a * pending writer. This would deadlock, as the write lock can * only be granted when our parent thread gives up the lock. 
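
An editorial aside, not part of the patch itself: typo sweeps of this size are normally produced mechanically rather than by hand. Below is a minimal sketch of such a pass using the codespell tool, assuming codespell is installed; the directory list is illustrative and simply mirrors the trees these commits touch:

    # Report (but do not rewrite) suspected misspellings in the given trees.
    # Adding -w/--write-changes would apply the fixes in place.
    codespell module/zfs lib/libzfs lib/libzutil

Every hit still needs human review, since identifiers, quoted strings, and deliberate abbreviations are easily flagged by mistake.
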
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c index 884be31bd226..39f6883ff05f 100644 --- a/module/zfs/dmu_send.c +++ b/module/zfs/dmu_send.c @@ -548,7 +548,7 @@ dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object, /* * There's no pre-computed checksum for partial-block writes, * embedded BP's, or encrypted BP's that are being sent as - * plaintext, so (like fletcher4-checkummed blocks) userland + * plaintext, so (like fletcher4-checksummed blocks) userland * will have to compute a dedup-capable checksum itself. */ drrw->drr_checksumtype = ZIO_CHECKSUM_OFF; @@ -2262,7 +2262,7 @@ setup_send_progress(struct dmu_send_params *dspp) * * The final case is a simple zfs full or incremental send. The to_ds traversal * thread behaves the same as always. The redact list thread is never started. - * The send merge thread takes all the blocks that the to_ds traveral thread + * The send merge thread takes all the blocks that the to_ds traversal thread * sends it, prefetches the data, and sends the blocks on to the main thread. * The main thread sends the data over the wire. * diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c index 6511e4f8ea9e..b99106ae7d4d 100644 --- a/module/zfs/dmu_zfetch.c +++ b/module/zfs/dmu_zfetch.c @@ -221,7 +221,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data, * can only read from blocks that we carefully ensure are on * concrete vdevs (or previously-loaded indirect vdevs). So we * can't allow the predictive prefetcher to attempt reads of other - * blocks (e.g. of the MOS's dnode obejct). + * blocks (e.g. of the MOS's dnode object). */ if (!spa_indirect_vdevs_loaded(spa)) return; diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c index 108bf171420c..4ee192ed5e95 100644 --- a/module/zfs/dnode.c +++ b/module/zfs/dnode.c @@ -1787,7 +1787,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx) dn->dn_indblkshift = ibs; dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs; } - /* rele after we have fixed the blocksize in the dnode */ + /* release after we have fixed the blocksize in the dnode */ if (db) dbuf_rele(db, FTAG); diff --git a/module/zfs/dsl_bookmark.c b/module/zfs/dsl_bookmark.c index 4da17488ce8e..2126f3d9bdb0 100644 --- a/module/zfs/dsl_bookmark.c +++ b/module/zfs/dsl_bookmark.c @@ -88,7 +88,7 @@ dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname, } /* - * If later_ds is non-NULL, this will return EXDEV if the the specified bookmark + * If later_ds is non-NULL, this will return EXDEV if the specified bookmark * does not represents an earlier point in later_ds's timeline. However, * bmp will still be filled in if we return EXDEV. * diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c index 24711227ba55..271019e7902e 100644 --- a/module/zfs/dsl_crypt.c +++ b/module/zfs/dsl_crypt.c @@ -227,7 +227,7 @@ dsl_crypto_params_create_nvlist(dcp_cmd_t cmd, nvlist_t *props, goto error; } - /* if the user asked for the deault crypt, determine that now */ + /* if the user asked for the default crypt, determine that now */ if (dcp->cp_crypt == ZIO_CRYPT_ON) dcp->cp_crypt = ZIO_CRYPT_ON_VALUE; @@ -1596,7 +1596,7 @@ spa_keystore_change_key(const char *dsname, dsl_crypto_params_t *dcp) /* * Perform the actual work in syncing context. The blocks modified * here could be calculated but it would require holding the pool - * lock and tarversing all of the datasets that will have their keys + * lock and traversing all of the datasets that will have their keys * changed. 
*/ return (dsl_sync_task(dsname, spa_keystore_change_key_check, @@ -1714,7 +1714,7 @@ dsl_dataset_promote_crypt_sync(dsl_dir_t *target, dsl_dir_t *origin, return; /* - * If the target is being promoted to the encyrption root update the + * If the target is being promoted to the encryption root update the * DSL Crypto Key and keylocation to reflect that. We also need to * update the DSL Crypto Keys of all children inheritting their * encryption root to point to the new target. Otherwise, the check diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c index 069a66ecbc3b..8c5e6cb5cc24 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -393,7 +393,7 @@ load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f) } /* - * We have to release the fsid syncronously or we risk that a subsequent + * We have to release the fsid synchronously or we risk that a subsequent * mount of the same dataset will fail to unique_insert the fsid. This * failure would manifest itself as the fsid of this dataset changing * between mounts which makes NFS clients quite unhappy. @@ -2308,7 +2308,7 @@ get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv) * We use nvlist_alloc() instead of fnvlist_alloc() because the * latter would allocate the list with NV_UNIQUE_NAME flag. * As a result, every time a clone name is appended to the list - * it would be (linearly) searched for for a duplicate name. + * it would be (linearly) searched for a duplicate name. * We already know that all clone names must be unique and we * want avoid the quadratic complexity of double-checking that * because we can have a large number of clones. @@ -2683,7 +2683,7 @@ dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value, int error; dsl_pool_t *dp = ds->ds_dir->dd_pool; - /* Retrieve the mountpoint value stored in the zap opbject */ + /* Retrieve the mountpoint value stored in the zap object */ error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1, ZAP_MAXVALUELEN, value, source); if (error != 0) { @@ -3961,7 +3961,7 @@ dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone, * The clone can't be too much over the head's refquota. * * To ensure that the entire refquota can be used, we allow one - * transaction to exceed the the refquota. Therefore, this check + * transaction to exceed the refquota. Therefore, this check * needs to also allow for the space referenced to be more than the * refquota. The maximum amount of space that one transaction can use * on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this diff --git a/module/zfs/dsl_destroy.c b/module/zfs/dsl_destroy.c index 788753bdccdb..a30018341a84 100644 --- a/module/zfs/dsl_destroy.c +++ b/module/zfs/dsl_destroy.c @@ -667,7 +667,7 @@ dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer, /* * lzc_destroy_snaps() is documented to fill the errlist with - * int32 values, so we need to covert the int64 values that are + * int32 values, so we need to convert the int64 values that are * returned from LUA. */ int rv = 0; diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c index 7b3c892c02de..373f661a6eb2 100644 --- a/module/zfs/dsl_dir.c +++ b/module/zfs/dsl_dir.c @@ -97,7 +97,7 @@ * limit set. If there is a limit at any initialized level up the tree, the * check must pass or the creation will fail. Likewise, when a filesystem or * snapshot is destroyed, the counts are recursively adjusted all the way up - * the initizized nodes in the tree. 
Renaming a filesystem into different point + * the initialized nodes in the tree. Renaming a filesystem into different point * in the tree will first validate, then update the counts on each branch up to * the common ancestor. A receive will also validate the counts and then update * them. @@ -1467,7 +1467,7 @@ dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx) * less than the amount specified. * * NOTE: The behavior of this function is identical to the Illumos / FreeBSD - * version however it has been adjusted to use an iterative rather then + * version however it has been adjusted to use an iterative rather than * recursive algorithm to minimize stack usage. */ void diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c index d6956f5607c3..1becd4d55dae 100644 --- a/module/zfs/dsl_scan.c +++ b/module/zfs/dsl_scan.c @@ -1912,7 +1912,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, /* * This debugging is commented out to conserve stack space. This - * function is called recursively and the debugging addes several + * function is called recursively and the debugging adds several * bytes to the stack for each call. It can be commented back in * if required to debug an issue in dsl_scan_visitbp(). * @@ -3373,7 +3373,7 @@ dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) /* * This is the primary entry point for scans that is called from syncing * context. Scans must happen entirely during syncing context so that we - * cna guarantee that blocks we are currently scanning will not change out + * can guarantee that blocks we are currently scanning will not change out * from under us. While a scan is active, this function controls how quickly * transaction groups proceed, instead of the normal handling provided by * txg_sync_thread(). @@ -3977,7 +3977,7 @@ scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, * As can be seen, at fill_ratio=3, the algorithm is slightly biased towards * extents that are more completely filled (in a 3:2 ratio) vs just larger. * Note that as an optimization, we replace multiplication and division by - * 100 with bitshifting by 7 (which effecitvely multiplies and divides by 128). + * 100 with bitshifting by 7 (which effectively multiplies and divides by 128). */ static int ext_size_compare(const void *x, const void *y) diff --git a/module/zfs/dsl_synctask.c b/module/zfs/dsl_synctask.c index b225eed37d40..2d6ca8549eb9 100644 --- a/module/zfs/dsl_synctask.c +++ b/module/zfs/dsl_synctask.c @@ -143,7 +143,7 @@ dsl_sync_task(const char *pool, dsl_checkfunc_t *checkfunc, * For that reason, early synctasks can affect the process of writing dirty * changes to disk for the txg that they run and should be used with caution. * In addition, early synctasks should not dirty any metaslabs as this would - * invalidate the precodition/invariant for subsequent early synctasks. + * invalidate the precondition/invariant for subsequent early synctasks. * [see dsl_pool_sync() and dsl_early_sync_task_verify()] */ int diff --git a/module/zfs/dsl_userhold.c b/module/zfs/dsl_userhold.c index 638805d0b92b..2b2182fadec5 100644 --- a/module/zfs/dsl_userhold.c +++ b/module/zfs/dsl_userhold.c @@ -302,7 +302,7 @@ dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx) * holds is nvl of snapname -> holdname * errlist will be filled in with snapname -> error * - * The snaphosts must all be in the same pool. + * The snapshots must all be in the same pool. * * Holds for snapshots that don't exist will be skipped. 
* @@ -556,9 +556,9 @@ dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx) * errlist will be filled in with snapname -> error * * If tmpdp is not NULL the names for holds should be the dsobj's of snapshots, - * otherwise they should be the names of shapshots. + * otherwise they should be the names of snapshots. * - * As a release may cause snapshots to be destroyed this trys to ensure they + * As a release may cause snapshots to be destroyed this tries to ensure they * aren't mounted. * * The release of non-existent holds are skipped. diff --git a/module/zfs/fm.c b/module/zfs/fm.c index 0a0fc79bd372..98a844820b3a 100644 --- a/module/zfs/fm.c +++ b/module/zfs/fm.c @@ -31,7 +31,7 @@ * Name-Value Pair Lists * * The embodiment of an FMA protocol element (event, fmri or authority) is a - * name-value pair list (nvlist_t). FMA-specific nvlist construtor and + * name-value pair list (nvlist_t). FMA-specific nvlist constructor and * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used * to create an nvpair list using custom allocators. Callers may choose to * allocate either from the kernel memory allocator, or from a preallocated @@ -784,7 +784,7 @@ zfs_zevent_destroy(zfs_zevent_t *ze) #endif /* _KERNEL */ /* - * Wrapppers for FM nvlist allocators + * Wrappers for FM nvlist allocators */ /* ARGSUSED */ static void * diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 11b9ba8e9326..1b45e3e33c3d 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -112,7 +112,7 @@ int zfs_mg_noalloc_threshold = 0; /* * Metaslab groups are considered eligible for allocations if their - * fragmenation metric (measured as a percentage) is less than or + * fragmentation metric (measured as a percentage) is less than or * equal to zfs_mg_fragmentation_threshold. If a metaslab group * exceeds this threshold then it will be skipped unless all metaslab * groups within the metaslab class have also crossed this threshold. @@ -1285,7 +1285,7 @@ metaslab_largest_unflushed_free(metaslab_t *msp) * deferred. Similar logic applies to the ms_freed tree. See * metaslab_load() for more details. * - * There are two primary sources of innacuracy in this estimate. Both + * There are two primary sources of inaccuracy in this estimate. Both * are tolerated for performance reasons. The first source is that we * only check the largest segment for overlaps. Smaller segments may * have more favorable overlaps with the other trees, resulting in @@ -1874,7 +1874,7 @@ metaslab_verify_weight_and_frag(metaslab_t *msp) * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from * this class that was used longest ago, and attempt to unload it. We don't * want to spend too much time in this loop to prevent performance - * degredation, and we expect that most of the time this operation will + * degradation, and we expect that most of the time this operation will * succeed. Between that and the normal unloading processing during txg sync, * we expect this to keep the metaslab memory usage under control. */ @@ -3060,7 +3060,7 @@ metaslab_passivate(metaslab_t *msp, uint64_t weight) * we either fail an allocation attempt (similar to space-based metaslabs) * or have exhausted the free space in zfs_metaslab_switch_threshold * buckets since the metaslab was activated. This function checks to see - * if we've exhaused the zfs_metaslab_switch_threshold buckets in the + * if we've exhausted the zfs_metaslab_switch_threshold buckets in the * metaslab and passivates it proactively. 
This will allow us to select a * metaslab with a larger contiguous region, if any, remaining within this * metaslab group. If we're in sync pass > 1, then we continue using this @@ -4294,7 +4294,7 @@ metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) * have selected, we may not try the newly-activated metaslab, and instead * activate another metaslab. This is not optimal, but generally does not cause * any problems (a possible exception being if every metaslab is completely full - * except for the the newly-activated metaslab which we fail to examine). + * except for the newly-activated metaslab which we fail to examine). */ static metaslab_t * find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, @@ -4441,7 +4441,7 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, /* * Even though we don't hold the ms_lock for the * primary metaslab, those fields should not - * change while we hold the mg_lock. Thus is is + * change while we hold the mg_lock. Thus it is * safe to make assertions on them. */ ASSERT(msp->ms_primary); @@ -4879,7 +4879,7 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, /* * If we don't need to try hard, then require that the - * block be on an different metaslab from any other DVAs + * block be on a different metaslab from any other DVAs * in this BP (unique=true). If we are trying hard, then * allow any metaslab to be used (unique=false). */ @@ -5685,7 +5685,7 @@ metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) * * It would intuitively make sense to also check the current allocating * tree since metaslab_unalloc_dva() exists for extents that are - * allocated and freed in the same sync pass withing the same txg. + * allocated and freed in the same sync pass within the same txg. * Unfortunately there are places (e.g. the ZIL) where we allocate a * segment but then we free part of it within the same txg * [see zil_sync()]. Thus, we don't call range_tree_verify() in the diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c index 1ffd862da126..810d20fdd95c 100644 --- a/module/zfs/mmp.c +++ b/module/zfs/mmp.c @@ -87,12 +87,12 @@ * * In this case, a weak guarantee is provided. Since the host which last had * the pool imported will suspend the pool if no mmp writes land within - * fail_intervals * multihost_interval ms, the absense of writes during that + * fail_intervals * multihost_interval ms, the absence of writes during that * time means either the pool is not imported, or it is imported but the pool * is suspended and no further writes will occur. * * Note that resuming the suspended pool on the remote host would invalidate - * this gurantee, and so it is not allowed. + * this guarantee, and so it is not allowed. * * The factor of 2 provides a conservative safety factor and derives from * MMP_IMPORT_SAFETY_FACTOR; diff --git a/module/zfs/policy.c b/module/zfs/policy.c index a723235d3015..7f9456a670eb 100644 --- a/module/zfs/policy.c +++ b/module/zfs/policy.c @@ -70,7 +70,7 @@ static int priv_policy_user(const cred_t *cr, int capability, boolean_t all, int err) { /* - * All priv_policy_user checks are preceeded by kuid/kgid_has_mapping() + * All priv_policy_user checks are preceded by kuid/kgid_has_mapping() * checks. If we cannot do them, we shouldn't be using ns_capable() * since we don't know whether the affected files are valid in our * namespace. 
Note that kuid_has_mapping() came after cred->user_ns, so diff --git a/module/zfs/qat.h b/module/zfs/qat.h index 9014c03148ba..fdd608139402 100644 --- a/module/zfs/qat.h +++ b/module/zfs/qat.h @@ -85,7 +85,7 @@ typedef struct qat_stats { * Number of fails in the QAT compression / decompression engine. * Note: when a QAT error happens, it doesn't necessarily indicate a * critical hardware issue. Sometimes it is because the output buffer - * is not big enough. The compression job will be transfered to the + * is not big enough. The compression job will be transferred to the * gzip software implementation so the functionality of ZFS is not * impacted. */ @@ -118,7 +118,7 @@ typedef struct qat_stats { /* * Number of fails in the QAT encryption / decryption engine. * Note: when a QAT error happens, it doesn't necessarily indicate a - * critical hardware issue. The encryption job will be transfered + * critical hardware issue. The encryption job will be transferred * to the software implementation so the functionality of ZFS is * not impacted. */ @@ -135,7 +135,7 @@ typedef struct qat_stats { /* * Number of fails in the QAT checksum engine. * Note: when a QAT error happens, it doesn't necessarily indicate a - * critical hardware issue. The checksum job will be transfered to the + * critical hardware issue. The checksum job will be transferred to the * software implementation so the functionality of ZFS is not impacted. */ kstat_named_t cksum_fails; diff --git a/module/zfs/sa.c b/module/zfs/sa.c index f718e7662e6e..621838396a45 100644 --- a/module/zfs/sa.c +++ b/module/zfs/sa.c @@ -83,7 +83,7 @@ * Layouts are simply an array of the attributes and their * ordering i.e. [0, 1, 4, 5, 2] * - * Each distinct layout is given a unique layout number and that is whats + * Each distinct layout is given a unique layout number and that is what's * stored in the header at the beginning of the SA data buffer. * * A layout only covers a single dbuf (bonus or spill). If a set of @@ -95,7 +95,7 @@ * Adding a single attribute will cause the entire set of attributes to * be rewritten and could result in a new layout number being constructed * as part of the rewrite if no such layout exists for the new set of - * attribues. The new attribute will be appended to the end of the already + * attributes. The new attribute will be appended to the end of the already * existing attributes. * * Both the attribute registration and attribute layout information are diff --git a/module/zfs/spa.c b/module/zfs/spa.c index c404e876b4bc..f4a6f3f456b8 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -2204,7 +2204,7 @@ spa_load_verify_done(zio_t *zio) } /* - * Maximum number of inflight bytes is the log2 faction of the arc size. + * Maximum number of inflight bytes is the log2 fraction of the arc size. * By default, we set it to 1/16th of the arc. */ int spa_load_verify_shift = 4; @@ -3030,7 +3030,7 @@ spa_activity_check_duration(spa_t *spa, uberblock_t *ub) } else if (MMP_VALID(ub)) { /* - * zfs-0.7 compatability case + * zfs-0.7 compatibility case */ import_delay = MAX(import_delay, (multihost_interval + @@ -4339,7 +4339,7 @@ spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, need_update = B_TRUE; /* - * Update the config cache asychronously in case we're the + * Update the config cache asynchronously in case we're the * root pool, in which case the config cache isn't writable yet. 
*/ if (need_update) @@ -4652,7 +4652,7 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport) return (error); /* - * Redo the loading process process again with the + * Redo the loading process again with the * checkpointed uberblock. */ spa_ld_prepare_for_reload(spa); @@ -8441,7 +8441,7 @@ spa_sync_props(void *arg, dmu_tx_t *tx) case ZPOOL_PROP_READONLY: case ZPOOL_PROP_CACHEFILE: /* - * 'readonly' and 'cachefile' are also non-persisitent + * 'readonly' and 'cachefile' are also non-persistent * properties. */ break; @@ -9278,7 +9278,7 @@ EXPORT_SYMBOL(spa_inject_delref); EXPORT_SYMBOL(spa_scan_stat_init); EXPORT_SYMBOL(spa_scan_get_stats); -/* device maniion */ +/* device manipulation */ EXPORT_SYMBOL(spa_vdev_add); EXPORT_SYMBOL(spa_vdev_attach); EXPORT_SYMBOL(spa_vdev_detach); diff --git a/module/zfs/spa_checkpoint.c b/module/zfs/spa_checkpoint.c index d6f68ceda589..44711acef5a2 100644 --- a/module/zfs/spa_checkpoint.c +++ b/module/zfs/spa_checkpoint.c @@ -102,7 +102,7 @@ * Once the synctask is done and the discarding zthr is awake, we discard * the checkpointed data over multiple TXGs by having the zthr prefetching * entries from vdev_checkpoint_sm and then starting a synctask that places - * them as free blocks in to their respective ms_allocatable and ms_sm + * them as free blocks into their respective ms_allocatable and ms_sm * structures. * [see spa_checkpoint_discard_thread()] * diff --git a/module/zfs/spa_errlog.c b/module/zfs/spa_errlog.c index e42f8a0212f6..fa5120eb61b3 100644 --- a/module/zfs/spa_errlog.c +++ b/module/zfs/spa_errlog.c @@ -31,7 +31,7 @@ * and the current log. All errors seen are logged to the current log. When a * scrub completes, the current log becomes the last log, the last log is thrown * out, and the current log is reinitialized. This way, if an error is somehow - * corrected, a new scrub will show that that it no longer exists, and will be + * corrected, a new scrub will show that it no longer exists, and will be * deleted from the log when the scrub completes. * * The log is stored using a ZAP object whose key is a string form of the diff --git a/module/zfs/spa_history.c b/module/zfs/spa_history.c index 68c6b544e378..d2839b97fc0d 100644 --- a/module/zfs/spa_history.c +++ b/module/zfs/spa_history.c @@ -63,7 +63,7 @@ * overwrite the original creation of the pool. 'sh_phys_max_off' is the * physical ending offset in bytes of the log. This tells you the length of * the buffer. 'sh_eof' is the logical EOF (in bytes). Whenever a record - * is added, 'sh_eof' is incremented by the the size of the record. + * is added, 'sh_eof' is incremented by the size of the record. * 'sh_eof' is never decremented. 'sh_bof' is the logical BOF (in bytes). * This is where the consumer should start reading from after reading in * the 'zpool create' portion of the log. diff --git a/module/zfs/spa_log_spacemap.c b/module/zfs/spa_log_spacemap.c index 550aa1e3a5f5..8a8593a2711d 100644 --- a/module/zfs/spa_log_spacemap.c +++ b/module/zfs/spa_log_spacemap.c @@ -180,7 +180,7 @@ unsigned long zfs_log_sm_blksz = 1ULL << 17; /* - * Percentage of the overall system’s memory that ZFS allows to be + * Percentage of the overall system's memory that ZFS allows to be * used for unflushed changes (e.g. the sum of size of all the nodes * in the unflushed trees). 
* @@ -392,7 +392,7 @@ summary_entry_is_full(spa_t *spa, log_summary_entry_t *e) * Update the log summary information to reflect the fact that a metaslab * was flushed or destroyed (e.g due to device removal or pool export/destroy). * - * We typically flush the oldest flushed metaslab so the first (and olderst) + * We typically flush the oldest flushed metaslab so the first (and oldest) * entry of the summary is updated. However if that metaslab is getting loaded * we may flush the second oldest one which may be part of an entry later in * the summary. Moreover, if we call into this function from metaslab_fini() @@ -838,7 +838,7 @@ spa_flush_metaslabs(spa_t *spa, dmu_tx_t *tx) /* * Close the log space map for this TXG and update the block counts - * for the the log's in-memory structure and the summary. + * for the log's in-memory structure and the summary. */ void spa_sync_close_syncing_log_sm(spa_t *spa) diff --git a/module/zfs/txg.c b/module/zfs/txg.c index b7914e000d5e..dc085f78dd03 100644 --- a/module/zfs/txg.c +++ b/module/zfs/txg.c @@ -644,8 +644,8 @@ txg_quiesce_thread(void *arg) /* * Delay this thread by delay nanoseconds if we are still in the open - * transaction group and there is already a waiting txg quiesing or quiesced. - * Abort the delay if this txg stalls or enters the quiesing state. + * transaction group and there is already a waiting txg quiescing or quiesced. + * Abort the delay if this txg stalls or enters the quiescing state. */ void txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution) @@ -768,7 +768,7 @@ txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce) /* * If there isn't a txg syncing or in the pipeline, push another txg through - * the pipeline by queiscing the open txg. + * the pipeline by quiescing the open txg. */ void txg_kick(dsl_pool_t *dp) diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c index a6280e0112ed..f083732b2188 100644 --- a/module/zfs/vdev.c +++ b/module/zfs/vdev.c @@ -223,7 +223,7 @@ vdev_default_xlate(vdev_t *vd, const range_seg_t *in, range_seg_t *res) } /* - * Derive the enumerated alloction bias from string input. + * Derive the enumerated allocation bias from string input. * String origin is either the per-vdev zap or zpool(1M). */ static vdev_alloc_bias_t @@ -1321,7 +1321,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg) #ifndef _KERNEL /* - * To accomodate zdb_leak_init() fake indirect + * To accommodate zdb_leak_init() fake indirect * metaslabs, we allocate a metaslab group for * indirect vdevs which normally don't have one. */ @@ -4191,7 +4191,7 @@ vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion * factor. We must calculate this here and not at the root vdev * because the root vdev's psize-to-asize is simply the max of its - * childrens', thus not accurate enough for us. + * children's, thus not accurate enough for us. */ dspace_delta = vdev_deflated_space(vd, space_delta); diff --git a/module/zfs/vdev_cache.c b/module/zfs/vdev_cache.c index 0f1d9448b590..b63b9f9795f9 100644 --- a/module/zfs/vdev_cache.c +++ b/module/zfs/vdev_cache.c @@ -46,7 +46,7 @@ * terribly wasteful of bandwidth. A more intelligent version of the cache * could keep track of access patterns and not do read-ahead unless it sees * at least two temporally close I/Os to the same region. Currently, only - * metadata I/O is inflated. A futher enhancement could take advantage of + * metadata I/O is inflated. 
A further enhancement could take advantage of * more semantic information about the I/O. And it could use something * faster than an AVL tree; that was chosen solely for convenience. * diff --git a/module/zfs/vdev_initialize.c b/module/zfs/vdev_initialize.c index a355f185cc2e..4963ba38d434 100644 --- a/module/zfs/vdev_initialize.c +++ b/module/zfs/vdev_initialize.c @@ -602,7 +602,7 @@ vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list) } /* - * Stop initializing a device, with the resultant initialing state being + * Stop initializing a device, with the resultant initializing state being * tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when * a list_t is provided the stopping vdev is inserted in to the list. Callers * are then required to call vdev_initialize_stop_wait() to block for all the diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c index 23ff75bfc96f..2f75fca827f7 100644 --- a/module/zfs/vdev_mirror.c +++ b/module/zfs/vdev_mirror.c @@ -485,7 +485,7 @@ vdev_mirror_preferred_child_randomize(zio_t *zio) /* * Try to find a vdev whose DTL doesn't contain the block we want to read - * prefering vdevs based on determined load. + * preferring vdevs based on determined load. * * Try to find a child whose DTL doesn't contain the block we want to read. * If we can't, try the read on any vdev we haven't already tried. diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c index 86b20f134834..d3d9a6baa4a3 100644 --- a/module/zfs/vdev_queue.c +++ b/module/zfs/vdev_queue.c @@ -893,7 +893,7 @@ vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority) * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate zio * code to issue IOs without adding them to the vdev queue. In this * case, the zio is already going to be issued as quickly as possible - * and so it doesn't need any reprioitization to help. + * and so it doesn't need any reprioritization to help. */ if (zio->io_priority == ZIO_PRIORITY_NOW) return; diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c index 327b186713fa..f63ccaa94cb8 100644 --- a/module/zfs/vdev_raidz.c +++ b/module/zfs/vdev_raidz.c @@ -98,7 +98,7 @@ * R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1 * = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1 * - * We chose 1, 2, and 4 as our generators because 1 corresponds to the trival + * We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial * XOR operation, and 2 and 4 can be computed quickly and generate linearly- * independent coefficients. (There are no additional coefficients that have * this property which is why the uncorrected Plank method breaks down.) @@ -447,7 +447,7 @@ vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols, /* * If all data stored spans all columns, there's a danger that parity * will always be on the same device and, since parity isn't read - * during normal operation, that that device's I/O bandwidth won't be + * during normal operation, that device's I/O bandwidth won't be * used effectively. We therefore switch the parity every 1MB. * * ... at least that was, ostensibly, the theory. As a practical @@ -2336,7 +2336,7 @@ vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded) /* * Determine if any portion of the provided block resides on a child vdev * with a dirty DTL and therefore needs to be resilvered. 
The function - * assumes that at least one DTL is dirty which imples that full stripe + * assumes that at least one DTL is dirty which implies that full stripe * width blocks must be resilvered. */ static boolean_t diff --git a/module/zfs/vdev_raidz_math_aarch64_neon_common.h b/module/zfs/vdev_raidz_math_aarch64_neon_common.h index 024917417a55..0ea2ad611c77 100644 --- a/module/zfs/vdev_raidz_math_aarch64_neon_common.h +++ b/module/zfs/vdev_raidz_math_aarch64_neon_common.h @@ -42,7 +42,7 @@ /* * Here we need registers not used otherwise. * They will be used in unused ASM for the case - * with more registers than required... but GGC + * with more registers than required... but GCC * will still need to make sure the constraints * are correct, and duplicate constraints are illegal * ... and we use the "register" number as a name diff --git a/module/zfs/zcp.c b/module/zfs/zcp.c index 1aeea131449e..44e4d230a30f 100644 --- a/module/zfs/zcp.c +++ b/module/zfs/zcp.c @@ -66,7 +66,7 @@ * consuming excessive system or running forever. If one of these limits is * hit, the channel program will be stopped immediately and return from * zcp_eval() with an error code. No attempt will be made to roll back or undo - * any changes made by the channel program before the error occured. + * any changes made by the channel program before the error occurred. * Consumers invoking zcp_eval() from elsewhere in the kernel may pass a time * limit of 0, disabling the time limit. * @@ -77,7 +77,7 @@ * In place of a return value, an error message will also be returned in the * 'result' nvlist containing information about the error. No attempt will be * made to roll back or undo any changes made by the channel program before the - * error occured. + * error occurred. * * 3. If an error occurs inside a ZFS library call which returns an error code, * the error is returned to the Lua script to be handled as desired. @@ -160,7 +160,7 @@ zcp_argerror(lua_State *state, int narg, const char *msg, ...) * of a function call. * * If an error occurs, the cleanup function will be invoked exactly once and - * then unreigstered. + * then unregistered. * * Returns the registered cleanup handler so the caller can deregister it * if no error occurs. diff --git a/module/zfs/zcp_get.c b/module/zfs/zcp_get.c index 0a5f0b8242ab..42c125d48cd0 100644 --- a/module/zfs/zcp_get.c +++ b/module/zfs/zcp_get.c @@ -547,7 +547,7 @@ get_zap_prop(lua_State *state, dsl_dataset_t *ds, zfs_prop_t zfs_prop) error = dsl_prop_get_ds(ds, prop_name, sizeof (numval), 1, &numval, setpoint); - /* Fill in temorary value for prop, if applicable */ + /* Fill in temporary value for prop, if applicable */ (void) get_temporary_prop(ds, zfs_prop, &numval, setpoint); /* Push value to lua stack */ @@ -678,7 +678,7 @@ parse_userquota_prop(const char *prop_name, zfs_userquota_prop_t *type, if (strncmp(cp, "S-1-", 4) == 0) { /* * It's a numeric SID (eg "S-1-234-567-89") and we want to - * seperate the domain id and the rid + * separate the domain id and the rid */ int domain_len = strrchr(cp, '-') - cp; domain_val = kmem_alloc(domain_len + 1, KM_SLEEP); diff --git a/module/zfs/zcp_iter.c b/module/zfs/zcp_iter.c index 7600e662dfbb..f727c56f212d 100644 --- a/module/zfs/zcp_iter.c +++ b/module/zfs/zcp_iter.c @@ -457,7 +457,7 @@ static zcp_list_info_t zcp_system_props_list_info = { }; /* - * Get a list of all visble system properties and their values for a given + * Get a list of all visible system properties and their values for a given * dataset. 
Returned on the stack as a Lua table. */ static int diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c index b1af4da2f4a5..26af91e27d42 100644 --- a/module/zfs/zfs_acl.c +++ b/module/zfs/zfs_acl.c @@ -810,7 +810,7 @@ zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr) * for zfs_copy_ace_2_fuid(). * * We only convert an ACL once, so this won't happen - * everytime. + * every time. */ oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count, KM_SLEEP); diff --git a/module/zfs/zfs_byteswap.c b/module/zfs/zfs_byteswap.c index 7893bde4e2db..1b8bb82c3fbc 100644 --- a/module/zfs/zfs_byteswap.c +++ b/module/zfs/zfs_byteswap.c @@ -44,7 +44,7 @@ zfs_oldace_byteswap(ace_t *ace, int ace_cnt) } /* - * swap ace_t and ace_oject_t + * swap ace_t and ace_object_t */ void zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout) @@ -70,7 +70,7 @@ zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout) * larger than needed to hold the aces * present. As long as we do not do any * swapping beyond the end of our block we are - * okay. It it safe to swap any non-ace data + * okay. It is safe to swap any non-ace data * within the block since it is just zeros. */ if (ptr + sizeof (zfs_ace_hdr_t) > end) { diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c index b3cbc7d7e5fa..1e61ef06d003 100644 --- a/module/zfs/zfs_ctldir.c +++ b/module/zfs/zfs_ctldir.c @@ -596,7 +596,7 @@ zfsctl_root(znode_t *zp) /* * Generate a long fid to indicate a snapdir. We encode whether snapdir is - * already monunted in gen field. We do this because nfsd lookup will not + * already mounted in gen field. We do this because nfsd lookup will not * trigger automount. Next time the nfsd does fh_to_dentry, we will notice * this and do automount and return ESTALE to force nfsd revalidate and follow * mount. diff --git a/module/zfs/zfs_dir.c b/module/zfs/zfs_dir.c index 63ac97754d37..6bdad737cd84 100644 --- a/module/zfs/zfs_dir.c +++ b/module/zfs/zfs_dir.c @@ -55,7 +55,7 @@ #include /* - * zfs_match_find() is used by zfs_dirent_lock() to peform zap lookups + * zfs_match_find() is used by zfs_dirent_lock() to perform zap lookups * of names after deciding which is the appropriate lookup interface. */ static int @@ -232,7 +232,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, /* * Wait until there are no locks on this name. * - * Don't grab the the lock if it is already held. However, cannot + * Don't grab the lock if it is already held. However, cannot * have both ZSHARED and ZHAVELOCK together. */ ASSERT(!(flag & ZSHARED) || !(flag & ZHAVELOCK)); diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index 399b15cbdef4..c5093fd447aa 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -2106,7 +2106,7 @@ zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os) * which we aren't supposed to do with a * DS_MODE_USER hold, because it could be * inconsistent. So this is a bit of a workaround... 
- * XXX reading with out owning + * XXX reading without owning */ if (!zc->zc_objset_stats.dds_inconsistent && dmu_objset_type(os) == DMU_OST_ZVOL) { @@ -7097,7 +7097,7 @@ zfs_check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_vec_t *vec) continue; if (nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) { - /* at least one non-optionial key is expected here */ + /* at least one non-optional key is expected here */ if (!required_keys_found) return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED)); continue; diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c index 34f4842d7162..0914e4b7de36 100644 --- a/module/zfs/zfs_vfsops.c +++ b/module/zfs/zfs_vfsops.c @@ -1477,7 +1477,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp) * "preferred" size. */ - /* Round up so we never have a filesytem using 0 blocks. */ + /* Round up so we never have a filesystem using 0 blocks. */ refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize); statp->f_blocks = (refdbytes + availbytes) >> bshift; statp->f_bfree = availbytes >> bshift; @@ -2431,7 +2431,7 @@ zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value) } /* - * Return true if the coresponding vfs's unmounted flag is set. + * Return true if the corresponding vfs's unmounted flag is set. * Otherwise return false. * If this function returns true we know VFS unmount has been initiated. */ diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 1ad6f1588cc2..de7b59935e8c 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -889,7 +889,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) * Clear Set-UID/Set-GID bits on successful write if not * privileged and at least one of the execute bits is set. * - * It would be nice to to this after all writes have + * It would be nice to do this after all writes have * been done, but that would still expose the ISUID/ISGID * to another app after the partial write is committed. * @@ -4378,7 +4378,7 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr, uint64_t txtype = TX_LINK; /* * tmpfile is created to be in z_unlinkedobj, so remove it. - * Also, we don't log in ZIL, be cause all previous file + * Also, we don't log in ZIL, because all previous file * operation on the tmpfile are ignored by ZIL. Instead we * always wait for txg to sync to make sure all previous * operation are sync safe. @@ -4638,7 +4638,7 @@ zfs_dirty_inode(struct inode *ip, int flags) #ifdef I_DIRTY_TIME /* - * This is the lazytime semantic indroduced in Linux 4.0 + * This is the lazytime semantic introduced in Linux 4.0 * This flag will only be called from update_time when lazytime is set. * (Note, I_DIRTY_SYNC will also set if not lazytime) * Fortunately mtime and ctime are managed within ZFS itself, so we diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c index 8512db9bcb2d..549c701a039e 100644 --- a/module/zfs/zfs_znode.c +++ b/module/zfs/zfs_znode.c @@ -788,7 +788,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, } /* - * No execs denied will be deterimed when zfs_mode_compute() is called. + * No execs denied will be determined when zfs_mode_compute() is called. */ pflags |= acl_ids->z_aclp->z_hints & (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT| @@ -1271,7 +1271,7 @@ zfs_rezget(znode_t *zp) * If the file has zero links, then it has been unlinked on the send * side and it must be in the received unlinked set. 
* We call zfs_znode_dmu_fini() now to prevent any accesses to the - * stale data and to prevent automatical removal of the file in + * stale data and to prevent automatic removal of the file in * zfs_zinactive(). The file will be removed either when it is removed * on the send side and the next incremental stream is received or * when the unlinked set gets processed. diff --git a/module/zfs/zil.c b/module/zfs/zil.c index 98678aa44655..8411e333b18d 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -58,7 +58,7 @@ * * In the event of a crash or power loss, the itxs contained by each * dataset's on-disk ZIL will be replayed when that dataset is first - * instantiated (e.g. if the dataset is a normal fileystem, when it is + * instantiated (e.g. if the dataset is a normal filesystem, when it is * first mounted). * * As hinted at above, there is one ZIL per dataset (both the in-memory @@ -2002,7 +2002,7 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) /* * If there are any in-memory intent log transactions which have now been * synced then start up a taskq to free them. We should only do this after we - * have written out the uberblocks (i.e. txg has been comitted) so that + * have written out the uberblocks (i.e. txg has been committed) so that * don't inadvertently clean out in-memory log records that would be required * by zil_commit(). */ diff --git a/module/zfs/zio_checksum.c b/module/zfs/zio_checksum.c index 7b148375d0c2..179fab5de365 100644 --- a/module/zfs/zio_checksum.c +++ b/module/zfs/zio_checksum.c @@ -308,7 +308,7 @@ zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa) mutex_exit(&spa->spa_cksum_tmpls_lock); } -/* convenience function to update a checksum to accomodate an encryption MAC */ +/* convenience function to update a checksum to accommodate an encryption MAC */ static void zio_checksum_handle_crypt(zio_cksum_t *cksum, zio_cksum_t *saved, boolean_t xor) { diff --git a/module/zfs/zio_compress.c b/module/zfs/zio_compress.c index f5cbc3e8218a..cdaade27c679 100644 --- a/module/zfs/zio_compress.c +++ b/module/zfs/zio_compress.c @@ -155,7 +155,7 @@ zio_decompress_data(enum zio_compress c, abd_t *src, void *dst, abd_return_buf(src, tmp, s_len); /* - * Decompression shouldn't fail, because we've already verifyied + * Decompression shouldn't fail, because we've already verified * the checksum. However, for extra protection (e.g. against bitflips * in non-ECC RAM), we handle this error (and test it). */ diff --git a/module/zfs/zio_crypt.c b/module/zfs/zio_crypt.c index eb781b64fa1d..7cf20f4136b8 100644 --- a/module/zfs/zio_crypt.c +++ b/module/zfs/zio_crypt.c @@ -369,7 +369,7 @@ zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt) /* * This function handles all encryption and decryption in zfs. When * encrypting it expects puio to reference the plaintext and cuio to - * reference the cphertext. cuio must have enough space for the + * reference the ciphertext. cuio must have enough space for the * ciphertext + room for a MAC. datalen should be the length of the * plaintext / ciphertext alone. */ @@ -934,7 +934,7 @@ zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version) /* * At L0 we want to verify these fields to ensure that data blocks - * can not be reinterpretted. For instance, we do not want an attacker + * can not be reinterpreted. For instance, we do not want an attacker * to trick us into returning raw lz4 compressed data to the user * by modifying the compression bits. 
At higher levels, we cannot * enforce this policy since raw sends do not convey any information diff --git a/module/zfs/zio_inject.c b/module/zfs/zio_inject.c index 78896d3dc38b..d8af503bdfc2 100644 --- a/module/zfs/zio_inject.c +++ b/module/zfs/zio_inject.c @@ -113,7 +113,7 @@ freq_triggered(uint32_t frequency) return (B_TRUE); /* - * Note: we still handle legacy (unscaled) frequecy values + * Note: we still handle legacy (unscaled) frequency values */ uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX; diff --git a/module/zfs/zpl_super.c b/module/zfs/zpl_super.c index 216c79401526..810ab28988a7 100644 --- a/module/zfs/zpl_super.c +++ b/module/zfs/zpl_super.c @@ -297,7 +297,7 @@ zpl_mount_impl(struct file_system_type *fs_type, int flags, zfs_mnt_t *zm) * The dsl pool lock must be released prior to calling sget(). * It is possible sget() may block on the lock in grab_super() * while deactivate_super() holds that same lock and waits for - * a txg sync. If the dsl_pool lock is held over over sget() + * a txg sync. If the dsl_pool lock is held over sget() * this can prevent the pool sync and cause a deadlock. */ dsl_pool_rele(dmu_objset_pool(os), FTAG); diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c index f74eb28aec86..840b8d008ec0 100644 --- a/module/zfs/zvol.c +++ b/module/zfs/zvol.c @@ -1997,7 +1997,7 @@ zvol_create_snap_minor_cb(const char *dsname, void *arg) /* at this point, the dsname should name a snapshot */ if (strchr(dsname, '@') == 0) { dprintf("zvol_create_snap_minor_cb(): " - "%s is not a shapshot name\n", dsname); + "%s is not a snapshot name\n", dsname); } else { minors_job_t *job; char *n = strdup(dsname); From ade306a9d4fa23649c11584b289eab917534bf13 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 02:58:26 +0200 Subject: [PATCH 55/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9242 --- tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh | 2 +- .../zfs-tests/tests/functional/snapshot/snapshot_002_pos.ksh | 2 +- .../zfs-tests/tests/functional/snapshot/snapshot_006_pos.ksh | 2 +- tests/zfs-tests/tests/functional/trim/trim.kshlib | 2 +- .../tests/functional/userquota/userquota_005_neg.ksh | 2 +- .../tests/functional/userquota/userquota_010_pos.ksh | 2 +- .../tests/functional/userquota/userquota_012_neg.ksh | 2 +- tests/zfs-tests/tests/functional/xattr/xattr_003_neg.ksh | 4 ++-- tests/zfs-tests/tests/functional/xattr/xattr_011_pos.ksh | 2 +- .../tests/functional/zvol/zvol_swap/zvol_swap_003_pos.ksh | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh index fa6105116574..a53aeabffcdd 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh @@ -26,7 +26,7 @@ # 3. Concurrently do the following: # 3.1. Perform 8K sync writes # 3.2. Perform log offline/online commands -# 4. Loop to test with growing "zfs_commit_timout_pct" values. +# 4. Loop to test with growing "zfs_commit_timeout_pct" values. 
# verify_runnable "global" diff --git a/tests/zfs-tests/tests/functional/snapshot/snapshot_002_pos.ksh b/tests/zfs-tests/tests/functional/snapshot/snapshot_002_pos.ksh index b404ffbd50e6..124a7db9c6e6 100755 --- a/tests/zfs-tests/tests/functional/snapshot/snapshot_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/snapshot/snapshot_002_pos.ksh @@ -36,7 +36,7 @@ # DESCRIPTION: # An archive of a zfs file system and an archive of its snapshot # is identical even though the original file system has -# changed sinced the snapshot was taken. +# changed since the snapshot was taken. # # STRATEGY: # 1) Create files in all of the zfs file systems diff --git a/tests/zfs-tests/tests/functional/snapshot/snapshot_006_pos.ksh b/tests/zfs-tests/tests/functional/snapshot/snapshot_006_pos.ksh index dc50e46933aa..68a616c02a6c 100755 --- a/tests/zfs-tests/tests/functional/snapshot/snapshot_006_pos.ksh +++ b/tests/zfs-tests/tests/functional/snapshot/snapshot_006_pos.ksh @@ -35,7 +35,7 @@ # # DESCRIPTION: # An archive of a zfs dataset and an archive of its snapshot -# changed sinced the snapshot was taken. +# changed since the snapshot was taken. # # STRATEGY: # 1) Create some files in a ZFS dataset diff --git a/tests/zfs-tests/tests/functional/trim/trim.kshlib b/tests/zfs-tests/tests/functional/trim/trim.kshlib index 02802d8c91bf..ed6a8f91b970 100644 --- a/tests/zfs-tests/tests/functional/trim/trim.kshlib +++ b/tests/zfs-tests/tests/functional/trim/trim.kshlib @@ -18,7 +18,7 @@ . $STF_SUITE/tests/functional/cli_root/zpool_trim/zpool_trim.kshlib # -# Get the actual on disk disk for the provided file. +# Get the actual size on disk for the provided file. # function get_size_mb { diff --git a/tests/zfs-tests/tests/functional/userquota/userquota_005_neg.ksh b/tests/zfs-tests/tests/functional/userquota/userquota_005_neg.ksh index 825ebe09b28b..5684b05b7e4b 100755 --- a/tests/zfs-tests/tests/functional/userquota/userquota_005_neg.ksh +++ b/tests/zfs-tests/tests/functional/userquota/userquota_005_neg.ksh @@ -64,7 +64,7 @@ for user in "${no_users[@]}"; do log_mustnot zfs set userquota@$user=100m $QFS done -log_note "can set all numberic id even that id is not existed" +log_note "can set all numeric id even if that id does not exist" log_must zfs set userquota@12345678=100m $QFS log_mustnot zfs set userquota@12345678=100m $snap_fs diff --git a/tests/zfs-tests/tests/functional/userquota/userquota_010_pos.ksh b/tests/zfs-tests/tests/functional/userquota/userquota_010_pos.ksh index 08af6560dc87..20c9c56ba5ef 100755 --- a/tests/zfs-tests/tests/functional/userquota/userquota_010_pos.ksh +++ b/tests/zfs-tests/tests/functional/userquota/userquota_010_pos.ksh @@ -33,7 +33,7 @@ # # DESCRIPTION: -# Check userquota and groupquota be overwrited at same time +# Check userquota and groupquota being exceeded at the same time # # # STRATEGY: diff --git a/tests/zfs-tests/tests/functional/userquota/userquota_012_neg.ksh b/tests/zfs-tests/tests/functional/userquota/userquota_012_neg.ksh index 088499eb0426..b553f91d40da 100755 --- a/tests/zfs-tests/tests/functional/userquota/userquota_012_neg.ksh +++ b/tests/zfs-tests/tests/functional/userquota/userquota_012_neg.ksh @@ -56,7 +56,7 @@ log_onexit cleanup typeset snap_fs=$QFS@snap log_assert "Check set userquota and groupquota on snapshot" -log_note "Check can not set user|group quuota on snapshot" +log_note "Check can not set user|group quota on snapshot" log_must zfs snapshot $snap_fs log_mustnot zfs set userquota@$QUSER1=$UQUOTA_SIZE $snap_fs diff --git 
a/tests/zfs-tests/tests/functional/xattr/xattr_003_neg.ksh b/tests/zfs-tests/tests/functional/xattr/xattr_003_neg.ksh index a56fce4eaba1..0a661e935b78 100755 --- a/tests/zfs-tests/tests/functional/xattr/xattr_003_neg.ksh +++ b/tests/zfs-tests/tests/functional/xattr/xattr_003_neg.ksh @@ -37,8 +37,8 @@ # should fail. # # STRATEGY: -# 1. Create a file, and set an with an xattr -# 2. Set the octal file permissions to 000 on the file. +# 1. Create a file with an xattr +# 2. Set the file permissions to 000 # 3. Check that we're unable to read the xattr as a non-root user # 4. Check that we're unable to write an xattr as a non-root user # diff --git a/tests/zfs-tests/tests/functional/xattr/xattr_011_pos.ksh b/tests/zfs-tests/tests/functional/xattr/xattr_011_pos.ksh index 80704fad75e1..246f077af0a8 100755 --- a/tests/zfs-tests/tests/functional/xattr/xattr_011_pos.ksh +++ b/tests/zfs-tests/tests/functional/xattr/xattr_011_pos.ksh @@ -135,7 +135,7 @@ else fi log_note "Checking mv" -# mv doesn't have any flags to preserve/ommit xattrs - they're +# mv doesn't have any flags to preserve/omit xattrs - they're # always moved. log_must touch $TESTDIR/mvfile.$$ create_xattr $TESTDIR/mvfile.$$ passwd /etc/passwd diff --git a/tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_003_pos.ksh b/tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_003_pos.ksh index 256ca53241bf..9ccf3f9ded55 100755 --- a/tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_003_pos.ksh @@ -40,7 +40,7 @@ # # STRATEGY: # 1. Modify /etc/vfstab to add the test zvol as swap device. -# 2. Use /sbin/swapadd to add zvol as swap device throuth /etc/vfstab +# 2. Use /sbin/swapadd to add zvol as swap device through /etc/vfstab # 3. Create a file under /tmp and verify the file # From 37e42197a4d2bec5d4307f61024c9678d1f3ffe0 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 03:07:35 +0200 Subject: [PATCH 56/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9243 --- tests/zfs-tests/tests/functional/rsend/rsend_008_pos.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/rsend_011_pos.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/rsend_012_pos.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/rsend_022_pos.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/rsend_024_pos.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/send-cD.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/send-c_resume.ksh | 2 +- .../tests/functional/rsend/send-c_stream_size_estimate.ksh | 2 +- .../tests/functional/rsend/send_realloc_encrypted_files.ksh | 2 +- tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/zfs-tests/tests/functional/rsend/rsend_008_pos.ksh b/tests/zfs-tests/tests/functional/rsend/rsend_008_pos.ksh index 5e657a898f4c..531478760457 100755 --- a/tests/zfs-tests/tests/functional/rsend/rsend_008_pos.ksh +++ b/tests/zfs-tests/tests/functional/rsend/rsend_008_pos.ksh @@ -38,7 +38,7 @@ # STRATEGY: # 1. Separately promote pool clone, filesystem clone and volume clone. # 2. Recursively backup all the POOL and restore in POOL2 -# 3. Verify all the datesets and property be properly received. +# 3. Verify all the datasets and properties were properly received. 
# verify_runnable "both" diff --git a/tests/zfs-tests/tests/functional/rsend/rsend_011_pos.ksh b/tests/zfs-tests/tests/functional/rsend/rsend_011_pos.ksh index 9ecd18d87da6..68f0e13927dc 100755 --- a/tests/zfs-tests/tests/functional/rsend/rsend_011_pos.ksh +++ b/tests/zfs-tests/tests/functional/rsend/rsend_011_pos.ksh @@ -63,7 +63,7 @@ for prop in $(fs_inherit_prop); do done # -# Inherit propertes in sub-datasets +# Inherit properties in sub-datasets # for ds in "$POOL/$FS/fs1" "$POOL/$FS/fs1/fs2" "$POOL/$FS/fs1/fclone" ; do for prop in $(fs_inherit_prop) ; do diff --git a/tests/zfs-tests/tests/functional/rsend/rsend_012_pos.ksh b/tests/zfs-tests/tests/functional/rsend/rsend_012_pos.ksh index 57d58b9bab77..d85970a74217 100755 --- a/tests/zfs-tests/tests/functional/rsend/rsend_012_pos.ksh +++ b/tests/zfs-tests/tests/functional/rsend/rsend_012_pos.ksh @@ -39,7 +39,7 @@ # 1. Setting properties for all the filesystem and volumes randomly # 2. Backup all the data from POOL by send -R # 3. Restore all the data in POOL2 -# 4. Verify all the perperties in two pools are same +# 4. Verify all the properties in the two pools are the same # verify_runnable "global" diff --git a/tests/zfs-tests/tests/functional/rsend/rsend_022_pos.ksh b/tests/zfs-tests/tests/functional/rsend/rsend_022_pos.ksh index 60be67328e1c..cb68b1c3b27d 100755 --- a/tests/zfs-tests/tests/functional/rsend/rsend_022_pos.ksh +++ b/tests/zfs-tests/tests/functional/rsend/rsend_022_pos.ksh @@ -25,7 +25,7 @@ # # Strategy: # 1. Bookmark a ZFS snapshot -# 2. Destroy the ZFS sanpshot +# 2. Destroy the ZFS snapshot # 3. Destroy the filesystem for the receive # 4. Verify receive of the full send stream # 5. Start an incremental ZFS send of the ZFS bookmark, redirect output to a diff --git a/tests/zfs-tests/tests/functional/rsend/rsend_024_pos.ksh b/tests/zfs-tests/tests/functional/rsend/rsend_024_pos.ksh index 20f0bee15572..2d9fb01af10f 100755 --- a/tests/zfs-tests/tests/functional/rsend/rsend_024_pos.ksh +++ b/tests/zfs-tests/tests/functional/rsend/rsend_024_pos.ksh @@ -25,7 +25,7 @@ # # Strategy: # 1. Destroy the filesystem for the receive -# 2. Unmount the source filsesystem +# 2. Unmount the source filesystem # 3. Start a full ZFS send, redirect output to a file # 4. Mess up the contents of the stream state file on disk # 5. Try ZFS receive, which should fail with a checksum mismatch error diff --git a/tests/zfs-tests/tests/functional/rsend/send-cD.ksh b/tests/zfs-tests/tests/functional/rsend/send-cD.ksh index ceface9dbc09..d0754a4f1aaa 100755 --- a/tests/zfs-tests/tests/functional/rsend/send-cD.ksh +++ b/tests/zfs-tests/tests/functional/rsend/send-cD.ksh @@ -45,7 +45,7 @@ typeset inc=$BACKDIR/stream.inc log_must zfs create -o compress=lz4 $sendfs log_must zfs create -o compress=lz4 $recvfs typeset dir=$(get_prop mountpoint $sendfs) -# Don't use write_compressible: we want compressible but undedupable data here. +# Don't use write_compressible: we want compressible but undeduplicable data. log_must eval "dd if=/dev/urandom bs=1024k count=4 | base64 >$dir/file" log_must zfs snapshot $sendfs@snap0 log_must eval "zfs send -D -c $sendfs@snap0 >$stream0" diff --git a/tests/zfs-tests/tests/functional/rsend/send-c_resume.ksh b/tests/zfs-tests/tests/functional/rsend/send-c_resume.ksh index d8d7c40e4931..05ba5ed244d9 100755 --- a/tests/zfs-tests/tests/functional/rsend/send-c_resume.ksh +++ b/tests/zfs-tests/tests/functional/rsend/send-c_resume.ksh @@ -28,7 +28,7 @@ # 2. Mess up the contents of the stream state file on disk # 3. 
Try ZFS receive, which should fail with a checksum mismatch error # 4. ZFS send to the stream state file again using the receive_resume_token -# 5. ZFS receieve and verify the receive completes successfully +# 5. ZFS receive and verify the receive completes successfully # 6. Repeat steps on an incremental ZFS send # diff --git a/tests/zfs-tests/tests/functional/rsend/send-c_stream_size_estimate.ksh b/tests/zfs-tests/tests/functional/rsend/send-c_stream_size_estimate.ksh index 130bc3dbc9c3..f11068192880 100755 --- a/tests/zfs-tests/tests/functional/rsend/send-c_stream_size_estimate.ksh +++ b/tests/zfs-tests/tests/functional/rsend/send-c_stream_size_estimate.ksh @@ -89,4 +89,4 @@ for compress in $compress_types; do "$vol_csize and $vol_refer differed by too much" done -log_pass "The the stream size given by -P accounts for compressed send." +log_pass "The stream size given by -P accounts for compressed send." diff --git a/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh b/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh index 3c3de86d91c6..83a79784d226 100755 --- a/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh +++ b/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh @@ -25,7 +25,7 @@ # Strategy: # 1. Create a pool containing an encrypted filesystem. # 2. Use 'zfs send -wp' to perform a raw send of the initial filesystem. -# 3. Repeat the followings steps N times to verify raw incremental receives. +# 3. Repeat the following steps N times to verify raw incremental receives. # a) Randomly change several key dataset properties. # b) Modify the contents of the filesystem such that dnode reallocation # is likely during the 'zfs receive', and receive_object() exercises diff --git a/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh b/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh index 4b89a73d8081..27d65439b25b 100755 --- a/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh +++ b/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh @@ -25,7 +25,7 @@ # Strategy: # 1. Create a pool containing an encrypted filesystem. # 2. Use 'zfs send -wp' to perform a raw send of the initial filesystem. -# 3. Repeat the followings steps N times to verify raw incremental receives. +# 3. Repeat the following steps N times to verify raw incremental receives. # a) Randomly change several key dataset properties. 
# b) Modify the contents of the filesystem such that dnode reallocation # is likely during the 'zfs receive', and receive_object() exercises From 24739cd5b084367dbafd84883b6fb7687858a922 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 03:08:56 +0200 Subject: [PATCH 57/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9244 --- .../tests/functional/pool_checkpoint/checkpoint_removal.ksh | 2 +- .../tests/functional/pool_checkpoint/pool_checkpoint.kshlib | 2 +- .../tests/functional/projectquota/projectid_001_pos.ksh | 4 ++-- .../tests/functional/projectquota/projectid_002_pos.ksh | 2 +- .../tests/functional/projectquota/projectquota_004_neg.ksh | 2 +- .../tests/functional/projectquota/projectspace_004_pos.ksh | 2 +- .../tests/functional/projectquota/projecttree_002_pos.ksh | 2 +- .../tests/functional/projectquota/projecttree_003_neg.ksh | 4 ++-- tests/zfs-tests/tests/functional/pyzfs/Makefile.am | 2 +- tests/zfs-tests/tests/functional/raidz/raidz_001_neg.ksh | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh index ad96d5dcb637..514a05984160 100755 --- a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh @@ -52,7 +52,7 @@ populate_test_pool # # Create big empty file and do some writes at random # offsets to ensure that it takes up space. Note that -# the implcitly created filesystem ($FS0) does not +# the implicitly created filesystem ($FS0) does not # have compression enabled. # log_must mkfile $BIGFILESIZE $FS0FILE diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib b/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib index 6e410e0c85f8..ea6c03e9d59d 100644 --- a/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib @@ -27,7 +27,7 @@ # This is why these tests run directly on pools that use a # "real disk vdev" (meaning not a file based one). These tests # use the $TESTPOOL pool that is created on top of $TESTDISK. -# This pool is refered to as the "test pool" and thus all +# This pool is referred to as the "test pool" and thus all # the tests of this group use the testpool-related functions of # this file (not the nested_pools ones). # diff --git a/tests/zfs-tests/tests/functional/projectquota/projectid_001_pos.ksh b/tests/zfs-tests/tests/functional/projectquota/projectid_001_pos.ksh index 44af9941b929..46e79062a0e2 100755 --- a/tests/zfs-tests/tests/functional/projectquota/projectid_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/projectquota/projectid_001_pos.ksh @@ -38,8 +38,8 @@ # # # STRATEGY: -# 1. Create a regular file and a directroy. -# 2. Set project ID on both directroy and regular file. +# 1. Create a regular file and a directory. +# 2. Set project ID on both directory and regular file. # 3. New created subdir or regular file should inherit its parent's # project ID if its parent has project inherit flag. # 4. New created subdir should inherit its parent project's inherit flag. 
diff --git a/tests/zfs-tests/tests/functional/projectquota/projectid_002_pos.ksh b/tests/zfs-tests/tests/functional/projectquota/projectid_002_pos.ksh index 1a402e298b99..e382f464046b 100755 --- a/tests/zfs-tests/tests/functional/projectquota/projectid_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/projectquota/projectid_002_pos.ksh @@ -41,7 +41,7 @@ # 1. Create three directories # 2. Set tdir1 and tdir3 project ID as PRJID1, # set tdir2 project ID as PRJID2. -# 3. Create regular file under tdir1. It inherits tdir1 proejct ID. +# 3. Create regular file under tdir1. It inherits tdir1 project ID. # 4. Hardlink from tdir1's child to tdir2 should be denied, # move tdir1's child to tdir2 will be object recreated. # 5. Hardlink from tdir1's child to tdir3 should succeed. diff --git a/tests/zfs-tests/tests/functional/projectquota/projectquota_004_neg.ksh b/tests/zfs-tests/tests/functional/projectquota/projectquota_004_neg.ksh index df0eda7d770a..a975d2a19f0c 100755 --- a/tests/zfs-tests/tests/functional/projectquota/projectquota_004_neg.ksh +++ b/tests/zfs-tests/tests/functional/projectquota/projectquota_004_neg.ksh @@ -62,7 +62,7 @@ for prj in "${no_prjs[@]}"; do log_mustnot zfs set projectquota@$prj=100m $QFS done -log_note "can set all numberic id even that id is not existed" +log_note "can set all numeric id even if that id does not exist" log_must zfs set projectquota@12345678=100m $QFS set -A sizes "100mfsd" "m0.12m" "GGM" "-1234-m" "123m-m" diff --git a/tests/zfs-tests/tests/functional/projectquota/projectspace_004_pos.ksh b/tests/zfs-tests/tests/functional/projectquota/projectspace_004_pos.ksh index 494d7f3b7ac0..ec299e0e7f93 100755 --- a/tests/zfs-tests/tests/functional/projectquota/projectspace_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/projectquota/projectspace_004_pos.ksh @@ -38,7 +38,7 @@ # # STRATEGY: # 1. set project [obj]quota on the directory -# 2. set project ID and inherit flag on the directoty +# 2. set project ID and inherit flag on the directory # 3. run 'df [-i]' on the directory and check the result # diff --git a/tests/zfs-tests/tests/functional/projectquota/projecttree_002_pos.ksh b/tests/zfs-tests/tests/functional/projectquota/projecttree_002_pos.ksh index 4008811a19e1..d61019242703 100755 --- a/tests/zfs-tests/tests/functional/projectquota/projecttree_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/projectquota/projecttree_002_pos.ksh @@ -39,7 +39,7 @@ # # STRATEGY: # 1. Create a tree with 4 level directories. -# 2. Set project ID on both directroy and regular file via +# 2. Set project ID on both directory and regular file via # "zfs project -p". # 3. Check the project ID via "zfs project". # 4. Set project inherit flag on kinds of level directories (and its diff --git a/tests/zfs-tests/tests/functional/projectquota/projecttree_003_neg.ksh b/tests/zfs-tests/tests/functional/projectquota/projecttree_003_neg.ksh index 33382fdbe92d..cbc45857f779 100755 --- a/tests/zfs-tests/tests/functional/projectquota/projecttree_003_neg.ksh +++ b/tests/zfs-tests/tests/functional/projectquota/projecttree_003_neg.ksh @@ -43,8 +43,8 @@ # 2. "-C" only supports "-r" and "-k". # 3. "-s" only supports "-r" and "-p". # 4. "-c", "-C" and "-s" can NOT be specified together. -# 5. "-d" can overwirte former "-r". -# 6. "-r" can overwirte former "-d". +# 5. "-d" can overwrite former "-r". +# 6. "-r" can overwrite former "-d". # 7. "-0" must be together with "-c". # 8. "-d" must be on directory. # 9. "-r" must be on directory. 
diff --git a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am index c4cd10894c3c..0c68c252b93b 100644 --- a/tests/zfs-tests/tests/functional/pyzfs/Makefile.am +++ b/tests/zfs-tests/tests/functional/pyzfs/Makefile.am @@ -7,7 +7,7 @@ EXTRA_DIST = \ # # The pyzfs module is built either for Python 2 or Python 3. In order -# to properly test it the unit tests must be updated to the matching vesion. +# to properly test it the unit tests must be updated to the matching version. # $(pkgpyzfs_SCRIPTS):%:%.in -$(SED) -e 's,@PYTHON\@,$(PYTHON),g' \ diff --git a/tests/zfs-tests/tests/functional/raidz/raidz_001_neg.ksh b/tests/zfs-tests/tests/functional/raidz/raidz_001_neg.ksh index 4c105b9411c1..0f88a1a51468 100755 --- a/tests/zfs-tests/tests/functional/raidz/raidz_001_neg.ksh +++ b/tests/zfs-tests/tests/functional/raidz/raidz_001_neg.ksh @@ -35,4 +35,4 @@ log_mustnot raidz_test -T -log_pass "raidz_test detects errors as espected." +log_pass "raidz_test detects errors as expected." From 220dd4ae84eaa07d58ccd26f13ca4c4cb9259ee5 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 03:10:31 +0200 Subject: [PATCH 58/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9246 --- .../zfs-tests/tests/functional/delegate/zfs_allow_009_neg.ksh | 2 +- .../zfs-tests/tests/functional/fault/auto_online_001_pos.ksh | 2 +- .../zfs-tests/tests/functional/fault/auto_spare_multiple.ksh | 2 +- tests/zfs-tests/tests/functional/history/history_005_neg.ksh | 4 ++-- tests/zfs-tests/tests/functional/history/history_006_neg.ksh | 2 +- tests/zfs-tests/tests/functional/history/history_007_pos.ksh | 2 +- .../zfs-tests/tests/functional/history/history_common.kshlib | 2 +- tests/zfs-tests/tests/functional/inuse/inuse_001_pos.ksh | 2 +- tests/zfs-tests/tests/functional/inuse/inuse_004_pos.ksh | 4 ++-- tests/zfs-tests/tests/functional/inuse/inuse_008_pos.ksh | 2 +- .../tests/functional/large_files/large_files_001_pos.ksh | 2 +- tests/zfs-tests/tests/functional/mmap/mmap_write_001_pos.ksh | 2 +- tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh | 4 ++-- tests/zfs-tests/tests/functional/no_space/enospc_df.ksh | 2 +- tests/zfs-tests/tests/functional/nopwrite/nopwrite_sync.ksh | 2 +- 15 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tests/zfs-tests/tests/functional/delegate/zfs_allow_009_neg.ksh b/tests/zfs-tests/tests/functional/delegate/zfs_allow_009_neg.ksh index c2c911020410..45fdb5b85692 100755 --- a/tests/zfs-tests/tests/functional/delegate/zfs_allow_009_neg.ksh +++ b/tests/zfs-tests/tests/functional/delegate/zfs_allow_009_neg.ksh @@ -36,7 +36,7 @@ # zfs allow can deal with invalid arguments.(Invalid options or combination) # # STRATEGY: -# 1. Verify invalid argumets will cause error. +# 1. Verify invalid arguments will cause error. # 2. Verify non-optional argument was missing will cause error. # 3. Verify invalid options cause error. 
# diff --git a/tests/zfs-tests/tests/functional/fault/auto_online_001_pos.ksh b/tests/zfs-tests/tests/functional/fault/auto_online_001_pos.ksh index bc925bc91c81..03fc15a8a7cb 100755 --- a/tests/zfs-tests/tests/functional/fault/auto_online_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/fault/auto_online_001_pos.ksh @@ -129,7 +129,7 @@ do typeset -i timeout=0 while true; do if ((timeout == $MAXTIMEOUT)); then - log_fail "Timeout occured" + log_fail "Timeout occurred" fi ((timeout++)) diff --git a/tests/zfs-tests/tests/functional/fault/auto_spare_multiple.ksh b/tests/zfs-tests/tests/functional/fault/auto_spare_multiple.ksh index 8650ceff7d16..25c23aecc308 100755 --- a/tests/zfs-tests/tests/functional/fault/auto_spare_multiple.ksh +++ b/tests/zfs-tests/tests/functional/fault/auto_spare_multiple.ksh @@ -116,7 +116,7 @@ for type in "mirror" "raidz" "raidz2" "raidz3"; do done # Rinse and repeat, this time faulting both devices at the same time -# NOTE: "raidz" is exluded since it cannot survive 2 faulted devices +# NOTE: "raidz" is excluded since it cannot survive 2 faulted devices # NOTE: "mirror" is a 4-way mirror here and should survive this test for type in "mirror" "raidz2" "raidz3"; do # 1. Create a pool with two hot spares diff --git a/tests/zfs-tests/tests/functional/history/history_005_neg.ksh b/tests/zfs-tests/tests/functional/history/history_005_neg.ksh index f6a81a4ac5f2..297a701cc567 100755 --- a/tests/zfs-tests/tests/functional/history/history_005_neg.ksh +++ b/tests/zfs-tests/tests/functional/history/history_005_neg.ksh @@ -42,9 +42,9 @@ # zpool iostat # # STRATEGY: -# 1. Create a test pool. +# 1. Create a test pool # 2. Separately invoke zpool list|status|iostat -# 3. Verify they was not recored in pool history. +# 3. Verify they were not recorded in pool history # verify_runnable "global" diff --git a/tests/zfs-tests/tests/functional/history/history_006_neg.ksh b/tests/zfs-tests/tests/functional/history/history_006_neg.ksh index a2da831c5cce..e97adc4e3ce0 100755 --- a/tests/zfs-tests/tests/functional/history/history_006_neg.ksh +++ b/tests/zfs-tests/tests/functional/history/history_006_neg.ksh @@ -40,7 +40,7 @@ # STRATEGY: # 1. Create a test pool. # 2. Separately invoke zfs list|get|holds|mount|unmount|share|unshare|send -# 3. Verify they were not recored in pool history. +# 3. Verify they were not recorded in pool history. # verify_runnable "global" diff --git a/tests/zfs-tests/tests/functional/history/history_007_pos.ksh b/tests/zfs-tests/tests/functional/history/history_007_pos.ksh index b65e855d8c70..d1c92c5e7c20 100755 --- a/tests/zfs-tests/tests/functional/history/history_007_pos.ksh +++ b/tests/zfs-tests/tests/functional/history/history_007_pos.ksh @@ -83,7 +83,7 @@ for arch in "i386" "sparc"; do TZ=$TIMEZONE zpool history $migratedpoolname | grep -v "^$" \ >$migrated_cmds_f RET=$? - (( $RET != 0 )) && log_fail "zpool histroy $migratedpoolname fails." + (( $RET != 0 )) && log_fail "zpool history $migratedpoolname fails." 
# The migrated history file should differ with original history file on # two commands -- 'export' and 'import', which are included in migrated diff --git a/tests/zfs-tests/tests/functional/history/history_common.kshlib b/tests/zfs-tests/tests/functional/history/history_common.kshlib index 80af2e903daa..d97e015fcfef 100644 --- a/tests/zfs-tests/tests/functional/history/history_common.kshlib +++ b/tests/zfs-tests/tests/functional/history/history_common.kshlib @@ -224,7 +224,7 @@ function verify_allow # # Here, we determine three things: - # - Whether we're operating on a set or an indivdual permission (which + # - Whether we're operating on a set or an individual permission (which # dictates the case of the first character in the code) # - The name of the dataset we're operating on. # - Whether the operation applies locally or to descendent datasets (or diff --git a/tests/zfs-tests/tests/functional/inuse/inuse_001_pos.ksh b/tests/zfs-tests/tests/functional/inuse/inuse_001_pos.ksh index 63c68e66e4e4..aecdc5a3b078 100755 --- a/tests/zfs-tests/tests/functional/inuse/inuse_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/inuse/inuse_001_pos.ksh @@ -80,7 +80,7 @@ dumpdev=`dumpadm | grep "Dump device" | awk '{print $3}'` [[ -z "$dumpdev" ]] && log_untested "No dump device has been configured" [[ "$dumpdev" != "$diskslice" ]] && \ - log_untested "Dump device has not been been configured to $diskslice" + log_untested "Dump device has not been configured to $diskslice" log_note "Attempt to zpool the dump device" unset NOINUSE_CHECK diff --git a/tests/zfs-tests/tests/functional/inuse/inuse_004_pos.ksh b/tests/zfs-tests/tests/functional/inuse/inuse_004_pos.ksh index 95d505f35bf8..b126f66a0c3e 100755 --- a/tests/zfs-tests/tests/functional/inuse/inuse_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/inuse/inuse_004_pos.ksh @@ -48,8 +48,8 @@ verify_runnable "global" function cleanup { # - # Essentailly this is the default_cleanup routine but I cannot get it - # to work correctly. So its reproduced below. Still need to full + # Essentially this is the default_cleanup routine but I cannot get it + # to work correctly. So its reproduced below. Still need to fully # understand why default_cleanup does not work correctly from here. # log_must zfs umount $TESTPOOL/$TESTFS diff --git a/tests/zfs-tests/tests/functional/inuse/inuse_008_pos.ksh b/tests/zfs-tests/tests/functional/inuse/inuse_008_pos.ksh index ddc8fa7a49c2..1f5510ae5e6e 100755 --- a/tests/zfs-tests/tests/functional/inuse/inuse_008_pos.ksh +++ b/tests/zfs-tests/tests/functional/inuse/inuse_008_pos.ksh @@ -69,7 +69,7 @@ function verify_assertion #slices echo "y" | newfs -v $t > /dev/null 2>&1 (( $? !=0 )) && \ log_fail "newfs over exported pool " \ - "failes unexpected." + "fails unexpectedly." done return 0 diff --git a/tests/zfs-tests/tests/functional/large_files/large_files_001_pos.ksh b/tests/zfs-tests/tests/functional/large_files/large_files_001_pos.ksh index 3be20356ea0e..f59603724e76 100755 --- a/tests/zfs-tests/tests/functional/large_files/large_files_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/large_files/large_files_001_pos.ksh @@ -38,7 +38,7 @@ # STRATEGY: # 1. largest_file will write to a file and increase its size # to the maximum allowable. -# 2. The last byte of the file should be accessbile without error. +# 2. The last byte of the file should be accessible without error. # 3. Writing beyond the maximum file size generates an 'errno' of # EFBIG. 
# diff --git a/tests/zfs-tests/tests/functional/mmap/mmap_write_001_pos.ksh b/tests/zfs-tests/tests/functional/mmap/mmap_write_001_pos.ksh index 24150b827f8f..2f4257993d4a 100755 --- a/tests/zfs-tests/tests/functional/mmap/mmap_write_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/mmap/mmap_write_001_pos.ksh @@ -33,7 +33,7 @@ # # DESCRIPTION: -# Writing to a file and mmaping that file at the +# Writing to a file and mmapping that file at the # same time does not result in a deadlock. # # STRATEGY: diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh index bf1eb54a7389..9c4552b0cfb0 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh @@ -66,11 +66,11 @@ UBER_CHANGES=$(count_mmp_writes $TESTPOOL 10) log_note "Uberblock changed $UBER_CHANGES times" if [ $UBER_CHANGES -lt $MIN_UB_WRITES ]; then - log_fail "Fewer uberblock writes occured than expected ($EXPECTED)" + log_fail "Fewer uberblock writes occurred than expected ($EXPECTED)" fi if [ $UBER_CHANGES -gt $MAX_UB_WRITES ]; then - log_fail "More uberblock writes occured than expected ($EXPECTED)" + log_fail "More uberblock writes occurred than expected ($EXPECTED)" fi log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN diff --git a/tests/zfs-tests/tests/functional/no_space/enospc_df.ksh b/tests/zfs-tests/tests/functional/no_space/enospc_df.ksh index b3df69141fe7..b1eeaf2cc569 100755 --- a/tests/zfs-tests/tests/functional/no_space/enospc_df.ksh +++ b/tests/zfs-tests/tests/functional/no_space/enospc_df.ksh @@ -58,7 +58,7 @@ log_must zfs umount $TESTPOOL/$TESTFS # Ensure the pool root filesystem shows in df output. # If the pool was full (available == 0) and the pool -# root filesytem had very little in it (used < 1 block), +# root filesystem had very little in it (used < 1 block), # the size reported to df was zero (issue #8253) and # df skipped the filesystem in its output. log_must eval "df -h | grep $TESTPOOL" diff --git a/tests/zfs-tests/tests/functional/nopwrite/nopwrite_sync.ksh b/tests/zfs-tests/tests/functional/nopwrite/nopwrite_sync.ksh index c9d7b59b344b..bd38883d7578 100755 --- a/tests/zfs-tests/tests/functional/nopwrite/nopwrite_sync.ksh +++ b/tests/zfs-tests/tests/functional/nopwrite/nopwrite_sync.ksh @@ -24,7 +24,7 @@ # # Strategy: # 1. Create an origin fs with compression and sha256. -# 2. Clone origin such that it inherits the properies. +# 2. Clone origin such that it inherits the properties. # 3. Use dd with the sync flag to test the sync write path. 
# From 4001f09055a6daa3030d7f7916b4319949bc9334 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 03:12:01 +0200 Subject: [PATCH 59/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9247 --- .../tests/functional/cli_root/zpool_add/zpool_add.kshlib | 2 +- .../functional/cli_root/zpool_create/zpool_create.shlib | 2 +- .../cli_root/zpool_create/zpool_create_005_pos.ksh | 6 +++--- .../cli_root/zpool_create/zpool_create_016_pos.ksh | 2 +- .../cli_root/zpool_events/zpool_events_clear.ksh | 2 +- .../cli_root/zpool_history/zpool_history_001_neg.ksh | 2 +- .../zpool_import/import_rewind_config_changed.ksh | 2 +- .../zpool_import/zpool_import_missing_002_pos.ksh | 2 +- .../cli_root/zpool_labelclear/zpool_labelclear_active.ksh | 4 ++-- .../zpool_labelclear/zpool_labelclear_exported.ksh | 8 ++++---- .../cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh | 2 +- tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh | 2 +- .../functional/cli_user/misc/zpool_online_001_neg.ksh | 2 +- .../cli_user/zpool_iostat/zpool_iostat_005_pos.ksh | 2 +- .../cli_user/zpool_status/zpool_status_003_pos.ksh | 2 +- 15 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib index f80a2a864e43..94615ee3a0b5 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib @@ -90,7 +90,7 @@ function find_mnttab_dev } # -# Save the systme current dump device configuration +# Save the system current dump device configuration # function save_dump_dev { diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.shlib b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.shlib index 9e6874832066..3f3f4472990d 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.shlib +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.shlib @@ -146,7 +146,7 @@ function find_vfstab_dev } # -# Save the systme current dump device configuration +# Save the system current dump device configuration # function save_dump_dev { diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_005_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_005_pos.ksh index 2afbec37dca9..de5e9d8e79c3 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_005_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_005_pos.ksh @@ -95,7 +95,7 @@ do log_must zpool create $opt $TESTPOOL ${pooltype[i]} \ $file.1 $file.2 $file.3 ! poolexists $TESTPOOL && \ - log_fail "Createing pool with $opt fails." + log_fail "Creating pool with $opt fails." mpt=`zfs mount | egrep "^$TESTPOOL[^/]" | awk '{print $2}'` (( ${#mpt} == 0 )) && \ log_fail "$TESTPOOL created with $opt is not mounted." @@ -105,12 +105,12 @@ do from the output of zfs mount" if [[ "$opt" == "-m $TESTDIR1" ]]; then [[ ! -d $TESTDIR1 ]] && \ - log_fail "$TESTDIR1 is not created auotmatically." + log_fail "$TESTDIR1 is not created automatically." [[ "$mpt" != "$TESTDIR1" ]] && \ log_fail "$TESTPOOL is not mounted on $TESTDIR1." elif [[ "$opt" == "-R $TESTDIR1" ]]; then [[ ! -d $TESTDIR1/$TESTPOOL ]] && \ - log_fail "$TESTDIR1/$TESTPOOL is not created auotmatically." 
+ log_fail "$TESTDIR1/$TESTPOOL is not created automatically." [[ "$mpt" != "$TESTDIR1/$TESTPOOL" ]] && \ log_fail "$TESTPOOL is not mounted on $TESTDIR1/$TESTPOOL." else diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_016_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_016_pos.ksh index 3fca607b1f46..cbb5806d9af6 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_016_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_016_pos.ksh @@ -41,7 +41,7 @@ # STRATEGY: # 1. delete all devices in the swap # 2. create a zpool -# 3. Verify the creation is successed. +# 3. Verify the creation was successful # verify_runnable "global" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_clear.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_clear.ksh index ab862354b810..67038a4743d8 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_clear.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_clear.ksh @@ -34,7 +34,7 @@ log_assert "'zpool events -c' should successfully clear events." # 1. Clear all ZFS events # This is needed because we may already over the max number or events queued # (zfs_zevent_len_max) generated by previous tests: generating $EVENTS_NUM new -# events and then counting them is racy and leads to failues, so start from 0. +# events and then counting them is racy and leads to failures, so start from 0. log_must zpool events -c # 2. Generate some new ZFS events diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_history/zpool_history_001_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_history/zpool_history_001_neg.ksh index dd1be14a066b..a2b73182bf4c 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_history/zpool_history_001_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_history/zpool_history_001_neg.ksh @@ -38,7 +38,7 @@ # # STRATEGY: # 1. Create pool, volume & snap -# 2. Verify 'zpool history' can cope with incorret arguments. +# 2. Verify 'zpool history' can cope with incorrect arguments. # verify_runnable "global" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh index e8f3937609d1..f42ba10d65c4 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh @@ -115,7 +115,7 @@ function test_common # further than the time that we took the checkpoint. # # Note that, ideally we would want to take a checkpoint - # right after we recond the txg we plan to rewind to. + # right after we record the txg we plan to rewind to. # But since we can't attach, detach or remove devices # while having a checkpoint, we take it after the # operation that changes the config. diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh index 7534ebca87fe..c6d2637074fe 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh @@ -54,7 +54,7 @@ # 3. Export the test pool. # 4. 
Move one or more device files to other directory # 5. Verify 'zpool import -d' with the new directory -# will handle moved files successfullly. +# will handle moved files successfully. # Using the various combinations. # - Regular import # - Alternate Root Specified diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh index dcca2e9335d6..b63d55d7ad64 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh @@ -24,8 +24,8 @@ # STRATEGY: # 1. Create the pool with log device. # 2. Try clearing the label on data and log devices. -# 3. Add auxilary (cache/spare) vdevs. -# 4. Try clearing the label on auxilary vdevs. +# 3. Add auxiliary (cache/spare) vdevs. +# 4. Try clearing the label on auxiliary vdevs. # 5. Check that zpool labelclear will return non-zero and # labels are intact. diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh index a5131bdbb78b..72a555bebe07 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh @@ -26,8 +26,8 @@ # 2. Export the pool. # 3. Check that zpool labelclear returns non-zero when trying to # clear the label on ACTIVE vdevs, and succeeds with -f. -# 4. Add auxilary vdevs (cache/spare). -# 5. Check that zpool labelclear succeeds on auxilary vdevs of +# 4. Add auxiliary vdevs (cache/spare). +# 5. Check that zpool labelclear succeeds on auxiliary vdevs of # exported pool. verify_runnable "global" @@ -44,7 +44,7 @@ log_assert "zpool labelclear will fail on ACTIVE vdevs of exported pool and" \ for vdevtype in "" "cache" "spare"; do # Create simple pool, skip any mounts log_must zpool create -O mountpoint=none -f $TESTPOOL $disk1 log $disk2 - # Add auxilary vdevs (cache/spare) + # Add auxiliary vdevs (cache/spare) if [[ -n $vdevtype ]]; then log_must zpool add $TESTPOOL $vdevtype $disk3 fi @@ -63,7 +63,7 @@ for vdevtype in "" "cache" "spare"; do log_must zpool labelclear -f $disk2 log_mustnot zdb -lq $disk2 - # Check that labelclear on auxilary vdevs will succeed + # Check that labelclear on auxiliary vdevs will succeed if [[ -n $vdevtype ]]; then log_must zpool labelclear $disk3 log_mustnot zdb -lq $disk3 diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh index adc1ba47fcc1..696c8c66cc1c 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh @@ -42,7 +42,7 @@ # # STRATEGY: # 1. Import pools of all versions -# 2. Setup a test enviorment over the old pools. +# 2. Setup a test environment over the old pools. # 3. Verify the commands related to 'zfs upgrade' succeed as expected. 
# diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh index bcf6a2296d57..fc0ebde10025 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh @@ -34,7 +34,7 @@ # This setup script is moderately complex, as it creates scenarios for all # of the tests included in this directory. Usually we'd want each test case -# to setup/teardown it's own configuration, but this would be time consuming +# to setup/teardown its own configuration, but this would be time consuming # given the nature of these tests. However, as a side-effect, one test # leaving the system in an unknown state could impact other test cases. diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/zpool_online_001_neg.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/zpool_online_001_neg.ksh index b89cf07ac183..cd290515357f 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/zpool_online_001_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/zpool_online_001_neg.ksh @@ -49,7 +49,7 @@ function check_for_online | grep ONLINE ) if [ -n "$RESULT" ] then - log_fail "A disk was brough online!" + log_fail "A disk was brought online!" fi } diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh index 1ae91c1a8434..53652ec11b5a 100755 --- a/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh @@ -68,7 +68,7 @@ for i in $files ; do test_zpool_script "$i" "$testpool" "zpool iostat -Pv -c" done -# Test that we can run multiple scripts separated with a commma by running +# Test that we can run multiple scripts separated with a comma by running # all the scripts in a single -c line. allscripts="$(echo $scripts | sed -r 's/[[:blank:]]+/,/g')" test_zpool_script "$allscripts" "$testpool" "zpool iostat -Pv -c" diff --git a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_003_pos.ksh b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_003_pos.ksh index c5e0c6e474a5..fa7d3f3f2d56 100755 --- a/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/zpool_status/zpool_status_003_pos.ksh @@ -68,7 +68,7 @@ for i in $files ; do test_zpool_script "$i" "$testpool" "zpool status -P -c" done -# Test that we can run multiple scripts separated with a commma by running +# Test that we can run multiple scripts separated with a comma by running # all the scripts in a single -c line. 
allscripts="$(echo $scripts | sed -r 's/[[:blank:]]+/,/g')" test_zpool_script "$allscripts" "$testpool" "zpool status -P -c" From cb14aa4ca9a2ba84beeb2165486e65c441272c04 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 03:13:19 +0200 Subject: [PATCH 60/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9249 --- .../functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh | 2 +- .../cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh | 2 +- .../cli_root/zfs_destroy/zfs_destroy_common.kshlib | 6 +++--- .../zfs_destroy/zfs_destroy_dev_removal_condense.ksh | 2 +- .../tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh | 2 +- .../functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh | 4 ++-- .../functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh | 4 ++-- .../functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh | 2 +- .../functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh | 2 +- .../functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh | 2 +- .../functional/cli_root/zfs_mount/zfs_mount_remount.ksh | 2 +- .../cli_root/zfs_property/zfs_written_property_001_pos.ksh | 6 +++--- .../functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh | 2 +- .../functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh | 2 +- .../cli_root/zfs_receive/zfs_receive_from_encrypted.ksh | 4 ++-- 15 files changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh index 83cd0a27c300..1e129ddd3bc9 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh @@ -157,7 +157,7 @@ verify_snapshots 1 snaps="1 2 3 4 5" setup_snapshots -log_note "Snapshot destory with hold" +log_note "Snapshot destroy with hold" range="1 2 3 4 5" for i in 1 2 3 4 5; do log_must zfs hold keep $TESTPOOL/$TESTFS1@snap$i diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh index 6669426b0c7b..c427e4bc4470 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh @@ -30,7 +30,7 @@ # destroyed # 3. Multiple clones with empty livelists # - same as 1. but with multiple clones -# 4. Multuple clones with populated livelists +# 4. Multiple clones with populated livelists # - same as 2. but with multiple clones . $STF_SUITE/include/libtest.shlib diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib index 895efaf988a3..504e3a580ab3 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib @@ -56,7 +56,7 @@ function setup_testenv #[dtst] if ! datasetexists $FS; then log_must zfs create $FS fi - # Volume test is only availible on globle zone + # Volume test is only available on global zone if ! 
datasetexists $VOL && is_global_zone; then log_must zfs create -V $VOLSIZE $VOL block_device_wait @@ -127,7 +127,7 @@ function check_dataset shift for dtst in "$@"; do - # Volume and related stuff are unvailable in local zone + # Volume and related stuff are unavailable in local zone if ! is_global_zone; then if [[ $dtst == $VOL || $dtst == $VOLSNAP || \ $dtst == $VOLCLONE ]] @@ -140,7 +140,7 @@ function check_dataset if (( ${#newlist} != 0 )); then # Run each item in $newlist individually so on failure, the - # probelmatic dataset is listed in the logs. + # problematic dataset is listed in the logs. for i in $newlist; do log_must $funname $i done diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh index 30c4b2ddac0d..b6442de40e9b 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh @@ -61,7 +61,7 @@ log_must zfs snapshot $TESTPOOL2/$TESTFS@snap log_must zfs clone $TESTPOOL2/$TESTFS@snap $TESTPOOL2/$TESTCLONE -# Create inital files and pause condense zthr on next execution +# Create initial files and pause condense zthr on next execution log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B log_must zpool sync $TESTPOOL2 diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh index b038e7484ab2..4bd61137c7be 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh @@ -114,7 +114,7 @@ availspace=$(get_prop available $TESTPOOL) typeset -i i=0 # make sure 'availspace' is larger then twice of FILESIZE to create a new pool. -# If any, we only totally create 3 pools for multple datasets testing to limit +# If any, we only totally create 3 pools for multiple datasets testing to limit # testing time while (( availspace > DFILESIZE )) && (( i < 3 )) ; do (( i += 1 )) diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh index 584039f543c6..3ef65b517c6d 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh @@ -36,8 +36,8 @@ # 'zfs inherit' should return an error with bad parameters in one command. # # STRATEGY: -# 1. Set an array of bad options and invlid properties to 'zfs inherit' -# 2. Execute 'zfs inherit' with bad options and passing invlid properties +# 1. Set an array of bad options and invalid properties to 'zfs inherit' +# 2. Execute 'zfs inherit' with bad options and passing invalid properties # 3. Verify an error is returned. # diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh index bc0d8c59c0cd..3317b09e2b5b 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh @@ -37,8 +37,8 @@ # 'zfs inherit' should return an error with bad parameters in one command. # # STRATEGY: -# 1. 
Set an array of bad options and invlid properties to 'zfs inherit' -# 2. Execute 'zfs inherit' with bad options and passing invlid properties +# 1. Set an array of bad options and invalid properties to 'zfs inherit' +# 2. Execute 'zfs inherit' with bad options and passing invalid properties # 3. Verify an error is returned. # diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh index e2ef0bf00db0..52ae1879d1ae 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh @@ -45,7 +45,7 @@ # setuid setuid/nosetuid # # STRATEGY: -# 1. Create filesystem and get origianl property value. +# 1. Create filesystem and get original property value. # 2. Using 'zfs mount -o' to set filesystem property. # 3. Verify the property was set temporarily. # 4. Verify it will not affect the property that is stored on disk. diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh index 5f88b611002a..84835a0d6d62 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh @@ -83,7 +83,7 @@ log_must mkfile 1M $mntpnt/$TESTFILE2 log_mustnot ls $testfile log_must ls $mntpnt/$TESTFILE1 $mntpnt/$TESTFILE2 -# Verify $TESTFILE2 was created in $fs1, rather then $fs +# Verify $TESTFILE2 was created in $fs1, rather than $fs log_must zfs unmount $fs1 log_must zfs set mountpoint=$mntpnt1 $fs1 log_must zfs mount $fs1 diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh index 963ad626c2d0..0b5d61f62f40 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh @@ -72,4 +72,4 @@ else fi cd $curpath -log_pass "zfs mount fails with mounted filesystem or busy moutpoint as expected." +log_pass "zfs mount fails with mounted filesystem or busy mountpoint as expected." diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_remount.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_remount.ksh index f7a0978352b5..66a4338655de 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_remount.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_remount.ksh @@ -29,7 +29,7 @@ # # DESCRIPTION: -# Verify remount functionality, expecially on readonly objects. +# Verify remount functionality, especially on readonly objects. # # STRATEGY: # 1. 
Prepare a filesystem and a snapshot diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh index bf94274ddbf8..9a2d3cb80256 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh @@ -17,7 +17,7 @@ # # DESCRIPTION # Verify that "zfs list" gives correct values for written and written@ -# proerties for the dataset when different operations are on done on it +# properties for the dataset when different operations are on done on it # # # STRATEGY @@ -86,7 +86,7 @@ blocks=0 for i in 1 2 3; do written=$(get_prop written $TESTPOOL/$TESTFS1@snap$i) if [[ $blocks -eq 0 ]]; then - # Written value for the frist non-clone snapshot is + # Written value for the first non-clone snapshot is # expected to be equal to the referenced value. expected_written=$( \ get_prop referenced $TESTPOOL/$TESTFS1@snap$i) @@ -120,7 +120,7 @@ sync_pool written=$(get_prop written $TESTPOOL/$TESTFS1) writtenat3=$(get_prop written@snap3 $TESTPOOL/$TESTFS1) [[ $written -eq $writtenat3 ]] || \ - log_fail "Written and written@ dont match $written $writtenat3" + log_fail "Written and written@ don't match $written $writtenat3" within_percent $written $before_written 0.1 && \ log_fail "Unexpected written value after delete $written $before_written" writtenat=$(get_prop written@snap1 $TESTPOOL/$TESTFS1) diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh index 5ce0e02fa617..f8439dcbbebd 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh @@ -155,7 +155,7 @@ for orig_fs in $datasets ; do log_must zfs destroy -Rf $rst_fs - log_note "Verfiying 'zfs receive -d ' works." + log_note "Verifying 'zfs receive -d ' works." i=0 while (( i < ${#bkup[*]} )); do diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh index fcbdc5e1594e..3a9c2279a61d 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh @@ -36,7 +36,7 @@ # Verify 'zfs receive' fails with malformed parameters. # # STRATEGY: -# 1. Denfine malformed parameters array +# 1. Define malformed parameters array # 2. Feed the malformed parameters to 'zfs receive' # 3. Verify the command should be failed # diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh index 5eee9eecf4bb..de771ccf3952 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh @@ -31,9 +31,9 @@ # 4. Snapshot the encrypted dataset # 5. Attempt to receive the snapshot into an unencrypted child # 6. Verify encryption is not enabled -# 7. Verify the cheksum of the file is the same as the original +# 7. Verify the checksum of the file is the same as the original # 8. Attempt to receive the snapshot into an encrypted child -# 9. 
Verify the cheksum of the file is the same as the original +# 9. Verify the checksum of the file is the same as the original # verify_runnable "both" From c6e457dffb24bdc3efaea73b99480d2cf6567128 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 03:14:53 +0200 Subject: [PATCH 61/68] Fix typos in tests/ Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9250 --- tests/zfs-tests/cmd/mmapwrite/mmapwrite.c | 2 +- tests/zfs-tests/include/blkdev.shlib | 2 +- tests/zfs-tests/include/libtest.shlib | 6 +++--- .../zfs-tests/tests/functional/acl/acl_common.kshlib | 12 ++++++------ .../tests/functional/cachefile/cachefile_004_pos.ksh | 4 ++-- .../functional/casenorm/insensitive_formd_lookup.ksh | 2 +- .../synctask_core/tst.list_user_props.ksh | 2 +- .../synctask_core/tst.terminate_by_signal.ksh | 2 +- .../cli_root/zfs_clone/zfs_clone_010_pos.ksh | 2 +- .../cli_root/zfs_copies/zfs_copies_002_pos.ksh | 2 +- .../cli_root/zfs_create/zfs_create_011_pos.ksh | 2 +- .../cli_root/zfs_destroy/zfs_destroy_001_pos.ksh | 2 +- .../cli_root/zfs_destroy/zfs_destroy_005_neg.ksh | 4 ++-- .../cli_root/zfs_destroy/zfs_destroy_014_pos.ksh | 4 ++-- 14 files changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c b/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c index b9915d5d31eb..458d6d8e402b 100644 --- a/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c +++ b/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c @@ -43,7 +43,7 @@ * is hold) occurred, zfs_dirty_inode open a txg failed, and wait previous * txg "n" completed. * 3. context #1 call uiomove to write, however page fault is occurred in - * uiomove, which means it need mm_sem, but mm_sem is hold by + * uiomove, which means it needs mm_sem, but mm_sem is held by * context #2, so it stuck and can't complete, then txg "n" will not * complete. * diff --git a/tests/zfs-tests/include/blkdev.shlib b/tests/zfs-tests/include/blkdev.shlib index ca8807e82c6a..87500e92a398 100644 --- a/tests/zfs-tests/include/blkdev.shlib +++ b/tests/zfs-tests/include/blkdev.shlib @@ -131,7 +131,7 @@ function is_loop_device #disk } # -# Check if the given device is a multipath device and if there is a sybolic +# Check if the given device is a multipath device and if there is a symbolic # link to a device mapper and to a disk # Currently no support for dm devices alone without multipath # diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index cda4b04cddf7..ed68cac0681e 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -869,7 +869,7 @@ function zero_partitions # # Size should be specified with units as per # the `format` command requirements eg. 100mb 3gb # -# NOTE: This entire interface is problematic for the Linux parted utilty +# NOTE: This entire interface is problematic for the Linux parted utility # which requires the end of the partition to be specified. It would be # best to retire this interface and replace it with something more flexible. # At the moment a best effort is made. @@ -1072,7 +1072,7 @@ function partition_disk # # dirnum: the maximum number of subdirectories to use, -1 no limit # filenum: the maximum number of files per subdirectory # bytes: number of bytes to write -# num_writes: numer of types to write out bytes +# num_writes: number of times to write out bytes # data: the data that will be written # # E.g.
@@ -2859,7 +2859,7 @@ function labelvtoc # # check if the system was installed as zfsroot or not -# return: 0 ture, otherwise false +# return: 0 if zfsroot, non-zero if not # function is_zfsroot { diff --git a/tests/zfs-tests/tests/functional/acl/acl_common.kshlib b/tests/zfs-tests/tests/functional/acl/acl_common.kshlib index a81cd76ba6aa..ba08bcb48bef 100644 --- a/tests/zfs-tests/tests/functional/acl/acl_common.kshlib +++ b/tests/zfs-tests/tests/functional/acl/acl_common.kshlib @@ -34,7 +34,7 @@ # # Get the given file/directory access mode # -# $1 object -- file or directroy +# $1 object -- file or directory # function get_mode # { @@ -49,7 +49,7 @@ function get_mode # # # Get the given file/directory ACL # -# $1 object -- file or directroy +# $1 object -- file or directory # function get_acl # { @@ -64,7 +64,7 @@ function get_acl # # # Get the given file/directory ACL # -# $1 object -- file or directroy +# $1 object -- file or directory # function get_compact_acl # { @@ -243,12 +243,12 @@ function usr_exec # [...] # # Count how many ACEs for the specified file or directory. # -# $1 file or directroy name +# $1 file or directory name # function count_ACE # { if [[ ! -e $1 ]]; then - log_note "Need input file or directroy name." + log_note "Need input file or directory name." return 1 fi @@ -399,7 +399,7 @@ function rwx_node #user node acl_spec|access # # Get the given file/directory xattr # -# $1 object -- file or directroy +# $1 object -- file or directory # function get_xattr # { diff --git a/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh b/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh index e0b81e166279..841b141e16fc 100755 --- a/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh @@ -38,9 +38,9 @@ # Verify set, export and destroy when cachefile is set on pool. # # STRATEGY: -# 1. Create two pools with one same cahcefile1. +# 1. Create two pools with one same cachefile1. # 2. Set cachefile of the two pools to another same cachefile2. -# 3. Verify cachefile1 not exist. +# 3. Verify cachefile1 does not exist. # 4. Export the two pools. # 5. Verify cachefile2 not exist. # 6. Import the two pools and set cachefile to cachefile2. diff --git a/tests/zfs-tests/tests/functional/casenorm/insensitive_formd_lookup.ksh b/tests/zfs-tests/tests/functional/casenorm/insensitive_formd_lookup.ksh index d28431300a30..1ef9d2756fc8 100755 --- a/tests/zfs-tests/tests/functional/casenorm/insensitive_formd_lookup.ksh +++ b/tests/zfs-tests/tests/functional/casenorm/insensitive_formd_lookup.ksh @@ -19,7 +19,7 @@ # DESCRIPTION: # For the filesystem with casesensitivity=insensitive, normalization=formD, -# check that lookup succeds using any name form. +# check that lookup succeeds using any name form. # # STRATEGY: # For each c/n name form: diff --git a/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.list_user_props.ksh b/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.list_user_props.ksh index 34fdbd56d924..a454a2753302 100755 --- a/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.list_user_props.ksh +++ b/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.list_user_props.ksh @@ -144,4 +144,4 @@ log_must_program $TESTPOOL - <<-EOF return 0 EOF -log_pass "Listing zfs user properies should work correctly." +log_pass "Listing zfs user properties should work correctly." 
diff --git a/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.terminate_by_signal.ksh b/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.terminate_by_signal.ksh index 6f58cc1f4f8d..74889eba8059 100755 --- a/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.terminate_by_signal.ksh +++ b/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.terminate_by_signal.ksh @@ -90,7 +90,7 @@ snap_count=$(zfs list -t snapshot | grep $TESTPOOL | wc -l) log_note "$snap_count snapshots created by ZCP" if [ "$snap_count" -eq 0 ]; then - log_fail "Channel progam failed to run." + log_fail "Channel program failed to run." elif [ "$snap_count" -gt 50 ]; then log_fail "Too many snapshots after a cancel ($snap_count)." else diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh index 40cabf649d11..62a755eaeef2 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh @@ -150,7 +150,7 @@ log_note "Verify zfs clone property for multiple clones" names=$(zfs list -rt all -o name $TESTPOOL) log_must verify_clones 3 0 -log_note "verfify clone property for clone deletion" +log_note "verify clone property for clone deletion" i=1 for ds in $datasets; do log_must zfs destroy $ds/$TESTCLONE.$i diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh index a5a9729dc17f..11265cd5afe6 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh @@ -92,7 +92,7 @@ for val in 1 2 3; do check_used $used $val done -log_note "Verify df(1M) can corectly display the space charged." +log_note "Verify df(1M) can correctly display the space charged." for val in 1 2 3; do used=`df -F zfs -k /$TESTPOOL/fs_$val/$FILE | grep $TESTPOOL/fs_$val \ | awk '{print $3}'` diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_011_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_011_pos.ksh index 0144b050d7d7..982a4ea16b5e 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_011_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_011_pos.ksh @@ -33,7 +33,7 @@ # # DESCRIPTION: -# 'zfs create -p' should work as expecteed +# 'zfs create -p' should work as expected # # STRATEGY: # 1. To create $newdataset with -p option, first make sure the upper level diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh index 534c33f0a02b..26857d48d48b 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh @@ -53,7 +53,7 @@ verify_runnable "both" # run 'zfs destroy $opt '. 3rd, check the system status. # # $1 option of 'zfs destroy' -# $2 dataset will be destroied. +# $2 dataset will be destroyed. 
# function test_n_check { diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh index 2e4a0c3b2bb5..1c5b2cf1c741 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh @@ -145,8 +145,8 @@ if is_global_zone; then check_dataset datasetexists $CTR $VOL check_dataset datasetnonexists $VOLSNAP $VOLCLONE - # Due to recusive destroy being a best-effort operation, - # all of the non-busy datasets bellow should be gone now. + # Due to recursive destroy being a best-effort operation, + # all of the non-busy datasets below should be gone now. check_dataset datasetnonexists $FS $FSSNAP $FSCLONE fi diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh index df7cfcf5271d..58c4cfb5646d 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh @@ -24,7 +24,7 @@ # # DESCRIPTION: # 'zfs destroy -R ' can destroy all the child -# snapshots and preserves all the nested datasetss. +# snapshots and preserves all the nested datasets. # # STRATEGY: # 1. Create nested datasets in the storage pool. @@ -57,7 +57,7 @@ for ds in $datasets; do datasetexists $ds || log_fail "Create $ds dataset fail." done -# create recursive nestedd snapshot +# create recursive nested snapshot log_must zfs snapshot -r $TESTPOOL/$TESTFS1@snap for ds in $datasets; do datasetexists $ds@snap || log_fail "Create $ds@snap snapshot fail." From a57c82fc50bced77b7f409595130091360533012 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Tue, 3 Sep 2019 03:17:39 +0200 Subject: [PATCH 62/68] Fix typos Reviewed-by: Ryan Moeller Reviewed-by: Richard Laager Reviewed-by: Brian Behlendorf Signed-off-by: Andrea Gelmini Closes #9251 --- .github/CONTRIBUTING.md | 2 +- scripts/kmodtool | 6 +++--- tests/README.md | 2 +- tests/test-runner/bin/Makefile.am | 2 +- tests/test-runner/bin/test-runner.py | 4 ++-- tests/test-runner/man/test-runner.1 | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 004711ae78c4..2b47d458c1a3 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -51,7 +51,7 @@ configure option should be set. This will enable additional correctness checks and all the ASSERTs to help quickly catch potential issues. In addition, there are numerous utilities and debugging files which -provide visibility in to the inner workings of ZFS. The most useful +provide visibility into the inner workings of ZFS. The most useful of these tools are discussed in detail on the [debugging ZFS wiki page](https://github.com/zfsonlinux/zfs/wiki/Debugging). diff --git a/scripts/kmodtool b/scripts/kmodtool index a632dd046b5a..a05b0078bbb5 100755 --- a/scripts/kmodtool +++ b/scripts/kmodtool @@ -397,7 +397,7 @@ print_rpmtemplate () # and print it and some other required stuff as macro print_rpmtemplate_header - # now print the packages itselfs + # now print the packages for kernel in ${kernel_versions_to_build_for} ; do local kernel_verrelarch=${kernel%%${kernels_known_variants}} @@ -489,7 +489,7 @@ while [ "${1}" ] ; do --obsolete-name) shift if [[ ! 
"${1}" ]] ; then - error_out 2 "Please provide the name of the kmod to obsolte together with --obsolete-name" >&2 + error_out 2 "Please provide the name of the kmod to obsolete together with --obsolete-name" >&2 fi obsolete_name="${1}" shift @@ -497,7 +497,7 @@ while [ "${1}" ] ; do --obsolete-version) shift if [[ ! "${1}" ]] ; then - error_out 2 "Please provide the version of the kmod to obsolte together with --obsolete-version" >&2 + error_out 2 "Please provide the version of the kmod to obsolete together with --obsolete-version" >&2 fi obsolete_version="${1}" shift diff --git a/tests/README.md b/tests/README.md index 7b3768c29110..b2c7f99c7098 100644 --- a/tests/README.md +++ b/tests/README.md @@ -78,7 +78,7 @@ The following zfs-tests.sh options are supported: when test-runner exists. This is useful when the results of a specific test need to be preserved for further analysis. - -f Use sparse files directly instread of loopback devices for + -f Use sparse files directly instead of loopback devices for the testing. When running in this mode certain tests will be skipped which depend on real block devices. diff --git a/tests/test-runner/bin/Makefile.am b/tests/test-runner/bin/Makefile.am index 30c564e55533..e1ae21548e98 100644 --- a/tests/test-runner/bin/Makefile.am +++ b/tests/test-runner/bin/Makefile.am @@ -3,7 +3,7 @@ dist_pkgdata_SCRIPTS = \ test-runner.py \ zts-report.py # -# These scripts are compatibile with both Python 2.6 and 3.4. As such the +# These scripts are compatible with both Python 2.6 and 3.4. As such the # python 3 shebang can be replaced at install time when targeting a python # 2 system. This allows us to maintain a single version of the source. # diff --git a/tests/test-runner/bin/test-runner.py b/tests/test-runner/bin/test-runner.py index 4d4fd96ad771..bf2c77c18a93 100755 --- a/tests/test-runner/bin/test-runner.py +++ b/tests/test-runner/bin/test-runner.py @@ -307,7 +307,7 @@ def log(self, options): This function is responsible for writing all output. This includes the console output, the logfile of all results (with timestamped merged stdout and stderr), and for each test, the unmodified - stdout/stderr/merged in it's own file. + stdout/stderr/merged in its own file. """ logname = getpwuid(os.getuid()).pw_name @@ -716,7 +716,7 @@ def complete_outputdirs(self): def setup_logging(self, options): """ - This funtion creates the output directory and gets a file object + This function creates the output directory and gets a file object for the logfile. This function must be called before write_log() can be used. """ diff --git a/tests/test-runner/man/test-runner.1 b/tests/test-runner/man/test-runner.1 index 31cd412452b8..95255073b705 100644 --- a/tests/test-runner/man/test-runner.1 +++ b/tests/test-runner/man/test-runner.1 @@ -103,7 +103,7 @@ The file has one section named "DEFAULT," which contains configuration option names and their values in "name = value" format. The values in this section apply to all the subsequent sections, unless they are also specified there, in which case the default is overridden. The remaining section names are the -absolute pathnames of files and direcotries, describing tests and test groups +absolute pathnames of files and directories, describing tests and test groups respectively. The legal option names are: .sp .ne 2 @@ -248,7 +248,7 @@ Run \fIscript\fR after any test or test group. \fB-q\fR .ad .RS 6n -Print only the results sumary to the standard output. +Print only the results summary to the standard output. 
.RE .ne 2 From 1e52716257877dbd97373cc8ca239315eaee2984 Mon Sep 17 00:00:00 2001 From: George Wilson Date: Mon, 2 Sep 2019 22:17:51 -0400 Subject: [PATCH 63/68] maxinflight can overflow in spa_load_verify_cb() When running on larger memory systems, we can overflow the value of maxinflight. This can result in maxinflight having a value of 0, causing the system to hang. Reviewed-by: Igor Kozhukhov Reviewed-by: Brian Behlendorf Signed-off-by: George Wilson Closes #9272 --- module/zfs/spa.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/module/zfs/spa.c b/module/zfs/spa.c index f4a6f3f456b8..d885c20c9256 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -2229,7 +2229,8 @@ spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, if (!BP_IS_METADATA(bp) && !spa_load_verify_data) return (0); - int maxinflight_bytes = arc_target_bytes() >> spa_load_verify_shift; + uint64_t maxinflight_bytes = + arc_target_bytes() >> spa_load_verify_shift; zio_t *rio = arg; size_t size = BP_GET_PSIZE(bp); From 11857d6e685d18455d0673f575eaca98c3805a5e Mon Sep 17 00:00:00 2001 From: Brian Atkinson Date: Fri, 30 Aug 2019 12:44:43 -0600 Subject: [PATCH 64/68] I added Mark Maybee's modifications to allow Direct IO. This allows reads and writes to bypass the ARC layer and deliver the IO requests directly to the VDEVs. This is accomplished by mapping/pinning the user pages into the kernel. I forgot to remove the older kernel-get-user-pages-unlocked M4 file in config. --- config/kernel-get-user-pages.m4 | 138 +++ config/kernel.m4 | 1 + include/linux/kmap_compat.h | 36 + include/spl/sys/uio.h | 9 + include/sys/abd.h | 3 + include/sys/dmu.h | 1 + module/zfs/abd.c | 48 + module/zfs/dbuf.c | 7 +- module/zfs/dmu.c | 1662 ++++++++++++++++++------- module/zfs/zfs_vnops.c | 20 +- 10 files changed, 1244 insertions(+), 681 deletions(-) create mode 100644 config/kernel-get-user-pages.m4 diff --git a/config/kernel-get-user-pages.m4 b/config/kernel-get-user-pages.m4 new file mode 100644 index 000000000000..ed4337e2b262 --- /dev/null +++ b/config/kernel-get-user-pages.m4 @@ -0,0 +1,138 @@ +dnl # +dnl # get_user_pages_unlocked() function was not available until 4.0.
+dnl # +dnl # long get_user_pages_unlocked(struct task_struct *tsk, +dnl # struct mm_struct *mm, unsigned long start, unsigned long nr_pages, +dnl # int write, int force, struct page **pages) +dnl # 4.8 API Change +dnl # long get_user_pages_unlocked(unsigned long start, +dnl # unsigned long nr_pages, int write, int force, struct page **pages) +dnl # 4.9 API Change +dnl # long get_user_pages_unlocked(unsigned long start, int nr_pages, +dnl # struct page **pages, unsigned int gup_flags) +dnl # +dnl # +dnl # In earlier kernels (< 4.0) get_user_pages() is available +dnl # +dnl # int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, +dnl # unsigned long start, int nr_pages, int write, int force, +dnl # struct page **pages, struct vm_area_struct **vmas) +dnl # +dnl # 4.6 API Change +dnl # long get_user_pages(unsigned long start, unsigned long nr_pages, +dnl # unsigned int gup_flags, struct page **pages, +dnl # struct vm_area_struct **vmas) +dnl # +AC_DEFUN([ZFS_AC_KERNEL_GET_USER_PAGES], [ + dnl # + dnl # Current API of get_user_pages_unlocked + dnl # + AC_MSG_CHECKING([whether get_user_pages_unlocked() takes gup flags]) + ZFS_LINUX_TRY_COMPILE([ + #include <linux/mm.h> + ], [ + unsigned long start = 0; + unsigned long nr_pages = 1; + unsigned int gup_flags = 0; + struct page **pages = NULL; + long ret __attribute__ ((unused)); + ret = get_user_pages_unlocked(start, nr_pages, pages, gup_flags); + ], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_GET_USER_PAGES_UNLOCKED_GUP_FLAGS, 1, + [get_user_pages_unlocked() takes gup flags]) + ], [ + dnl # + dnl # 4.8 API change, get_user_pages_unlocked + dnl # + AC_MSG_RESULT(no) + AC_MSG_CHECKING([whether get_user_pages_unlocked() takes write flag]) + ZFS_LINUX_TRY_COMPILE([ + #include <linux/mm.h> + ], [ + unsigned long start = 0; + unsigned long nr_pages = 1; + int write = 0; + int force = 0; + long ret __attribute__ ((unused)); + struct page **pages = NULL; + ret = get_user_pages_unlocked(start, nr_pages, write, force, pages); + ], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_GET_USER_PAGES_UNLOCKED_WRITE_FLAG, 1, + [get_user_pages_unlocked() takes write flag]) + ], [ + dnl # + dnl # 4.0 API, get_user_pages_unlocked + dnl # + AC_MSG_RESULT(no) + AC_MSG_CHECKING( + [whether get_user_pages_unlocked() takes struct task_struct]) + ZFS_LINUX_TRY_COMPILE([ + #include <linux/mm.h> + ], [ + struct task_struct *tsk = NULL; + struct mm_struct *mm = NULL; + unsigned long start = 0; + unsigned long nr_pages = 1; + int write = 0; + int force = 0; + struct page **pages = NULL; + long ret __attribute__ ((unused)); + ret = get_user_pages_unlocked(tsk, mm, start, nr_pages, write, + force, pages); + ], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_GET_USER_PAGES_UNLOCKED_TASK_STRUCT, 1, + [get_user_pages_unlocked() takes struct task_struct]) + ], [ + dnl # + dnl # 4.6 API change, get_user_pages + dnl # + AC_MSG_RESULT(no) + AC_MSG_CHECKING([whether get_user_pages() takes gup flags]) + ZFS_LINUX_TRY_COMPILE([ + #include <linux/mm.h> + ], [ + struct vm_area_struct **vmas = NULL; + unsigned long start = 0; + unsigned long nr_pages = 1; + unsigned int gup_flags = 0; + struct page **pages = NULL; + long ret __attribute__ ((unused)); + ret = get_user_pages(start, nr_pages, gup_flags, pages, vmas); + ], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_GET_USER_PAGES_GUP_FLAGS, 1, + [get_user_pages() takes gup flags]) + ], [ + dnl # + dnl # 2.6.31 API, get_user_pages + dnl # + AC_MSG_RESULT(no) + AC_MSG_CHECKING([whether get_user_pages() takes struct task_struct]) + ZFS_LINUX_TRY_COMPILE([ + #include <linux/mm.h> + ], [ + struct task_struct *tsk = NULL; + struct
mm_struct *mm = NULL; + struct vm_area_struct **vmas = NULL; + unsigned long start = 0; + unsigned long nr_pages = 1; + int write = 0; + int force = 0; + struct page **pages = NULL; + int ret __attribute__ ((unused)); + ret = get_user_pages(tsk, mm, start, nr_pages, write, + force, pages, vmas); + ], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_GET_USER_PAGES_TASK_STRUCT, 1, + [get_user_pages() takes struct task_struct]) + ], [ + AC_MSG_ERROR([no; Direct IO not supported for this kernel]) + ]) + ]) + ]) + ]) + ]) +]) diff --git a/config/kernel.m4 b/config/kernel.m4 index 8e89c8014d8a..b639942e5fc5 100644 --- a/config/kernel.m4 +++ b/config/kernel.m4 @@ -136,6 +136,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [ ZFS_AC_KERNEL_SHRINKER_CALLBACK ZFS_AC_KERNEL_S_INSTANCES_LIST_HEAD ZFS_AC_KERNEL_S_D_OP + ZFS_AC_KERNEL_GET_USER_PAGES ZFS_AC_KERNEL_BDI ZFS_AC_KERNEL_SET_NLINK ZFS_AC_KERNEL_ELEVATOR_CHANGE diff --git a/include/linux/kmap_compat.h b/include/linux/kmap_compat.h index b9c7f5bcc9dc..4c519fd0cb79 100644 --- a/include/linux/kmap_compat.h +++ b/include/linux/kmap_compat.h @@ -45,4 +45,40 @@ #define zfs_access_ok(type, addr, size) access_ok(addr, size) #endif +/* + * A read maps to FOLL_WRITE because we are stating that the kernel + * will have write access to the user pages: when a Direct IO read + * request is issued, the kernel must write the data into the user + * pages. + * + * get_user_pages_unlocked was not available prior to 4.0, so we also + * check for get_user_pages on older kernels. + */ +/* 4.9 API change - the write and force flags are passed as gup flags */ +#if defined(HAVE_GET_USER_PAGES_UNLOCKED_GUP_FLAGS) +#define zfs_get_user_pages(addr, numpages, read, pages) \ + get_user_pages_unlocked(addr, numpages, pages, read ? FOLL_WRITE : 0) +/* 4.8 API change - no longer takes struct task_struct as argument */ +#elif defined(HAVE_GET_USER_PAGES_UNLOCKED_WRITE_FLAG) +#define zfs_get_user_pages(addr, numpages, read, pages) \ + get_user_pages_unlocked(addr, numpages, read, 0, pages) +/* 4.0 API */ +#elif defined(HAVE_GET_USER_PAGES_UNLOCKED_TASK_STRUCT) +#define zfs_get_user_pages(addr, numpages, read, pages) \ + get_user_pages_unlocked(current, current->mm, addr, numpages, read, 0, \ + pages) +/* 4.6 API change - no longer requires the task_struct or mm_struct arguments */ +#elif defined(HAVE_GET_USER_PAGES_GUP_FLAGS) +#define zfs_get_user_pages(addr, numpages, read, pages) \ + get_user_pages(addr, numpages, read ?
FOLL_WRITE : 0, pages, NULL) +#elif defined(HAVE_GET_USER_PAGES_TASK_STRUCT) +/* 2.6.31 API */ +#define zfs_get_user_pages(addr, numpages, read, pages) \ + get_user_pages(current, current->mm, addr, numpages, read, 0, pages, \ + NULL) +#else +#define zfs_get_user_pages(addr, numpages, read, pages) \ + SET_ERROR(ENOTSUP) +#endif + #endif /* _ZFS_KMAP_H */ diff --git a/include/spl/sys/uio.h b/include/spl/sys/uio.h index fac26079d7bc..247a45abcefd 100644 --- a/include/spl/sys/uio.h +++ b/include/spl/sys/uio.h @@ -31,6 +31,15 @@ #include #include +/* + * uio_extflg: extended flags + */ +#define UIO_COPY_DEFAULT 0x0000 /* no special options to copy */ +#define UIO_COPY_CACHED 0x0001 /* copy should not bypass caches */ +#define UIO_ASYNC 0x0002 /* uio_t is really a uioa_t */ +#define UIO_XUIO 0x0004 /* struct is xuio_t */ +#define UIO_DIRECT 0x0008 /* request direct I/O */ + typedef struct iovec iovec_t; typedef enum uio_rw { diff --git a/include/sys/abd.h b/include/sys/abd.h index b781be4da700..c689386d85ab 100644 --- a/include/sys/abd.h +++ b/include/sys/abd.h @@ -96,6 +96,9 @@ void abd_free(abd_t *); abd_t *abd_get_offset(abd_t *, size_t); abd_t *abd_get_offset_size(abd_t *, size_t, size_t); abd_t *abd_get_from_buf(void *, size_t); +#ifdef _KERNEL +abd_t *abd_get_from_pages(struct page **, uint_t); +#endif void abd_put(abd_t *); /* diff --git a/include/sys/dmu.h b/include/sys/dmu.h index 36eff4572db7..6126e7ded7e1 100644 --- a/include/sys/dmu.h +++ b/include/sys/dmu.h @@ -833,6 +833,7 @@ int dmu_free_long_object(objset_t *os, uint64_t object); #define DMU_READ_PREFETCH 0 /* prefetch */ #define DMU_READ_NO_PREFETCH 1 /* don't prefetch */ #define DMU_READ_NO_DECRYPT 2 /* don't decrypt */ +#define DMU_DIRECTIO 4 /* use direct IO */ int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, void *buf, uint32_t flags); int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf, diff --git a/module/zfs/abd.c b/module/zfs/abd.c index ac6b0b742733..234fd569d185 100644 --- a/module/zfs/abd.c +++ b/module/zfs/abd.c @@ -858,6 +858,7 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size) zfs_refcount_create(&abd->abd_children); (void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd); + abd_verify(abd); return (abd); } @@ -905,6 +906,53 @@ abd_get_from_buf(void *buf, size_t size) return (abd); } +#ifdef _KERNEL +/* + * Allocate a scatter gather ABD structure for pages. You must free this + * with abd_put() since the resulting ABD doesn't own its pages. + */ +abd_t * +abd_get_from_pages(struct page **pages, uint_t n_pages) +{ + abd_t *abd = abd_alloc_struct(); + struct sg_table table; + gfp_t gfp = __GFP_NOWARN | GFP_NOIO; + size_t size = n_pages * PAGE_SIZE; + int err; + + VERIFY3U(size, <=, SPA_MAXBLOCKSIZE); + + /* + * Even if this buf is filesystem metadata, we only track that if we + * own the underlying data buffer, which is not true in this case. + * Therefore, we don't ever use ABD_FLAG_META here.
+ */ + abd->abd_flags = 0; + abd->abd_size = size; + abd->abd_parent = NULL; + zfs_refcount_create(&abd->abd_children); + + while ((err = sg_alloc_table_from_pages(&table, pages, n_pages, 0, + size, gfp))) { + ABDSTAT_BUMP(abdstat_scatter_sg_table_retry); + schedule_timeout_interruptible(1); + } + + ABD_SCATTER(abd).abd_offset = 0; + ABD_SCATTER(abd).abd_sgl = table.sgl; + ABD_SCATTER(abd).abd_nents = table.nents; + + if (table.nents > 1) { + ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk); + abd->abd_flags |= ABD_FLAG_MULTI_CHUNK; + } + + abd_verify(abd); + return (abd); +} +#endif /* _KERNEL */ + /* * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not * free the underlying scatterlist or buffer. diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index c3127ee0efcf..fd9900386f85 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -4418,10 +4418,9 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) if (db->db_level == 0) { ASSERT(db->db_blkid != DMU_BONUS_BLKID); ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); - if (db->db_state != DB_NOFILL) { - if (dr->dt.dl.dr_data != db->db_buf) - arc_buf_destroy(dr->dt.dl.dr_data, db); - } + /* no dr_data if this is a NO_FILL or direct IO */ + if (dr->dt.dl.dr_data && dr->dt.dl.dr_data != db->db_buf) + arc_buf_destroy(dr->dt.dl.dr_data, db); } else { dnode_t *dn; diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index c7ddbcba7cf9..3bc3fa4447f8 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -54,6 +54,7 @@ #ifdef _KERNEL #include #include +#include #endif /* @@ -964,6 +965,62 @@ dmu_free_range(objset_t *os, uint64_t object, uint64_t offset, return (0); } +static void +dmu_read_abd_done(zio_t *zio) +{ + abd_put((abd_t *)zio->io_private); +} + +static int +dmu_read_abd(dnode_t *dn, uint64_t offset, uint64_t size, + abd_t *data, uint32_t flags) +{ + spa_t *spa = dn->dn_objset->os_spa; + dmu_buf_t **dbp; + int numbufs, err; + size_t off = 0; + zio_t *rio; + + /* + * Direct IO must be block aligned + */ + ASSERT(flags & DMU_DIRECTIO); + ASSERT(offset % dn->dn_datablksz == 0); + ASSERT(size % dn->dn_datablksz == 0); + + err = dmu_buf_hold_array_by_dnode(dn, offset, + size, B_FALSE, FTAG, &numbufs, &dbp, 0); + if (err) + return (err); + + rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); + + for (int i = 0; i < numbufs; i++) { + dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i]; + blkptr_t *bp = db->db_blkptr; + size_t psize = BP_GET_PSIZE(bp); + abd_t *buf; + zio_t *zio; + + buf = abd_get_offset_size(data, off, psize); + zio = zio_read(rio, spa, bp, buf, psize, + dmu_read_abd_done, buf, + ZIO_PRIORITY_SYNC_READ, 0, NULL); + + if (i+1 == numbufs) + zio_wait(zio); + else + zio_nowait(zio); + off += psize; + } + + err = zio_wait(rio); + + dmu_buf_rele_array(dbp, numbufs, FTAG); + + return (err); +} + static int dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size, void *buf, uint32_t flags) @@ -983,6 +1040,17 @@ dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size, size = newsz; } + if (flags & DMU_DIRECTIO && + offset % dn->dn_datablksz == 0 && + size % dn->dn_datablksz == 0) { + abd_t *data; + + data = abd_get_from_buf(buf, size); + err = dmu_read_abd(dn, offset, size, data, DMU_DIRECTIO); + abd_put(data); + return (err); + } + while (size > 0) { uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2); int i; @@ -1040,149 +1108,693 @@ dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf, return (dmu_read_impl(dn, offset, size, buf, flags)); } +typedef struct {
dbuf_dirty_record_t *dsa_dr; + dmu_sync_cb_t *dsa_done; + zgd_t *dsa_zgd; + dmu_tx_t *dsa_tx; +} dmu_sync_arg_t; + +/* ARGSUSED */ static void -dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size, - const void *buf, dmu_tx_t *tx) +dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) { - int i; - - for (i = 0; i < numbufs; i++) { - uint64_t tocpy; - int64_t bufoff; - dmu_buf_t *db = dbp[i]; - - ASSERT(size > 0); - - bufoff = offset - db->db_offset; - tocpy = MIN(db->db_size - bufoff, size); - - ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); - - if (tocpy == db->db_size) - dmu_buf_will_fill(db, tx); - else - dmu_buf_will_dirty(db, tx); - - (void) memcpy((char *)db->db_data + bufoff, buf, tocpy); + dmu_sync_arg_t *dsa = varg; - if (tocpy == db->db_size) - dmu_buf_fill_done(db, tx); + if (zio->io_error == 0) { + dbuf_dirty_record_t *dr = dsa->dsa_dr; + blkptr_t *bp = zio->io_bp; - offset += tocpy; - size -= tocpy; - buf = (char *)buf + tocpy; + if (BP_IS_HOLE(bp)) { + dmu_buf_t *db = NULL; + if (dr) + db = &(dr->dr_dbuf->db); + else + db = dsa->dsa_zgd->zgd_db; + /* + * A block of zeros may compress to a hole, but the + * block size still needs to be known for replay. + */ + BP_SET_LSIZE(bp, db->db_size); + } else if (!BP_IS_EMBEDDED(bp)) { + ASSERT(BP_GET_LEVEL(bp) == 0); + BP_SET_FILL(bp, 1); + } } } -void -dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, - const void *buf, dmu_tx_t *tx) +static void +dmu_sync_late_arrival_ready(zio_t *zio) { - dmu_buf_t **dbp; - int numbufs; - - if (size == 0) - return; - - VERIFY0(dmu_buf_hold_array(os, object, offset, size, - FALSE, FTAG, &numbufs, &dbp)); - dmu_write_impl(dbp, numbufs, offset, size, buf, tx); - dmu_buf_rele_array(dbp, numbufs, FTAG); + dmu_sync_ready(zio, NULL, zio->io_private); } -/* - * Note: Lustre is an external consumer of this interface. - */ -void -dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, - const void *buf, dmu_tx_t *tx) +/* ARGSUSED */ +static void +dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) { - dmu_buf_t **dbp; - int numbufs; - - if (size == 0) - return; + dmu_sync_arg_t *dsa = varg; + dbuf_dirty_record_t *dr = dsa->dsa_dr; + dmu_buf_impl_t *db = dr->dr_dbuf; + zgd_t *zgd = dsa->dsa_zgd; - VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size, - FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH)); - dmu_write_impl(dbp, numbufs, offset, size, buf, tx); - dmu_buf_rele_array(dbp, numbufs, FTAG); -} + /* + * Record the vdev(s) backing this blkptr so they can be flushed after + * the writes for the lwb have completed. 
+ */ + if (zgd && zio->io_error == 0) { + zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); + } -void -dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, - dmu_tx_t *tx) -{ - dmu_buf_t **dbp; - int numbufs, i; + mutex_enter(&db->db_mtx); + ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC); + if (zio->io_error == 0) { + dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE); + if (dr->dt.dl.dr_nopwrite) { + blkptr_t *bp = zio->io_bp; + blkptr_t *bp_orig = &zio->io_bp_orig; + uint8_t chksum = BP_GET_CHECKSUM(bp_orig); - if (size == 0) - return; + ASSERT(BP_EQUAL(bp, bp_orig)); + VERIFY(BP_EQUAL(bp, db->db_blkptr)); + ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF); + VERIFY(zio_checksum_table[chksum].ci_flags & + ZCHECKSUM_FLAG_NOPWRITE); + } + dr->dt.dl.dr_overridden_by = *zio->io_bp; + dr->dt.dl.dr_override_state = DR_OVERRIDDEN; + dr->dt.dl.dr_copies = zio->io_prop.zp_copies; - VERIFY(0 == dmu_buf_hold_array(os, object, offset, size, - FALSE, FTAG, &numbufs, &dbp)); + /* + * Old style holes are filled with all zeros, whereas + * new-style holes maintain their lsize, type, level, + * and birth time (see zio_write_compress). While we + * need to reset the BP_SET_LSIZE() call that happened + * in dmu_sync_ready for old style holes, we do *not* + * want to wipe out the information contained in new + * style holes. Thus, only zero out the block pointer if + * it's an old style hole. + */ + if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) && + dr->dt.dl.dr_overridden_by.blk_birth == 0) + BP_ZERO(&dr->dt.dl.dr_overridden_by); + } else { + dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; + } + cv_broadcast(&db->db_changed); + mutex_exit(&db->db_mtx); - for (i = 0; i < numbufs; i++) { - dmu_buf_t *db = dbp[i]; + if (dsa->dsa_done) + dsa->dsa_done(dsa->dsa_zgd, zio->io_error); - dmu_buf_will_not_fill(db, tx); - } - dmu_buf_rele_array(dbp, numbufs, FTAG); + kmem_free(dsa, sizeof (*dsa)); } -void -dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset, - void *data, uint8_t etype, uint8_t comp, int uncompressed_size, - int compressed_size, int byteorder, dmu_tx_t *tx) +static void +dmu_sync_late_arrival_done(zio_t *zio) { - dmu_buf_t *db; + blkptr_t *bp = zio->io_bp; + dmu_sync_arg_t *dsa = zio->io_private; + zgd_t *zgd = dsa->dsa_zgd; - ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES); - ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS); - VERIFY0(dmu_buf_hold_noread(os, object, offset, - FTAG, &db)); + if (zio->io_error == 0) { + /* + * Record the vdev(s) backing this blkptr so they can be + * flushed after the writes for the lwb have completed. 
+ */ + zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); + if (!BP_IS_HOLE(bp)) { + ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig); + ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE)); + ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig)); + ASSERT(zio->io_bp->blk_birth == zio->io_txg); + ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa)); + zio_free(zio->io_spa, zio->io_txg, zio->io_bp); + } + } + dmu_tx_commit(dsa->dsa_tx); + dsa->dsa_done(dsa->dsa_zgd, zio->io_error); + abd_put(zio->io_abd); + kmem_free(dsa, sizeof (*dsa)); +} + +static int +dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd, + zio_prop_t *zp, zbookmark_phys_t *zb) +{ + dmu_sync_arg_t *dsa; + dmu_tx_t *tx; + + tx = dmu_tx_create(os); + dmu_tx_hold_space(tx, zgd->zgd_db->db_size); + if (dmu_tx_assign(tx, TXG_WAIT) != 0) { + dmu_tx_abort(tx); + /* Make zl_get_data do txg_wait_synced() */ + return (SET_ERROR(EIO)); + } + + /* + * In order to prevent the zgd's lwb from being free'd prior to + * dmu_sync_late_arrival_done() being called, we have to ensure + * the lwb's "max txg" takes this tx's txg into account. + */ + zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx)); + + dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); + dsa->dsa_dr = NULL; + dsa->dsa_done = done; + dsa->dsa_zgd = zgd; + dsa->dsa_tx = tx; + + /* + * Since we are currently syncing this txg, it's nontrivial to + * determine what BP to nopwrite against, so we disable nopwrite. + * + * When syncing, the db_blkptr is initially the BP of the previous + * txg. We can not nopwrite against it because it will be changed + * (this is similar to the non-late-arrival case where the dbuf is + * dirty in a future txg). + * + * Then dbuf_write_ready() sets bp_blkptr to the location we will write. + * We can not nopwrite against it because although the BP will not + * (typically) be changed, the data has not yet been persisted to this + * location. + * + * Finally, when dbuf_write_done() is called, it is theoretically + * possible to always nopwrite, because the data that was written in + * this txg is the same data that we are trying to write. However we + * would need to check that this dbuf is not dirty in any future + * txg's (as we do in the normal dmu_sync() path). For simplicity, we + * don't nopwrite in this case.
+ */ + zp->zp_nopwrite = B_FALSE; + + zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp, + abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size), + zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp, + dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done, + dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb)); + + return (0); +} + +/* + * Intent log support: sync the block associated with db to disk. + * N.B. and XXX: the caller is responsible for making sure that the + * data isn't changing while dmu_sync() is writing it. + * + * Return values: + * + * EEXIST: this txg has already been synced, so there's nothing to do. + * The caller should not log the write. + * + * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. + * The caller should not log the write. + * + * EALREADY: this block is already in the process of being synced. + * The caller should track its progress (somehow). + * + * EIO: could not do the I/O. + * The caller should do a txg_wait_synced(). + * + * 0: the I/O has been initiated. + * The caller should log this blkptr in the done callback. + * It is possible that the I/O will fail, in which case + * the error will be reported to the done callback and + * propagated to pio from zio_done(). + */ +int +dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db; + objset_t *os = db->db_objset; + dsl_dataset_t *ds = os->os_dsl_dataset; + dbuf_dirty_record_t *dr; + dmu_sync_arg_t *dsa; + zbookmark_phys_t zb; + zio_prop_t zp; + dnode_t *dn; + + ASSERT(pio != NULL); + ASSERT(txg != 0); + + SET_BOOKMARK(&zb, ds->ds_object, + db->db.db_object, db->db_level, db->db_blkid); + + DB_DNODE_ENTER(db); + dn = DB_DNODE(db); + dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp); + DB_DNODE_EXIT(db); + + /* + * If we're frozen (running ziltest), we always need to generate a bp. + */ + if (txg > spa_freeze_txg(os->os_spa)) + return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); + + /* + * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf() + * and us. If we determine that this txg is not yet syncing, + * but it begins to sync a moment later, that's OK because the + * sync thread will block in dbuf_sync_leaf() until we drop db_mtx. + */ + mutex_enter(&db->db_mtx); + + if (txg <= spa_last_synced_txg(os->os_spa)) { + /* + * This txg has already synced. There's nothing to do. + */ + mutex_exit(&db->db_mtx); + return (SET_ERROR(EEXIST)); + } + + if (txg <= spa_syncing_txg(os->os_spa)) { + /* + * This txg is currently syncing, so we can't mess with + * the dirty record anymore; just write a new log block. + */ + mutex_exit(&db->db_mtx); + return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); + } + + dr = db->db_last_dirty; + while (dr && dr->dr_txg != txg) + dr = dr->dr_next; + + if (dr == NULL) { + /* + * There's no dr for this dbuf, so it must have been freed. + * There's no need to log writes to freed blocks, so we're done. + */ + mutex_exit(&db->db_mtx); + return (SET_ERROR(ENOENT)); + } + + ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg); + + if (db->db_blkptr != NULL) { + /* + * We need to fill in zgd_bp with the current blkptr so that + * the nopwrite code can check if we're writing the same + * data that's already on disk. We can only nopwrite if we + * are sure that after making the copy, db_blkptr will not + * change until our i/o completes. 
We ensure this by + * holding the db_mtx, and only allowing nopwrite if the + * block is not already dirty (see below). This is verified + * by dmu_sync_done(), which VERIFYs that the db_blkptr has + * not changed. + */ + *zgd->zgd_bp = *db->db_blkptr; + } + + /* + * Assume the on-disk data is X, the current syncing data (in + * txg - 1) is Y, and the current in-memory data is Z (currently + * in dmu_sync). + * + * We usually want to perform a nopwrite if X and Z are the + * same. However, if Y is different (i.e. the BP is going to + * change before this write takes effect), then a nopwrite will + * be incorrect - we would override with X, which could have + * been freed when Y was written. + * + * (Note that this is not a concern when we are nop-writing from + * syncing context, because X and Y must be identical, because + * all previous txgs have been synced.) + * + * Therefore, we disable nopwrite if the current BP could change + * before this TXG. There are two ways it could change: by + * being dirty (dr_next is non-NULL), or by being freed + * (dnode_block_freed()). This behavior is verified by + * zio_done(), which VERIFYs that the override BP is identical + * to the on-disk BP. + */ + DB_DNODE_ENTER(db); + dn = DB_DNODE(db); + if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid)) + zp.zp_nopwrite = B_FALSE; + DB_DNODE_EXIT(db); + + ASSERT(dr->dr_txg == txg); + if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC || + dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { + /* + * We have already issued a sync write for this buffer, + * or this buffer has already been synced. It could not + * have been dirtied since, or we would have cleared the state. + */ + mutex_exit(&db->db_mtx); + return (SET_ERROR(EALREADY)); + } + + ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); + dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; + mutex_exit(&db->db_mtx); + + dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); + dsa->dsa_dr = dr; + dsa->dsa_done = done; + dsa->dsa_zgd = zgd; + dsa->dsa_tx = NULL; + + zio_nowait(arc_write(pio, os->os_spa, txg, + zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db), + &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa, + ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb)); + + return (0); +} + +static void +dmu_write_direct_ready(zio_t *zio) +{ + dmu_sync_ready(zio, NULL, zio->io_private); +} + +static void +dmu_write_direct_done(zio_t *zio) +{ + dmu_sync_arg_t *dsa = zio->io_private; + dbuf_dirty_record_t *dr = dsa->dsa_dr; + dmu_buf_impl_t *db = dr->dr_dbuf; + + mutex_enter(&db->db_mtx); + ASSERT(db->db.db_data == NULL); + ASSERT(dr->dt.dl.dr_data == NULL); + db->db_state = DB_UNCACHED; + mutex_exit(&db->db_mtx); + + abd_put(zio->io_abd); + dmu_sync_done(zio, NULL, zio->io_private); + kmem_free(zio->io_bp, sizeof (blkptr_t)); +} + +static int +dmu_write_direct(zio_t *pio, dmu_buf_impl_t *db, abd_t *data, dmu_tx_t *tx) +{ + objset_t *os = db->db_objset; + dsl_dataset_t *ds = os->os_dsl_dataset; + dbuf_dirty_record_t *dr; + dmu_sync_arg_t *dsa; + zbookmark_phys_t zb; + zio_prop_t zp; + dnode_t *dn; + uint64_t txg = dmu_tx_get_txg(tx); + blkptr_t *bp; + zio_t *zio; + + ASSERT(tx != NULL); + + SET_BOOKMARK(&zb, ds->ds_object, + db->db.db_object, db->db_level, db->db_blkid); + + /* + * Direct IO writes are not supported for frozen pools (ziltest). + */ + if (txg > spa_freeze_txg(os->os_spa)) + return (SET_ERROR(ENOTSUP)); + + DB_DNODE_ENTER(db); + dn = DB_DNODE(db); + dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp); + DB_DNODE_EXIT(db); + + /* + * Dirty this dbuf with DB_NOFILL since we will
not have any data + * associated with the dbuf. + */ + dmu_buf_will_not_fill(&db->db, tx); + + /* XXX - probably don't need this, since we are in an open tx */ + mutex_enter(&db->db_mtx); + + ASSERT(txg > spa_last_synced_txg(os->os_spa)); + ASSERT(txg > spa_syncing_txg(os->os_spa)); + + dr = db->db_last_dirty; + VERIFY(dr->dr_txg == txg); + + bp = kmem_alloc(sizeof (blkptr_t), KM_SLEEP); + if (db->db_blkptr != NULL) { + /* + * fill in bp with current blkptr so that + * the nopwrite code can check if we're writing the same + * data that's already on disk. + */ + *bp = *db->db_blkptr; + } else { + bzero(bp, sizeof (blkptr_t)); + } + + /* + * Disable nopwrite if the current BP could change before + * this TXG syncs. + */ + if (dr->dr_next != NULL) + zp.zp_nopwrite = B_FALSE; + + ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); + dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; + mutex_exit(&db->db_mtx); + + /* + * We will not be writing this block in syncing context, so + * update the dirty space accounting. + * XXX - this should be handled as part of will_not_fill() + */ + dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted, txg); + + dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); + dsa->dsa_dr = dr; + dsa->dsa_done = NULL; + dsa->dsa_zgd = NULL; + dsa->dsa_tx = NULL; + + zio = zio_write(pio, os->os_spa, txg, bp, data, + db->db.db_size, db->db.db_size, &zp, + dmu_write_direct_ready, NULL, NULL, dmu_write_direct_done, dsa, + ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb); + + if (pio == NULL) + zio_wait(zio); + else + zio_nowait(zio); + return (0); +} + +static int +dmu_write_abd(dnode_t *dn, uint64_t offset, uint64_t size, + abd_t *data, uint32_t flags, dmu_tx_t *tx) +{ + spa_t *spa = dn->dn_objset->os_spa; + dmu_buf_t **dbp; + int numbufs, err; + size_t off = 0; + zio_t *rio; + + /* + * Direct IO must be block aligned + */ + ASSERT(flags & DMU_DIRECTIO); + ASSERT(offset % dn->dn_datablksz == 0); + ASSERT(size % dn->dn_datablksz == 0); + + err = dmu_buf_hold_array_by_dnode(dn, offset, + size, B_FALSE, FTAG, &numbufs, &dbp, 0); + if (err) + return (err); + + rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); + + for (int i = 0; i < numbufs; i++) { + dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i]; + size_t dsize = dn->dn_datablksz; + abd_t *buf; + + buf = abd_get_offset_size(data, off, dsize); + if (i+1 == numbufs) { + /* + * Passing NULL as the zio_t * here so the pio + * is NULL in dmu_write_direct. This allows us + * to make use of the calling thread when issuing + * zio_write instead of handing off to a taskq.
+ */ + err = dmu_write_direct(NULL, db, buf, tx); + } else { + err = dmu_write_direct(rio, db, buf, tx); + } + ASSERT(err == 0); + + off += dsize; + } + err = zio_wait(rio); + + dmu_buf_rele_array(dbp, numbufs, FTAG); + + return (err); +} + +static void +dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size, + const void *buf, dmu_tx_t *tx) +{ + int i; + + for (i = 0; i < numbufs; i++) { + uint64_t tocpy; + int64_t bufoff; + dmu_buf_t *db = dbp[i]; + + ASSERT(size > 0); + + bufoff = offset - db->db_offset; + tocpy = MIN(db->db_size - bufoff, size); + + ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); + + if (tocpy == db->db_size) + dmu_buf_will_fill(db, tx); + else + dmu_buf_will_dirty(db, tx); + + (void) memcpy((char *)db->db_data + bufoff, buf, tocpy); + + if (tocpy == db->db_size) + dmu_buf_fill_done(db, tx); + + offset += tocpy; + size -= tocpy; + buf = (char *)buf + tocpy; + } +} + +void +dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + const void *buf, dmu_tx_t *tx) +{ + dmu_buf_t **dbp; + int numbufs; + + if (size == 0) + return; + + VERIFY0(dmu_buf_hold_array(os, object, offset, size, + FALSE, FTAG, &numbufs, &dbp)); + dmu_write_impl(dbp, numbufs, offset, size, buf, tx); + dmu_buf_rele_array(dbp, numbufs, FTAG); +} + +/* + * Note: Lustre is an external consumer of this interface. + * XXX - always directio since Lustre is the only consumer + */ +void +dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, + const void *buf, dmu_tx_t *tx) +{ + dmu_buf_t **dbp; + int numbufs; + boolean_t directio = B_TRUE; + + if (size == 0) + return; + + if (directio && + offset % dn->dn_datablksz == 0 && + size % dn->dn_datablksz == 0) { + abd_t *data; + + data = abd_get_from_buf((void *)buf, size); + VERIFY0(dmu_write_abd(dn, offset, size, + data, DMU_DIRECTIO, tx)); + abd_put(data); + return; + } + + VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size, + FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH)); + dmu_write_impl(dbp, numbufs, offset, size, buf, tx); + dmu_buf_rele_array(dbp, numbufs, FTAG); +} + +void +dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + dmu_tx_t *tx) +{ + dmu_buf_t **dbp; + int numbufs, i; + + if (size == 0) + return; + + VERIFY(0 == dmu_buf_hold_array(os, object, offset, size, + FALSE, FTAG, &numbufs, &dbp)); + + for (i = 0; i < numbufs; i++) { + dmu_buf_t *db = dbp[i]; + + dmu_buf_will_not_fill(db, tx); + } + dmu_buf_rele_array(dbp, numbufs, FTAG); +} + +void +dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset, + void *data, uint8_t etype, uint8_t comp, int uncompressed_size, + int compressed_size, int byteorder, dmu_tx_t *tx) +{ + dmu_buf_t *db; + + ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES); + ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS); + VERIFY0(dmu_buf_hold_noread(os, object, offset, + FTAG, &db)); + + dmu_buf_write_embedded(db, + data, (bp_embedded_type_t)etype, (enum zio_compress)comp, + uncompressed_size, compressed_size, byteorder, tx); + + dmu_buf_rele(db, FTAG); +} + +void +dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + dmu_tx_t *tx) +{ + int numbufs, i; + dmu_buf_t **dbp; + + VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG, + &numbufs, &dbp)); + for (i = 0; i < numbufs; i++) + dmu_buf_redact(dbp[i], tx); + dmu_buf_rele_array(dbp, numbufs, FTAG); +} + +/* + * DMU support for xuio + */ +kstat_t *xuio_ksp = NULL; + +typedef struct xuio_stats { + /* loaned yet not returned arc_buf */ + kstat_named_t 
xuiostat_onloan_rbuf; + kstat_named_t xuiostat_onloan_wbuf; + /* whether a copy is made when loaning out a read buffer */ + kstat_named_t xuiostat_rbuf_copied; + kstat_named_t xuiostat_rbuf_nocopy; + /* whether a copy is made when assigning a write buffer */ + kstat_named_t xuiostat_wbuf_copied; + kstat_named_t xuiostat_wbuf_nocopy; +} xuio_stats_t; + +static xuio_stats_t xuio_stats = { + { "onloan_read_buf", KSTAT_DATA_UINT64 }, + { "onloan_write_buf", KSTAT_DATA_UINT64 }, { "read_buf_copied", KSTAT_DATA_UINT64 }, { "read_buf_nocopy", KSTAT_DATA_UINT64 }, { "write_buf_copied", KSTAT_DATA_UINT64 }, @@ -1314,6 +1926,45 @@ xuio_stat_wbuf_nocopy(void) } #ifdef _KERNEL +int +dmu_uio_dnode_rw_direct(dnode_t *dn, uio_t *uio, uint64_t size, + dmu_tx_t *tx, boolean_t read) +{ + const struct iovec *iov = uio->uio_iov; + ulong_t addr = (ulong_t)iov->iov_base; + uint_t numpages; + abd_t *data; + int err; + + ASSERT(size % PAGE_SIZE == 0); + numpages = size / PAGE_SIZE; + struct page **pages = + kmem_alloc(numpages * sizeof (struct page *), KM_SLEEP); + + err = zfs_get_user_pages(addr, numpages, read, pages); + if (err == ENOTSUP) { + kmem_free(pages, numpages * sizeof (struct page *)); + return (err); + } + ASSERT3U(err, ==, numpages); + + data = abd_get_from_pages(pages, numpages); + + if (read) { + err = dmu_read_abd(dn, uio->uio_loffset, size, + data, DMU_DIRECTIO); + } else { /* write */ + err = dmu_write_abd(dn, uio->uio_loffset, size, + data, DMU_DIRECTIO, tx); + } + abd_put(data); + for (int i = 0; i < numpages; i++) + put_page(pages[i]); + kmem_free(pages, numpages * sizeof (struct page *)); + if (err == 0) + uioskip(uio, size); + return (err); +} + int dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) { @@ -1323,8 +1974,19 @@ dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) xuio_t *xuio = NULL; #endif + if (uio->uio_extflg & UIO_DIRECT && + uio->uio_iovcnt == 1 && + uio->uio_loffset % dn->dn_datablksz == 0 && + size % dn->dn_datablksz == 0) { + err = dmu_uio_dnode_rw_direct(dn, uio, size, NULL, B_TRUE); + return (err); + } + if (uio->uio_extflg & UIO_DIRECT) + return (SET_ERROR(ENOTSUP)); + /* * NB: we could do this block-at-a-time, but it's nice * to be reading in parallel. */ err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, @@ -1388,628 +2050,284 @@ dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size) int err; if (size == 0) - return (0); - - DB_DNODE_ENTER(db); - dn = DB_DNODE(db); - err = dmu_read_uio_dnode(dn, uio, size); - DB_DNODE_EXIT(db); - - return (err); -} - -/* - * Read 'size' bytes into the uio buffer. - * From the specified object - * Starting at offset uio->uio_loffset.
- */ -int -dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) -{ - dnode_t *dn; - int err; - - if (size == 0) - return (0); - - err = dnode_hold(os, object, FTAG, &dn); - if (err) - return (err); - - err = dmu_read_uio_dnode(dn, uio, size); - - dnode_rele(dn, FTAG); - - return (err); -} - -int -dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) -{ - dmu_buf_t **dbp; - int numbufs; - int err = 0; - int i; - - err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, - FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); - if (err) - return (err); - - for (i = 0; i < numbufs; i++) { - uint64_t tocpy; - int64_t bufoff; - dmu_buf_t *db = dbp[i]; - - ASSERT(size > 0); - - bufoff = uio->uio_loffset - db->db_offset; - tocpy = MIN(db->db_size - bufoff, size); - - ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); - - if (tocpy == db->db_size) - dmu_buf_will_fill(db, tx); - else - dmu_buf_will_dirty(db, tx); - - /* - * XXX uiomove could block forever (eg.nfs-backed - * pages). There needs to be a uiolockdown() function - * to lock the pages in memory, so that uiomove won't - * block. - */ - err = uiomove((char *)db->db_data + bufoff, tocpy, - UIO_WRITE, uio); - - if (tocpy == db->db_size) - dmu_buf_fill_done(db, tx); - - if (err) - break; - - size -= tocpy; - } - - dmu_buf_rele_array(dbp, numbufs, FTAG); - return (err); -} - -/* - * Write 'size' bytes from the uio buffer. - * To object zdb->db_object. - * Starting at offset uio->uio_loffset. - * - * If the caller already has a dbuf in the target object - * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(), - * because we don't have to find the dnode_t for the object. - */ -int -dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, - dmu_tx_t *tx) -{ - dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; - dnode_t *dn; - int err; - - if (size == 0) - return (0); - - DB_DNODE_ENTER(db); - dn = DB_DNODE(db); - err = dmu_write_uio_dnode(dn, uio, size, tx); - DB_DNODE_EXIT(db); - - return (err); -} - -/* - * Write 'size' bytes from the uio buffer. - * To the specified object. - * Starting at offset uio->uio_loffset. - */ -int -dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, - dmu_tx_t *tx) -{ - dnode_t *dn; - int err; - - if (size == 0) - return (0); - - err = dnode_hold(os, object, FTAG, &dn); - if (err) - return (err); - - err = dmu_write_uio_dnode(dn, uio, size, tx); - - dnode_rele(dn, FTAG); - - return (err); -} -#endif /* _KERNEL */ - -/* - * Allocate a loaned anonymous arc buffer. - */ -arc_buf_t * -dmu_request_arcbuf(dmu_buf_t *handle, int size) -{ - dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle; - - return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size)); -} - -/* - * Free a loaned arc buffer. 
- */ -void -dmu_return_arcbuf(arc_buf_t *buf) -{ - arc_return_buf(buf, FTAG); - arc_buf_destroy(buf, FTAG); -} - -void -dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset, - dmu_buf_t *handle, dmu_tx_t *tx) -{ - dmu_buf_t *dst_handle; - dmu_buf_impl_t *dstdb; - dmu_buf_impl_t *srcdb = (dmu_buf_impl_t *)handle; - dmu_object_type_t type; - arc_buf_t *abuf; - uint64_t datalen; - boolean_t byteorder; - uint8_t salt[ZIO_DATA_SALT_LEN]; - uint8_t iv[ZIO_DATA_IV_LEN]; - uint8_t mac[ZIO_DATA_MAC_LEN]; - - ASSERT3P(srcdb->db_buf, !=, NULL); - - /* hold the db that we want to write to */ - VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &dst_handle, - DMU_READ_NO_DECRYPT)); - dstdb = (dmu_buf_impl_t *)dst_handle; - datalen = arc_buf_size(srcdb->db_buf); - - DB_DNODE_ENTER(dstdb); - type = DB_DNODE(dstdb)->dn_type; - DB_DNODE_EXIT(dstdb); - - /* allocated an arc buffer that matches the type of srcdb->db_buf */ - if (arc_is_encrypted(srcdb->db_buf)) { - arc_get_raw_params(srcdb->db_buf, &byteorder, salt, iv, mac); - abuf = arc_loan_raw_buf(os->os_spa, dmu_objset_id(os), - byteorder, salt, iv, mac, type, - datalen, arc_buf_lsize(srcdb->db_buf), - arc_get_compression(srcdb->db_buf)); - } else { - /* we won't get a compressed db back from dmu_buf_hold() */ - ASSERT3U(arc_get_compression(srcdb->db_buf), - ==, ZIO_COMPRESS_OFF); - abuf = arc_loan_buf(os->os_spa, - DMU_OT_IS_METADATA(type), datalen); - } - - ASSERT3U(datalen, ==, arc_buf_size(abuf)); - - /* copy the data to the new buffer and assign it to the dstdb */ - bcopy(srcdb->db_buf->b_data, abuf->b_data, datalen); - dbuf_assign_arcbuf(dstdb, abuf, tx); - dmu_buf_rele(dst_handle, FTAG); -} - -/* - * When possible directly assign passed loaned arc buffer to a dbuf. - * If this is not possible copy the contents of passed arc buf via - * dmu_write(). - */ -int -dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, - dmu_tx_t *tx) -{ - dmu_buf_impl_t *db; - objset_t *os = dn->dn_objset; - uint64_t object = dn->dn_object; - uint32_t blksz = (uint32_t)arc_buf_lsize(buf); - uint64_t blkid; - - rw_enter(&dn->dn_struct_rwlock, RW_READER); - blkid = dbuf_whichblock(dn, 0, offset); - db = dbuf_hold(dn, blkid, FTAG); - if (db == NULL) - return (SET_ERROR(EIO)); - rw_exit(&dn->dn_struct_rwlock); - - /* - * We can only assign if the offset is aligned, the arc buf is the - * same size as the dbuf, and the dbuf is not metadata. - */ - if (offset == db->db.db_offset && blksz == db->db.db_size) { - dbuf_assign_arcbuf(db, buf, tx); - dbuf_rele(db, FTAG); - } else { - /* compressed bufs must always be assignable to their dbuf */ - ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF); - ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED)); - - dbuf_rele(db, FTAG); - dmu_write(os, object, offset, blksz, buf->b_data, tx); - dmu_return_arcbuf(buf); - XUIOSTAT_BUMP(xuiostat_wbuf_copied); - } + return (0); - return (0); + DB_DNODE_ENTER(db); + dn = DB_DNODE(db); + err = dmu_read_uio_dnode(dn, uio, size); + DB_DNODE_EXIT(db); + + return (err); } +/* + * Read 'size' bytes into the uio buffer. + * From the specified object + * Starting at offset uio->uio_loffset. 
+ */ int -dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf, - dmu_tx_t *tx) +dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) { + dnode_t *dn; int err; - dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle; - - DB_DNODE_ENTER(dbuf); - err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx); - DB_DNODE_EXIT(dbuf); - return (err); -} + if (size == 0) + return (0); -typedef struct { - dbuf_dirty_record_t *dsa_dr; - dmu_sync_cb_t *dsa_done; - zgd_t *dsa_zgd; - dmu_tx_t *dsa_tx; -} dmu_sync_arg_t; + err = dnode_hold(os, object, FTAG, &dn); + if (err) + return (err); -/* ARGSUSED */ -static void -dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) -{ - dmu_sync_arg_t *dsa = varg; - dmu_buf_t *db = dsa->dsa_zgd->zgd_db; - blkptr_t *bp = zio->io_bp; + err = dmu_read_uio_dnode(dn, uio, size); - if (zio->io_error == 0) { - if (BP_IS_HOLE(bp)) { - /* - * A block of zeros may compress to a hole, but the - * block size still needs to be known for replay. - */ - BP_SET_LSIZE(bp, db->db_size); - } else if (!BP_IS_EMBEDDED(bp)) { - ASSERT(BP_GET_LEVEL(bp) == 0); - BP_SET_FILL(bp, 1); - } - } -} + dnode_rele(dn, FTAG); -static void -dmu_sync_late_arrival_ready(zio_t *zio) -{ - dmu_sync_ready(zio, NULL, zio->io_private); + return (err); } -/* ARGSUSED */ -static void -dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) +int +dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) { - dmu_sync_arg_t *dsa = varg; - dbuf_dirty_record_t *dr = dsa->dsa_dr; - dmu_buf_impl_t *db = dr->dr_dbuf; - zgd_t *zgd = dsa->dsa_zgd; + dmu_buf_t **dbp; + int numbufs; + int err = 0; + int i; - /* - * Record the vdev(s) backing this blkptr so they can be flushed after - * the writes for the lwb have completed. - */ - if (zio->io_error == 0) { - zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); + if (uio->uio_extflg & UIO_DIRECT && + uio->uio_iovcnt == 1 && + uio->uio_loffset % dn->dn_datablksz == 0 && + size % dn->dn_datablksz == 0) { + err = dmu_uio_dnode_rw_direct(dn, uio, size, tx, B_FALSE); + return (err); } + if (uio->uio_extflg & UIO_DIRECT) + return (SET_ERROR(ENOTSUP)); - mutex_enter(&db->db_mtx); - ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC); - if (zio->io_error == 0) { - dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE); - if (dr->dt.dl.dr_nopwrite) { - blkptr_t *bp = zio->io_bp; - blkptr_t *bp_orig = &zio->io_bp_orig; - uint8_t chksum = BP_GET_CHECKSUM(bp_orig); + err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, + FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); + if (err) + return (err); - ASSERT(BP_EQUAL(bp, bp_orig)); - VERIFY(BP_EQUAL(bp, db->db_blkptr)); - ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF); - VERIFY(zio_checksum_table[chksum].ci_flags & - ZCHECKSUM_FLAG_NOPWRITE); - } - dr->dt.dl.dr_overridden_by = *zio->io_bp; - dr->dt.dl.dr_override_state = DR_OVERRIDDEN; - dr->dt.dl.dr_copies = zio->io_prop.zp_copies; + for (i = 0; i < numbufs; i++) { + uint64_t tocpy; + int64_t bufoff; + dmu_buf_t *db = dbp[i]; - /* - * Old style holes are filled with all zeros, whereas - * new-style holes maintain their lsize, type, level, - * and birth time (see zio_write_compress). While we - * need to reset the BP_SET_LSIZE() call that happened - * in dmu_sync_ready for old style holes, we do *not* - * want to wipe out the information contained in new - * style holes. Thus, only zero out the block pointer if - * it's an old style hole. 
- */ - if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) && - dr->dt.dl.dr_overridden_by.blk_birth == 0) - BP_ZERO(&dr->dt.dl.dr_overridden_by); - } else { - dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; - } - cv_broadcast(&db->db_changed); - mutex_exit(&db->db_mtx); + ASSERT(size > 0); - dsa->dsa_done(dsa->dsa_zgd, zio->io_error); + bufoff = uio->uio_loffset - db->db_offset; + tocpy = MIN(db->db_size - bufoff, size); - kmem_free(dsa, sizeof (*dsa)); -} + ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); -static void -dmu_sync_late_arrival_done(zio_t *zio) -{ - blkptr_t *bp = zio->io_bp; - dmu_sync_arg_t *dsa = zio->io_private; - zgd_t *zgd = dsa->dsa_zgd; + if (tocpy == db->db_size) + dmu_buf_will_fill(db, tx); + else + dmu_buf_will_dirty(db, tx); - if (zio->io_error == 0) { /* - * Record the vdev(s) backing this blkptr so they can be - * flushed after the writes for the lwb have completed. + * XXX uiomove could block forever (eg.nfs-backed + * pages). There needs to be a uiolockdown() function + * to lock the pages in memory, so that uiomove won't + * block. */ - zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); + err = uiomove((char *)db->db_data + bufoff, tocpy, + UIO_WRITE, uio); - if (!BP_IS_HOLE(bp)) { - ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig); - ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE)); - ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig)); - ASSERT(zio->io_bp->blk_birth == zio->io_txg); - ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa)); - zio_free(zio->io_spa, zio->io_txg, zio->io_bp); - } - } + if (tocpy == db->db_size) + dmu_buf_fill_done(db, tx); - dmu_tx_commit(dsa->dsa_tx); + if (err) + break; - dsa->dsa_done(dsa->dsa_zgd, zio->io_error); + size -= tocpy; + } - abd_put(zio->io_abd); - kmem_free(dsa, sizeof (*dsa)); + dmu_buf_rele_array(dbp, numbufs, FTAG); + return (err); } -static int -dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd, - zio_prop_t *zp, zbookmark_phys_t *zb) +/* + * Write 'size' bytes from the uio buffer. + * To object zdb->db_object. + * Starting at offset uio->uio_loffset. + * + * If the caller already has a dbuf in the target object + * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(), + * because we don't have to find the dnode_t for the object. + */ +int +dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, + dmu_tx_t *tx) { - dmu_sync_arg_t *dsa; - dmu_tx_t *tx; + dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; + dnode_t *dn; + int err; - tx = dmu_tx_create(os); - dmu_tx_hold_space(tx, zgd->zgd_db->db_size); - if (dmu_tx_assign(tx, TXG_WAIT) != 0) { - dmu_tx_abort(tx); - /* Make zl_get_data do txg_waited_synced() */ - return (SET_ERROR(EIO)); - } + if (size == 0) + return (0); - /* - * In order to prevent the zgd's lwb from being free'd prior to - * dmu_sync_late_arrival_done() being called, we have to ensure - * the lwb's "max txg" takes this tx's txg into account. - */ - zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx)); + DB_DNODE_ENTER(db); + dn = DB_DNODE(db); + err = dmu_write_uio_dnode(dn, uio, size, tx); + DB_DNODE_EXIT(db); - dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); - dsa->dsa_dr = NULL; - dsa->dsa_done = done; - dsa->dsa_zgd = zgd; - dsa->dsa_tx = tx; + return (err); +} - /* - * Since we are currently syncing this txg, it's nontrivial to - * determine what BP to nopwrite against, so we disable nopwrite. - * - * When syncing, the db_blkptr is initially the BP of the previous - * txg. 
We can not nopwrite against it because it will be changed - * (this is similar to the non-late-arrival case where the dbuf is - * dirty in a future txg). - * - * Then dbuf_write_ready() sets bp_blkptr to the location we will write. - * We can not nopwrite against it because although the BP will not - * (typically) be changed, the data has not yet been persisted to this - * location. - * - * Finally, when dbuf_write_done() is called, it is theoretically - * possible to always nopwrite, because the data that was written in - * this txg is the same data that we are trying to write. However we - * would need to check that this dbuf is not dirty in any future - * txg's (as we do in the normal dmu_sync() path). For simplicity, we - * don't nopwrite in this case. - */ - zp->zp_nopwrite = B_FALSE; +/* + * Write 'size' bytes from the uio buffer. + * To the specified object. + * Starting at offset uio->uio_loffset. + */ +int +dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, + dmu_tx_t *tx) +{ + dnode_t *dn; + int err; + + if (size == 0) + return (0); + + err = dnode_hold(os, object, FTAG, &dn); + if (err) + return (err); - zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp, - abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size), - zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp, - dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done, - dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb)); + err = dmu_write_uio_dnode(dn, uio, size, tx); - return (0); + dnode_rele(dn, FTAG); + + return (err); } +#endif /* _KERNEL */ /* - * Intent log support: sync the block associated with db to disk. - * N.B. and XXX: the caller is responsible for making sure that the - * data isn't changing while dmu_sync() is writing it. - * - * Return values: - * - * EEXIST: this txg has already been synced, so there's nothing to do. - * The caller should not log the write. - * - * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. - * The caller should not log the write. - * - * EALREADY: this block is already in the process of being synced. - * The caller should track its progress (somehow). - * - * EIO: could not do the I/O. - * The caller should do a txg_wait_synced(). - * - * 0: the I/O has been initiated. - * The caller should log this blkptr in the done callback. - * It is possible that the I/O will fail, in which case - * the error will be reported to the done callback and - * propagated to pio from zio_done(). + * Allocate a loaned anonymous arc buffer. */ -int -dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) +arc_buf_t * +dmu_request_arcbuf(dmu_buf_t *handle, int size) { - dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db; - objset_t *os = db->db_objset; - dsl_dataset_t *ds = os->os_dsl_dataset; - dbuf_dirty_record_t *dr; - dmu_sync_arg_t *dsa; - zbookmark_phys_t zb; - zio_prop_t zp; - dnode_t *dn; + dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle; - ASSERT(pio != NULL); - ASSERT(txg != 0); + return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size)); +} - SET_BOOKMARK(&zb, ds->ds_object, - db->db.db_object, db->db_level, db->db_blkid); +/* + * Free a loaned arc buffer. 
+ */ +void +dmu_return_arcbuf(arc_buf_t *buf) +{ + arc_return_buf(buf, FTAG); + arc_buf_destroy(buf, FTAG); +} - DB_DNODE_ENTER(db); - dn = DB_DNODE(db); - dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp); - DB_DNODE_EXIT(db); +void +dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset, + dmu_buf_t *handle, dmu_tx_t *tx) +{ + dmu_buf_t *dst_handle; + dmu_buf_impl_t *dstdb; + dmu_buf_impl_t *srcdb = (dmu_buf_impl_t *)handle; + dmu_object_type_t type; + arc_buf_t *abuf; + uint64_t datalen; + boolean_t byteorder; + uint8_t salt[ZIO_DATA_SALT_LEN]; + uint8_t iv[ZIO_DATA_IV_LEN]; + uint8_t mac[ZIO_DATA_MAC_LEN]; - /* - * If we're frozen (running ziltest), we always need to generate a bp. - */ - if (txg > spa_freeze_txg(os->os_spa)) - return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); + ASSERT3P(srcdb->db_buf, !=, NULL); - /* - * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf() - * and us. If we determine that this txg is not yet syncing, - * but it begins to sync a moment later, that's OK because the - * sync thread will block in dbuf_sync_leaf() until we drop db_mtx. - */ - mutex_enter(&db->db_mtx); + /* hold the db that we want to write to */ + VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &dst_handle, + DMU_READ_NO_DECRYPT)); + dstdb = (dmu_buf_impl_t *)dst_handle; + datalen = arc_buf_size(srcdb->db_buf); - if (txg <= spa_last_synced_txg(os->os_spa)) { - /* - * This txg has already synced. There's nothing to do. - */ - mutex_exit(&db->db_mtx); - return (SET_ERROR(EEXIST)); - } + DB_DNODE_ENTER(dstdb); + type = DB_DNODE(dstdb)->dn_type; + DB_DNODE_EXIT(dstdb); - if (txg <= spa_syncing_txg(os->os_spa)) { - /* - * This txg is currently syncing, so we can't mess with - * the dirty record anymore; just write a new log block. - */ - mutex_exit(&db->db_mtx); - return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); + /* allocated an arc buffer that matches the type of srcdb->db_buf */ + if (arc_is_encrypted(srcdb->db_buf)) { + arc_get_raw_params(srcdb->db_buf, &byteorder, salt, iv, mac); + abuf = arc_loan_raw_buf(os->os_spa, dmu_objset_id(os), + byteorder, salt, iv, mac, type, + datalen, arc_buf_lsize(srcdb->db_buf), + arc_get_compression(srcdb->db_buf)); + } else { + /* we won't get a compressed db back from dmu_buf_hold() */ + ASSERT3U(arc_get_compression(srcdb->db_buf), + ==, ZIO_COMPRESS_OFF); + abuf = arc_loan_buf(os->os_spa, + DMU_OT_IS_METADATA(type), datalen); } - dr = db->db_last_dirty; - while (dr && dr->dr_txg != txg) - dr = dr->dr_next; + ASSERT3U(datalen, ==, arc_buf_size(abuf)); - if (dr == NULL) { - /* - * There's no dr for this dbuf, so it must have been freed. - * There's no need to log writes to freed blocks, so we're done. - */ - mutex_exit(&db->db_mtx); - return (SET_ERROR(ENOENT)); - } + /* copy the data to the new buffer and assign it to the dstdb */ + bcopy(srcdb->db_buf->b_data, abuf->b_data, datalen); + dbuf_assign_arcbuf(dstdb, abuf, tx); + dmu_buf_rele(dst_handle, FTAG); +} - ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg); +/* + * When possible directly assign passed loaned arc buffer to a dbuf. + * If this is not possible copy the contents of passed arc buf via + * dmu_write(). 
+ */
+int
+dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
+    dmu_tx_t *tx)
+{
+	dmu_buf_impl_t *db;
+	objset_t *os = dn->dn_objset;
+	uint64_t object = dn->dn_object;
+	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
+	uint64_t blkid;
 
-	if (db->db_blkptr != NULL) {
-		/*
-		 * We need to fill in zgd_bp with the current blkptr so that
-		 * the nopwrite code can check if we're writing the same
-		 * data that's already on disk. We can only nopwrite if we
-		 * are sure that after making the copy, db_blkptr will not
-		 * change until our i/o completes. We ensure this by
-		 * holding the db_mtx, and only allowing nopwrite if the
-		 * block is not already dirty (see below). This is verified
-		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
-		 * not changed.
-		 */
-		*zgd->zgd_bp = *db->db_blkptr;
-	}
+	rw_enter(&dn->dn_struct_rwlock, RW_READER);
+	blkid = dbuf_whichblock(dn, 0, offset);
+	db = dbuf_hold(dn, blkid, FTAG);
+	rw_exit(&dn->dn_struct_rwlock);
+	if (db == NULL)
+		return (SET_ERROR(EIO));
 
 	/*
-	 * Assume the on-disk data is X, the current syncing data (in
-	 * txg - 1) is Y, and the current in-memory data is Z (currently
-	 * in dmu_sync).
-	 *
-	 * We usually want to perform a nopwrite if X and Z are the
-	 * same. However, if Y is different (i.e. the BP is going to
-	 * change before this write takes effect), then a nopwrite will
-	 * be incorrect - we would override with X, which could have
-	 * been freed when Y was written.
-	 *
-	 * (Note that this is not a concern when we are nop-writing from
-	 * syncing context, because X and Y must be identical, because
-	 * all previous txgs have been synced.)
-	 *
-	 * Therefore, we disable nopwrite if the current BP could change
-	 * before this TXG. There are two ways it could change: by
-	 * being dirty (dr_next is non-NULL), or by being freed
-	 * (dnode_block_freed()). This behavior is verified by
-	 * zio_done(), which VERIFYs that the override BP is identical
-	 * to the on-disk BP.
+	 * We can only assign if the offset is aligned, the arc buf is the
+	 * same size as the dbuf, and the dbuf is not metadata.
 	 */
-	DB_DNODE_ENTER(db);
-	dn = DB_DNODE(db);
-	if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
-		zp.zp_nopwrite = B_FALSE;
-	DB_DNODE_EXIT(db);
+	if (offset == db->db.db_offset && blksz == db->db.db_size) {
+		dbuf_assign_arcbuf(db, buf, tx);
+		dbuf_rele(db, FTAG);
+	} else {
+		/* compressed bufs must always be assignable to their dbuf */
+		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
+		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
 
-	ASSERT(dr->dr_txg == txg);
-	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
-	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
-		/*
-		 * We have already issued a sync write for this buffer,
-		 * or this buffer has already been synced. It could not
-		 * have been dirtied since, or we would have cleared the state.
- */
-		mutex_exit(&db->db_mtx);
-		return (SET_ERROR(EALREADY));
+		dbuf_rele(db, FTAG);
+		dmu_write(os, object, offset, blksz, buf->b_data, tx);
+		dmu_return_arcbuf(buf);
+		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
 	}
-	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
-	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
-	mutex_exit(&db->db_mtx);
+	return (0);
+}
 
-	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
-	dsa->dsa_dr = dr;
-	dsa->dsa_done = done;
-	dsa->dsa_zgd = zgd;
-	dsa->dsa_tx = NULL;
+int
+dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
+    dmu_tx_t *tx)
+{
+	int err;
+	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
 
-	zio_nowait(arc_write(pio, os->os_spa, txg,
-	    zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
-	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
-	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
+	DB_DNODE_ENTER(dbuf);
+	err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
+	DB_DNODE_EXIT(dbuf);
 
-	return (0);
+	return (err);
 }
 
 int
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index de7b59935e8c..b7cb67afce64 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -658,7 +658,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		xuio = (xuio_t *)uio;
 	else
 #endif
-	if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
+	if (uio_prefaultpages(n, uio)) {
 		ZFS_EXIT(zfsvfs);
 		return (SET_ERROR(EFAULT));
 	}
@@ -748,7 +748,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 #endif
 	} else if (n >= max_blksz && woff >= zp->z_size &&
 	    P2PHASE(woff, max_blksz) == 0 &&
-	    zp->z_blksz == max_blksz) {
+	    zp->z_blksz == max_blksz && !(ioflag & O_DIRECT)) {
 		/*
 		 * This write covers a full block. "Borrow" a buffer
 		 * from the dmu so that we can fill it before we enter
@@ -762,10 +762,13 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 			    max_blksz);
 			ASSERT(abuf != NULL);
 			ASSERT(arc_buf_size(abuf) == max_blksz);
-			if ((error = uiocopy(abuf->b_data, max_blksz,
+			while ((error = uiocopy(abuf->b_data, max_blksz,
 			    UIO_WRITE, uio, &cbytes))) {
-				dmu_return_arcbuf(abuf);
-				break;
+				if (error != EFAULT ||
+				    uio_prefaultpages(max_blksz, uio)) {
+					dmu_return_arcbuf(abuf);
+					break;
+				}
 			}
 			ASSERT(cbytes == max_blksz);
 		}
@@ -822,7 +825,14 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		ssize_t tx_bytes;
 
 		if (abuf == NULL) {
+			if (ioflag & O_DIRECT)
+				uio->uio_extflg |= UIO_DIRECT;
+
 			tx_bytes = uio->uio_resid;
+			/*
+			 * Needed to resolve a deadlock which could occur when
+			 * handling a page fault in zfs_write.
+			 */
 			uio->uio_fault_disable = B_TRUE;
 			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
 			    uio, nbytes, tx);

From efdf303913f468866ea7676f0f71ccdcf829f294 Mon Sep 17 00:00:00 2001
From: Brian Atkinson
Date: Tue, 3 Sep 2019 09:47:44 -0600
Subject: [PATCH 65/68] I added back the check in zfs_vnops to set the correct
 flag for Direct IO reads.
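
Without UIO_DIRECT set on the uio, dmu_read_uio_dnode() cannot tell that
the caller asked for Direct IO, so O_DIRECT reads silently fell back to
the ARC-buffered path. As context, a minimal userspace sketch of the kind
of aligned read this flag is meant to service (illustrative only: the
path /tank/fs/file, the 4K buffer alignment, and the 128K length are
assumptions, and the offset and length must be multiples of the dataset
recordsize for the direct path in dmu_read_uio_dnode() to be taken):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int
	main(void)
	{
		const size_t len = 128 * 1024;	/* one 128K record */
		void *buf;
		int fd;

		/* O_DIRECT requires a page-aligned user buffer. */
		if (posix_memalign(&buf, 4096, len) != 0)
			return (1);

		fd = open("/tank/fs/file", O_RDONLY | O_DIRECT);
		if (fd == -1) {
			perror("open");
			return (1);
		}

		/* With this change zfs_read() tags the uio with UIO_DIRECT. */
		if (pread(fd, buf, len, 0) == -1)
			perror("pread");

		close(fd);
		free(buf);
		return (0);
	}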
--- module/zfs/zfs_vnops.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index b7cb67afce64..c8b60e8afb99 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -532,6 +532,9 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) } } #endif /* HAVE_UIO_ZEROCOPY */ + + if (ioflag & O_DIRECT) + uio->uio_extflg |= UIO_DIRECT; while (n > 0) { ssize_t nbytes = MIN(n, zfs_read_chunk_size - From 21db0dfb236a302fd0a1d083a14b5afa171852ee Mon Sep 17 00:00:00 2001 From: Brian Atkinson Date: Wed, 4 Sep 2019 15:51:12 -0600 Subject: [PATCH 66/68] Fixed memory leak that was created in abd_get_from_pages(). The struct sg_table that was created using sg_alloc_table_from_pages was never free'd using sg_free_table(). I rearranged abd.c to accommodate this change as well as updated the calls into the dmu layer to call the proper abd functions. --- include/sys/abd.h | 13 +++- module/zfs/abd.c | 132 +++++++++++++++++++++++++++++++++++------ module/zfs/dmu.c | 10 +++- module/zfs/zfs_vnops.c | 3 + 4 files changed, 137 insertions(+), 21 deletions(-) diff --git a/include/sys/abd.h b/include/sys/abd.h index c689386d85ab..1f0d11b98d84 100644 --- a/include/sys/abd.h +++ b/include/sys/abd.h @@ -39,6 +39,10 @@ extern "C" { #endif +#ifndef _KERNEL +struct page; /* forward declaration to be used in abd.c */ +#endif + typedef enum abd_flags { ABD_FLAG_LINEAR = 1 << 0, /* is buffer linear (or scattered)? */ ABD_FLAG_OWNER = 1 << 1, /* does it own its data buffers? */ @@ -46,6 +50,7 @@ typedef enum abd_flags { ABD_FLAG_MULTI_ZONE = 1 << 3, /* pages split over memory zones */ ABD_FLAG_MULTI_CHUNK = 1 << 4, /* pages split over multiple chunks */ ABD_FLAG_LINEAR_PAGE = 1 << 5, /* linear but allocd from page */ + ABD_FLAG_DIO_PAGE = 1 << 6, /* pages mapped/pinned from user space */ } abd_flags_t; typedef struct abd { @@ -84,6 +89,12 @@ abd_is_linear_page(abd_t *abd) B_TRUE : B_FALSE); } +static inline boolean_t +abd_has_directio_pages(abd_t *abd) +{ + return ((abd->abd_flags & ABD_FLAG_DIO_PAGE) != 0 ? 
B_TRUE : B_FALSE);
+}
+
 /*
  * Allocations and deallocations
  */
@@ -96,9 +107,7 @@ void abd_free(abd_t *);
 abd_t *abd_get_offset(abd_t *, size_t);
 abd_t *abd_get_offset_size(abd_t *, size_t, size_t);
 abd_t *abd_get_from_buf(void *, size_t);
-#ifdef _KERNEL
 abd_t *abd_get_from_pages(struct page **, uint_t);
-#endif
 void abd_put(abd_t *);
 
 /*
diff --git a/module/zfs/abd.c b/module/zfs/abd.c
index 234fd569d185..e54492464364 100644
--- a/module/zfs/abd.c
+++ b/module/zfs/abd.c
@@ -441,11 +441,24 @@ abd_alloc_pages(abd_t *abd, size_t size)
 }
 #endif /* !CONFIG_HIGHMEM */
 
+/*
+ * This must be called if any of the sg_table allocation functions
+ * are called.
+ */
+static void
+abd_free_sg_table(abd_t *abd)
+{
+	struct sg_table table;
+
+	table.sgl = ABD_SCATTER(abd).abd_sgl;
+	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
+	sg_free_table(&table);
+}
+
 static void
 abd_free_pages(abd_t *abd)
 {
 	struct scatterlist *sg = NULL;
-	struct sg_table table;
 	struct page *page;
 	int nr_pages = ABD_SCATTER(abd).abd_nents;
 	int order, i = 0;
@@ -464,10 +477,7 @@ abd_free_pages(abd_t *abd)
 		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
 		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
 	}
-
-	table.sgl = ABD_SCATTER(abd).abd_sgl;
-	table.nents = table.orig_nents = nr_pages;
-	sg_free_table(&table);
+	abd_free_sg_table(abd);
 }
 
 #else /* _KERNEL */
@@ -476,8 +486,6 @@ abd_free_pages(abd_t *abd)
 #define PAGE_SHIFT (highbit64(PAGESIZE)-1)
 #endif
 
-struct page;
-
 #define zfs_kmap_atomic(chunk, km) ((void *)chunk)
 #define zfs_kunmap_atomic(addr, km) do { (void)(addr); } while (0)
 #define local_irq_save(flags) do { (void)(flags); } while (0)
@@ -498,6 +506,19 @@ sg_init_table(struct scatterlist *sg, int nr)
 	sg[nr - 1].end = 1;
 }
 
+/*
+ * This must be called if any of the sg_table allocation functions
+ * are called.
+ */
+static void
+abd_free_sg_table(abd_t *abd)
+{
+	int nents = ABD_SCATTER(abd).abd_nents;
+	vmem_free(ABD_SCATTER(abd).abd_sgl,
+	    nents * sizeof (struct scatterlist));
+}
+
+
 #define for_each_sg(sgl, sg, nr, i)	\
 	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))
 
@@ -557,7 +578,7 @@ abd_free_pages(abd_t *abd)
 		}
 	}
 
-	vmem_free(ABD_SCATTER(abd).abd_sgl, n * sizeof (struct scatterlist));
+	abd_free_sg_table(abd);
 }
 
 #endif /* _KERNEL */
@@ -606,7 +627,7 @@ abd_verify(abd_t *abd)
 	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
 	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
 	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
-	    ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE));
+	    ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_DIO_PAGE));
 	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
 	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
 	if (abd_is_linear(abd)) {
@@ -677,6 +698,32 @@ abd_alloc(size_t size, boolean_t is_metadata)
 	return (abd);
 }
 
+/*
+ * This is to be called only on ABDs obtained from abd_get_from_pages().
+ */
+static void
+abd_free_from_pages(abd_t *abd)
+{
+	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
+		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
+
+	/*
+	 * If the abd buffer was used for Direct IO, we must make sure
+	 * the sg_table is freed.
+	 */
+	abd_free_sg_table(abd);
+
+	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
+
+	if (abd->abd_parent != NULL) {
+		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
+		    abd->abd_size, abd);
+	}
+
+	zfs_refcount_destroy(&abd->abd_children);
+	abd_free_struct(abd);
+}
+
 static void
 abd_free_scatter(abd_t *abd)
 {
@@ -751,19 +798,23 @@ abd_free_linear(abd_t *abd)
 }
 
 /*
- * Free an ABD.
Only use this on ABDs allocated with abd_alloc() or - * abd_alloc_linear(). + * Free an ABD. Only use this on ABDs allocated with abd_alloc(), + * abd_alloc_linear(), or abd_get_from_pages(). */ void abd_free(abd_t *abd) { abd_verify(abd); ASSERT3P(abd->abd_parent, ==, NULL); - ASSERT(abd->abd_flags & ABD_FLAG_OWNER); - if (abd_is_linear(abd)) - abd_free_linear(abd); - else - abd_free_scatter(abd); + if (abd_has_directio_pages(abd)) { + abd_free_from_pages(abd); + } else { + ASSERT(abd->abd_flags & ABD_FLAG_OWNER); + if (abd_is_linear(abd)) + abd_free_linear(abd); + else + abd_free_scatter(abd); + } } /* @@ -909,7 +960,7 @@ abd_get_from_buf(void *buf, size_t size) #ifdef _KERNEL /* * Allocate a scatter gather ABD structure for pages. You must free this - * with abd_put() since the resulting ABD doesn't own its pages. + * with abd_free(). */ abd_t * abd_get_from_pages(struct page **pages, uint_t n_pages) @@ -926,8 +977,12 @@ abd_get_from_pages(struct page **pages, uint_t n_pages) * Even if this buf is filesystem metadata, we only track that if we * own the underlying data buffer, which is not true in this case. * Therefore, we don't ever use ABD_FLAG_META here. + * + * Currently, the only consumer of this function is Direct IO + * read/write, so we will add the flag ABD_FLAG_DIO_PAGE. */ abd->abd_flags = 0; + abd->abd_flags |= ABD_FLAG_DIO_PAGE; abd->abd_size = size; abd->abd_parent = NULL; zfs_refcount_create(&abd->abd_children); @@ -951,6 +1006,49 @@ abd_get_from_pages(struct page **pages, uint_t n_pages) abd_verify(abd); return (abd); } + +#else /* _KERNEL */ + +abd_t * +abd_get_from_pages(struct page **pages, uint_t n_pages) +{ + abd_t *abd = abd_alloc_struct(); + struct scatterlist *sg; + size_t size = n_pages * PAGESIZE; + int i; + + /* + * Even if this buf is filesystem metadata, we only track that if we + * own the underlying data buffer, which is not true in this case. + * Therefore, we don't ever use ABD_FLAG_META here. + * + * Currently, the only consumer of this function is Direct IO + * read/write, so we will add the flag ABD_FLAG_DIO_PAGE. 
+ */ + abd->abd_flags = 0; + abd->abd_flags |= ABD_FLAG_DIO_PAGE; + abd->abd_size = size; + abd->abd_parent = NULL; + zfs_refcount_create(&abd->abd_children); + + ABD_SCATTER(abd).abd_sgl = vmem_alloc(n_pages * + sizeof (struct scatterlist), KM_SLEEP); + sg_init_table(ABD_SCATTER(abd).abd_sgl, n_pages); + + abd_for_each_sg(abd, sg, n_pages, i) { + sg_set_page(sg, pages[i], PAGESIZE, 0); + } + ABD_SCATTER(abd).abd_nents = n_pages; + ABD_SCATTER(abd).abd_offset = 0; + + if (ABD_SCATTER(abd).abd_nents > 1) { + ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk); + abd->abd_flags |= ABD_FLAG_MULTI_CHUNK; + } + + abd_verify(abd); + return (abd); +} #endif /* _KERNEL */ /* diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index 3bc3fa4447f8..b43d1851e480 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1956,9 +1956,15 @@ dmu_uio_dnode_rw_direct(dnode_t *dn, uio_t *uio, uint64_t size, err = dmu_write_abd(dn, uio->uio_loffset, size, data, DMU_DIRECTIO, tx); } - abd_put(data); - for (int i = 0; i < numpages; i++) + + abd_free(data); + + for (int i = 0; i < numpages; i++) { + if (read) + set_page_dirty_lock(pages[i]); put_page(pages[i]); + } + kmem_free(pages, numpages * sizeof (struct page *)); if (err == 0) uioskip(uio, size); diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index b7cb67afce64..5adda1817e7d 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -533,6 +533,9 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) } #endif /* HAVE_UIO_ZEROCOPY */ + if (ioflag & O_DIRECT) + uio->uio_extflg |= UIO_DIRECT; + while (n > 0) { ssize_t nbytes = MIN(n, zfs_read_chunk_size - P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); From 2137ae65eabd73977c76835cdeb7d58e978bfd50 Mon Sep 17 00:00:00 2001 From: Brian Atkinson Date: Mon, 9 Sep 2019 11:10:51 -0600 Subject: [PATCH 67/68] I removed the user space implementation of abd_get_from_pages. I also updated the abd code so now abd_put is only used to free up the sg_table instead of having to call abd_free. --- include/sys/abd.h | 6 +-- module/zfs/abd.c | 103 +++++++++-------------------------------- module/zfs/dmu.c | 2 +- module/zfs/zfs_vnops.c | 2 +- 4 files changed, 25 insertions(+), 88 deletions(-) diff --git a/include/sys/abd.h b/include/sys/abd.h index 1f0d11b98d84..76e5394ceb2f 100644 --- a/include/sys/abd.h +++ b/include/sys/abd.h @@ -39,10 +39,6 @@ extern "C" { #endif -#ifndef _KERNEL -struct page; /* forward declaration to be used in abd.c */ -#endif - typedef enum abd_flags { ABD_FLAG_LINEAR = 1 << 0, /* is buffer linear (or scattered)? */ ABD_FLAG_OWNER = 1 << 1, /* does it own its data buffers? 
*/ @@ -107,7 +103,9 @@ void abd_free(abd_t *); abd_t *abd_get_offset(abd_t *, size_t); abd_t *abd_get_offset_size(abd_t *, size_t, size_t); abd_t *abd_get_from_buf(void *, size_t); +#ifdef _KERNEL abd_t *abd_get_from_pages(struct page **, uint_t); +#endif void abd_put(abd_t *); /* diff --git a/module/zfs/abd.c b/module/zfs/abd.c index e54492464364..6b1325f59a97 100644 --- a/module/zfs/abd.c +++ b/module/zfs/abd.c @@ -486,6 +486,8 @@ abd_free_pages(abd_t *abd) #define PAGE_SHIFT (highbit64(PAGESIZE)-1) #endif +struct page; + #define zfs_kmap_atomic(chunk, km) ((void *)chunk) #define zfs_kunmap_atomic(addr, km) do { (void)(addr); } while (0) #define local_irq_save(flags) do { (void)(flags); } while (0) @@ -698,32 +700,6 @@ abd_alloc(size_t size, boolean_t is_metadata) return (abd); } -/* - * This is to be called only with abd_get_from_pages() - */ -static void -abd_free_from_pages(abd_t *abd) -{ - if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK) - ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk); - - /* - * If the abd buffer was used for Direct IO, we must make sure - * sg_table is freed - */ - abd_free_sg_table(abd); - - ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER)); - - if (abd->abd_parent != NULL) { - (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children, - abd->abd_size, abd); - } - - zfs_refcount_destroy(&abd->abd_children); - abd_free_struct(abd); -} - static void abd_free_scatter(abd_t *abd) { @@ -799,22 +775,18 @@ abd_free_linear(abd_t *abd) /* * Free an ABD. Only use this on ABDs allocated with abd_alloc(), - * abd_alloc_linear(), or abd_get_from_pages(). + * abd_alloc_linear(). */ void abd_free(abd_t *abd) { abd_verify(abd); ASSERT3P(abd->abd_parent, ==, NULL); - if (abd_has_directio_pages(abd)) { - abd_free_from_pages(abd); - } else { - ASSERT(abd->abd_flags & ABD_FLAG_OWNER); - if (abd_is_linear(abd)) - abd_free_linear(abd); - else - abd_free_scatter(abd); - } + ASSERT(abd->abd_flags & ABD_FLAG_OWNER); + if (abd_is_linear(abd)) + abd_free_linear(abd); + else + abd_free_scatter(abd); } /* @@ -960,7 +932,7 @@ abd_get_from_buf(void *buf, size_t size) #ifdef _KERNEL /* * Allocate a scatter gather ABD structure for pages. You must free this - * with abd_free(). + * with abd_put() since the resulting ABD doesn't own its pages. */ abd_t * abd_get_from_pages(struct page **pages, uint_t n_pages) @@ -982,7 +954,7 @@ abd_get_from_pages(struct page **pages, uint_t n_pages) * read/write, so we will add the flag ABD_FLAG_DIO_PAGE. */ abd->abd_flags = 0; - abd->abd_flags |= ABD_FLAG_DIO_PAGE; + abd->abd_flags = ABD_FLAG_DIO_PAGE | ABD_FLAG_OWNER; abd->abd_size = size; abd->abd_parent = NULL; zfs_refcount_create(&abd->abd_children); @@ -1006,49 +978,6 @@ abd_get_from_pages(struct page **pages, uint_t n_pages) abd_verify(abd); return (abd); } - -#else /* _KERNEL */ - -abd_t * -abd_get_from_pages(struct page **pages, uint_t n_pages) -{ - abd_t *abd = abd_alloc_struct(); - struct scatterlist *sg; - size_t size = n_pages * PAGESIZE; - int i; - - /* - * Even if this buf is filesystem metadata, we only track that if we - * own the underlying data buffer, which is not true in this case. - * Therefore, we don't ever use ABD_FLAG_META here. - * - * Currently, the only consumer of this function is Direct IO - * read/write, so we will add the flag ABD_FLAG_DIO_PAGE. 
- */ - abd->abd_flags = 0; - abd->abd_flags |= ABD_FLAG_DIO_PAGE; - abd->abd_size = size; - abd->abd_parent = NULL; - zfs_refcount_create(&abd->abd_children); - - ABD_SCATTER(abd).abd_sgl = vmem_alloc(n_pages * - sizeof (struct scatterlist), KM_SLEEP); - sg_init_table(ABD_SCATTER(abd).abd_sgl, n_pages); - - abd_for_each_sg(abd, sg, n_pages, i) { - sg_set_page(sg, pages[i], PAGESIZE, 0); - } - ABD_SCATTER(abd).abd_nents = n_pages; - ABD_SCATTER(abd).abd_offset = 0; - - if (ABD_SCATTER(abd).abd_nents > 1) { - ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk); - abd->abd_flags |= ABD_FLAG_MULTI_CHUNK; - } - - abd_verify(abd); - return (abd); -} #endif /* _KERNEL */ /* @@ -1059,7 +988,17 @@ void abd_put(abd_t *abd) { abd_verify(abd); - ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER)); + + if (!(abd->abd_flags & ABD_FLAG_DIO_PAGE)) { + ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER)); + } else { + if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK) + ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk); + if (abd->abd_flags & ABD_FLAG_OWNER) { + /* Only the parent abd needs to free the sg_table */ + abd_free_sg_table(abd); + } + } if (abd->abd_parent != NULL) { (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children, diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index b43d1851e480..b0b872a658c7 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1957,7 +1957,7 @@ dmu_uio_dnode_rw_direct(dnode_t *dn, uio_t *uio, uint64_t size, data, DMU_DIRECTIO, tx); } - abd_free(data); + abd_put(data); for (int i = 0; i < numpages; i++) { if (read) diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 91a5211e1ed9..e6a25702eaf5 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -532,7 +532,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) } } #endif /* HAVE_UIO_ZEROCOPY */ - + if (ioflag & O_DIRECT) uio->uio_extflg |= UIO_DIRECT; From e55089868c1206482230f419d695aea5f234bb04 Mon Sep 17 00:00:00 2001 From: Brian Atkinson Date: Tue, 10 Sep 2019 15:38:10 -0600 Subject: [PATCH 68/68] Fixed bug where immediately deleting a file written using Direct IO would cause a NULL pointer derefence. Just needed to check if the dbuf was in a DB_UNCACHED state, and if so, not call any of the ARC related functions in dbuf_unoverride and dbuf_undirty. --- module/zfs/dbuf.c | 23 ++++++++++++++++++----- module/zfs/dmu.c | 2 +- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index fd9900386f85..25f54ee10ac4 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -1716,6 +1716,7 @@ dbuf_unoverride(dbuf_dirty_record_t *dr) uint64_t txg = dr->dr_txg; ASSERT(MUTEX_HELD(&db->db_mtx)); + /* * This assert is valid because dmu_sync() expects to be called by * a zilog's get_data while holding a range lock. This call only @@ -1746,7 +1747,13 @@ dbuf_unoverride(dbuf_dirty_record_t *dr) * the buf thawed to save the effort of freezing & * immediately re-thawing it. */ - arc_release(dr->dt.dl.dr_data, db); + if (db->db_state != DB_UNCACHED) { + /* + * In the event that Direct IO was used, we do not + * need to release the buffer from the ARC. 
+		 */
+		arc_release(dr->dt.dl.dr_data, db);
+	}
 }
 
 /*
@@ -2308,10 +2318,16 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 
 	if (db->db_state != DB_NOFILL) {
 		dbuf_unoverride(dr);
-		ASSERT(db->db_buf != NULL);
-		ASSERT(dr->dt.dl.dr_data != NULL);
-		if (dr->dt.dl.dr_data != db->db_buf)
-			arc_buf_destroy(dr->dt.dl.dr_data, db);
+		/*
+		 * In the Direct IO case, the buffer is still dirty, but it
+		 * is UNCACHED, so we do not need to destroy the ARC buffer.
+		 */
+		if (db->db_state != DB_UNCACHED) {
+			ASSERT(db->db_buf != NULL);
+			ASSERT(dr->dt.dl.dr_data != NULL);
+			if (dr->dt.dl.dr_data != db->db_buf)
+				arc_buf_destroy(dr->dt.dl.dr_data, db);
+		}
 	}
 
 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index b0b872a658c7..d25b82026e82 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1484,7 +1484,7 @@ dmu_write_direct_done(zio_t *zio)
 
 	mutex_enter(&db->db_mtx);
 	ASSERT(db->db.db_data == NULL);
-	ASSERT(dr->dr_data == NULL);
+	ASSERT(dr->dt.dl.dr_data == NULL);
 	db->db_state = DB_UNCACHED;
 	mutex_exit(&db->db_mtx);
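
For reference, the failure mode fixed above can be exercised by writing a
file through the Direct IO path and unlinking it before the txg syncs. A
minimal reproducer sketch (illustrative only: the path /tank/fs/dio_file,
the 4K buffer alignment, and the 128K length are assumptions):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		const size_t len = 128 * 1024;
		void *buf;
		int fd;

		if (posix_memalign(&buf, 4096, len) != 0)
			return (1);
		memset(buf, 0xab, len);

		fd = open("/tank/fs/dio_file",
		    O_WRONLY | O_CREAT | O_DIRECT, 0644);
		if (fd == -1)
			return (1);

		/*
		 * The aligned write goes through the Direct IO path and
		 * leaves the dbuf dirty but in the DB_UNCACHED state.
		 */
		(void) pwrite(fd, buf, len, 0);
		close(fd);

		/*
		 * Unlinking before the txg syncs previously tripped the
		 * NULL dereference in dbuf_undirty()/dbuf_unoverride().
		 */
		(void) unlink("/tank/fs/dio_file");

		free(buf);
		return (0);
	}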