Gentoo Logo
Gentoo Spaceship




Note: Due to technical difficulties, the Archives are currently not up to date. GMANE provides an alternative service for most mailing lists.
cf. bug 424647
List Archive: gentoo-commits
Navigation:
Lists: gentoo-commits: < Prev By Thread Next > < Prev By Date Next >
Headers:
To: gentoo-commits@g.o
From: "Richard Yao (ryao)" <ryao@g.o>
Subject: gentoo-x86 commit in sys-fs/zfs/files: zfs-0.6.0_rc9-range-lock-caller-allocate.patch
Date: Mon, 25 Jun 2012 21:03:27 +0000 (UTC)
ryao        12/06/25 21:03:27

  Modified:             zfs-0.6.0_rc9-range-lock-caller-allocate.patch
  Log:
  Fix additional deadlock fix regression
  
  (Portage version: 2.1.10.49/cvs/Linux x86_64)

Revision  Changes    Path
1.4                  sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch?rev=1.4&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch?rev=1.4&content-type=text/plain
diff : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch?r1=1.3&r2=1.4

Index: zfs-0.6.0_rc9-range-lock-caller-allocate.patch
===================================================================
RCS file: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- zfs-0.6.0_rc9-range-lock-caller-allocate.patch	25 Jun 2012 20:21:55 -0000	1.3
+++ zfs-0.6.0_rc9-range-lock-caller-allocate.patch	25 Jun 2012 21:03:27 -0000	1.4
@@ -1,36 +1,28 @@
-From fc1f1d3940f4d2e5b1b85481d900d8198cf4b6f3 Mon Sep 17 00:00:00 2001
-From: Richard Yao <ryao@...>
-Date: Mon, 25 Jun 2012 14:41:30 -0400
-Subject: [PATCH] Make callers responsible for memory allocation in
- zfs_range_lock()
+commit e7deab3edf6940f13013ca147c91472577223923
+Author: Richard Yao <ryao@...>
+Date:   Mon Jun 25 14:41:30 2012 -0400
 
-zfs_range_lock() is used in zvols, and previously, it could deadlock due
-to an allocation using KM_SLEEP. We avoid this by moving responsibility
-the memory allocation from zfs_range_lock() to the caller. This enables
-us to avoid such deadlocks and use stack allocations, which are more
-efficient and prevents deadlocks. The contexts in which stack
-allocations are done do not appear to be stack heavy, so we do not risk
-overflowing the stack from doing this.
-
-Signed-off-by: Richard Yao <ryao@...>
-
-Conflicts:
-
-	module/zfs/zvol.c
----
- cmd/ztest/ztest.c       |   32 +++++++++++++++++---------------
- include/sys/zfs_rlock.h |    2 +-
- module/zfs/zfs_rlock.c  |   15 +++++++--------
- module/zfs/zfs_vnops.c  |   30 ++++++++++++++++--------------
- module/zfs/zfs_znode.c  |   30 +++++++++++++++---------------
- module/zfs/zvol.c       |   24 +++++++++++++-----------
- 6 files changed, 69 insertions(+), 64 deletions(-)
+    Make callers responsible for memory allocation in zfs_range_lock()
+    
+    zfs_range_lock() is used in zvols, and previously, it could deadlock due
+    to an allocation using KM_SLEEP. We avoid this by moving responsibility
+    the memory allocation from zfs_range_lock() to the caller. This enables
+    us to avoid such deadlocks and use stack allocations, which are more
+    efficient and prevents deadlocks. The contexts in which stack
+    allocations are done do not appear to be stack heavy, so we do not risk
+    overflowing the stack from doing this.
+    
+    Signed-off-by: Richard Yao <ryao@...>
+    
+    Conflicts:
+    
+    	module/zfs/zvol.c
 
 diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
 index 72d511b..c5dd0c2 100644
 --- a/cmd/ztest/ztest.c
 +++ b/cmd/ztest/ztest.c
-@@ -973,12 +973,11 @@ enum ztest_object {
+@@ -973,12 +973,11 @@ ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
  }
  
  static rl_t *
@@ -44,7 +36,7 @@
  
  	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
  	rl->rl_object = object;
-@@ -1389,7 +1388,7 @@ enum ztest_object {
+@@ -1389,7 +1388,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
  	dmu_tx_t *tx;
  	dmu_buf_t *db;
  	arc_buf_t *abuf = NULL;
@@ -53,7 +45,7 @@
  
  	if (byteswap)
  		byteswap_uint64_array(lr, sizeof (*lr));
-@@ -1413,7 +1412,7 @@ enum ztest_object {
+@@ -1413,7 +1412,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
  		bt = NULL;
  
  	ztest_object_lock(zd, lr->lr_foid, RL_READER);
@@ -62,7 +54,7 @@
  
  	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
  
-@@ -1438,7 +1437,7 @@ enum ztest_object {
+@@ -1438,7 +1437,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
  		if (abuf != NULL)
  			dmu_return_arcbuf(abuf);
  		dmu_buf_rele(db, FTAG);
@@ -71,7 +63,7 @@
  		ztest_object_unlock(zd, lr->lr_foid);
  		return (ENOSPC);
  	}
-@@ -1495,7 +1494,7 @@ enum ztest_object {
+@@ -1495,7 +1494,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
  
  	dmu_tx_commit(tx);
  
@@ -80,7 +72,7 @@
  	ztest_object_unlock(zd, lr->lr_foid);
  
  	return (0);
-@@ -1507,13 +1506,13 @@ enum ztest_object {
+@@ -1507,13 +1506,13 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
  	objset_t *os = zd->zd_os;
  	dmu_tx_t *tx;
  	uint64_t txg;
@@ -96,7 +88,7 @@
  	    RL_WRITER);
  
  	tx = dmu_tx_create(os);
-@@ -1522,7 +1521,7 @@ enum ztest_object {
+@@ -1522,7 +1521,7 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
  
  	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
  	if (txg == 0) {
@@ -105,7 +97,7 @@
  		ztest_object_unlock(zd, lr->lr_foid);
  		return (ENOSPC);
  	}
-@@ -1534,7 +1533,7 @@ enum ztest_object {
+@@ -1534,7 +1533,7 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
  
  	dmu_tx_commit(tx);
  
@@ -114,7 +106,7 @@
  	ztest_object_unlock(zd, lr->lr_foid);
  
  	return (0);
-@@ -1670,6 +1669,8 @@ enum ztest_object {
+@@ -1670,6 +1669,8 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
  	dmu_object_info_t doi;
  	dmu_buf_t *db;
  	zgd_t *zgd;
@@ -123,7 +115,7 @@
  	int error;
  
  	ztest_object_lock(zd, object, RL_READER);
-@@ -1694,9 +1695,10 @@ enum ztest_object {
+@@ -1694,9 +1695,10 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
  	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
  	zgd->zgd_zilog = zd->zd_zilog;
  	zgd->zgd_private = zd;
@@ -135,7 +127,7 @@
  		    RL_READER);
  
  		error = dmu_read(os, object, offset, size, buf,
-@@ -1711,7 +1713,7 @@ enum ztest_object {
+@@ -1711,7 +1713,7 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
  			offset = 0;
  		}
  
@@ -144,7 +136,7 @@
  		    RL_READER);
  
  		error = dmu_buf_hold(os, object, offset, zgd, &db,
-@@ -1953,12 +1955,12 @@ enum ztest_object {
+@@ -1953,12 +1955,12 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
  	objset_t *os = zd->zd_os;
  	dmu_tx_t *tx;
  	uint64_t txg;
@@ -159,7 +151,7 @@
  
  	tx = dmu_tx_create(os);
  
-@@ -1974,7 +1976,7 @@ enum ztest_object {
+@@ -1974,7 +1976,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
  		(void) dmu_free_long_range(os, object, offset, size);
  	}
  
@@ -172,7 +164,7 @@
 index da18b1f..85dc16a 100644
 --- a/include/sys/zfs_rlock.h
 +++ b/include/sys/zfs_rlock.h
-@@ -63,7 +63,7 @@
+@@ -63,7 +63,7 @@ typedef struct rl {
   * is converted to WRITER that specified to lock from the start of the
   * end of file.  zfs_range_lock() returns the range lock structure.
   */
@@ -198,7 +190,7 @@
   *
   * AVL tree
   * --------
-@@ -420,13 +420,11 @@
+@@ -420,13 +420,11 @@ got_lock:
   * previously locked as RL_WRITER).
   */
  rl_t *
@@ -213,7 +205,7 @@
  	new->r_zp = zp;
  	new->r_off = off;
  	if (len + off < off)	/* overflow */
-@@ -531,7 +529,6 @@
+@@ -531,7 +529,6 @@ zfs_range_unlock_reader(znode_t *zp, rl_t *remove, list_t *free_list)
  		}
  
  		mutex_exit(&zp->z_range_lock);
@@ -221,7 +213,7 @@
  	}
  }
  
-@@ -572,7 +569,9 @@
+@@ -572,7 +569,9 @@ zfs_range_unlock(rl_t *rl)
  
  	while ((free_rl = list_head(&free_list)) != NULL) {
  		list_remove(&free_list, free_rl);
@@ -233,10 +225,10 @@
  
  	list_destroy(&free_list);
 diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
-index 2da5fec..c8ca7c5 100644
+index 2da5fec..1ef5299 100644
 --- a/module/zfs/zfs_vnops.c
 +++ b/module/zfs/zfs_vnops.c
-@@ -370,7 +370,7 @@
+@@ -370,7 +370,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
  	objset_t	*os;
  	ssize_t		n, nbytes;
  	int		error = 0;
@@ -245,7 +237,7 @@
  #ifdef HAVE_UIO_ZEROCOPY
  	xuio_t		*xuio = NULL;
  #endif /* HAVE_UIO_ZEROCOPY */
-@@ -418,7 +418,7 @@
+@@ -418,7 +418,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
  	/*
  	 * Lock the range against changes.
  	 */
@@ -254,7 +246,7 @@
  
  	/*
  	 * If we are reading past end-of-file we can skip
-@@ -482,7 +482,7 @@
+@@ -482,7 +482,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
  		n -= nbytes;
  	}
  out:
@@ -263,7 +255,7 @@
  
  	ZFS_ACCESSTIME_STAMP(zsb, zp);
  	zfs_inode_update(zp);
-@@ -524,7 +524,7 @@
+@@ -524,7 +524,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
  	zilog_t		*zilog;
  	offset_t	woff;
  	ssize_t		n, nbytes;
@@ -272,7 +264,7 @@
  	int		max_blksz = zsb->z_max_blksz;
  	int		error = 0;
  	arc_buf_t	*abuf;
-@@ -608,9 +608,9 @@
+@@ -608,9 +608,9 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
  		 * Obtain an appending range lock to guarantee file append
  		 * semantics.  We reset the write offset once we have the lock.
  		 */
@@ -285,7 +277,7 @@
  			/*
  			 * We overlocked the file because this write will cause
  			 * the file block size to increase.
-@@ -625,11 +625,11 @@
+@@ -625,11 +625,11 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
  		 * this write, then this range lock will lock the entire file
  		 * so that we can re-write the block safely.
  		 */
@@ -299,7 +291,7 @@
  		ZFS_EXIT(zsb);
  		return (EFBIG);
  	}
-@@ -719,7 +719,7 @@
+@@ -719,7 +719,7 @@ again:
  		 * on the first iteration since zfs_range_reduce() will
  		 * shrink down r_len to the appropriate size.
  		 */
@@ -308,7 +300,7 @@
  			uint64_t new_blksz;
  
  			if (zp->z_blksz > max_blksz) {
-@@ -729,7 +729,7 @@
+@@ -729,7 +729,7 @@ again:
  				new_blksz = MIN(end_size, max_blksz);
  			}
  			zfs_grow_blocksize(zp, new_blksz, tx);
@@ -317,7 +309,7 @@
  		}
  
  		/*
-@@ -842,7 +842,7 @@
+@@ -842,7 +842,7 @@ again:
  			uio_prefaultpages(MIN(n, max_blksz), uio);
  	}
  
@@ -326,23 +318,23 @@
  
  	/*
  	 * If we're in replay mode, or we made no progress, return error.
-@@ -915,6 +915,7 @@
- 	blkptr_t *bp = &lr->lr_blkptr;
- 	dmu_buf_t *db;
- 	zgd_t *zgd;
-+	rl_t rl;
- 	int error = 0;
+@@ -893,6 +893,7 @@ zfs_get_done(zgd_t *zgd, int error)
+ 	if (error == 0 && zgd->zgd_bp)
+ 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
  
- 	ASSERT(zio != NULL);
-@@ -935,6 +936,7 @@
++	kmem_free(zgd->zgd_rl, sizeof (rl_t));
+ 	kmem_free(zgd, sizeof (zgd_t));
+ }
+ 
+@@ -935,6 +936,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
  	}
  
  	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
-+	zgd->zgd_rl = &rl;
++	zgd->zgd_rl = (rl_t *)kmem_zalloc(sizeof (rl_t), KM_SLEEP);
  	zgd->zgd_zilog = zsb->z_log;
  	zgd->zgd_private = zp;
  
-@@ -946,7 +948,7 @@
+@@ -946,7 +948,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
  	 * we don't have to write the data twice.
  	 */
  	if (buf != NULL) { /* immediate write */
@@ -351,7 +343,7 @@
  		/* test for truncation needs to be done while range locked */
  		if (offset >= zp->z_size) {
  			error = ENOENT;
-@@ -967,7 +969,7 @@
+@@ -967,7 +969,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
  			size = zp->z_blksz;
  			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
  			offset -= blkoff;
@@ -364,7 +356,7 @@
 index 3a6872f..e363839 100644
 --- a/module/zfs/zfs_znode.c
 +++ b/module/zfs/zfs_znode.c
-@@ -1158,20 +1158,20 @@
+@@ -1158,20 +1158,20 @@ zfs_extend(znode_t *zp, uint64_t end)
  {
  	zfs_sb_t *zsb = ZTOZSB(zp);
  	dmu_tx_t *tx;
@@ -388,7 +380,7 @@
  		return (0);
  	}
  top:
-@@ -1202,7 +1202,7 @@
+@@ -1202,7 +1202,7 @@ top:
  			goto top;
  		}
  		dmu_tx_abort(tx);
@@ -397,7 +389,7 @@
  		return (error);
  	}
  
-@@ -1214,7 +1214,7 @@
+@@ -1214,7 +1214,7 @@ top:
  	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
  	    &zp->z_size, sizeof (zp->z_size), tx));
  
@@ -406,7 +398,7 @@
  
  	dmu_tx_commit(tx);
  
-@@ -1235,19 +1235,19 @@
+@@ -1235,19 +1235,19 @@ static int
  zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
  {
  	zfs_sb_t *zsb = ZTOZSB(zp);
@@ -429,7 +421,7 @@
  		return (0);
  	}
  
-@@ -1256,7 +1256,7 @@
+@@ -1256,7 +1256,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
  
  	error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
  
@@ -438,7 +430,7 @@
  
  	return (error);
  }
-@@ -1275,7 +1275,7 @@
+@@ -1275,7 +1275,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
  {
  	zfs_sb_t *zsb = ZTOZSB(zp);
  	dmu_tx_t *tx;
@@ -447,7 +439,7 @@
  	int error;
  	sa_bulk_attr_t bulk[2];
  	int count = 0;
-@@ -1283,19 +1283,19 @@
+@@ -1283,19 +1283,19 @@ zfs_trunc(znode_t *zp, uint64_t end)
  	/*
  	 * We will change zp_size, lock the whole file.
  	 */
@@ -470,7 +462,7 @@
  		return (error);
  	}
  top:
-@@ -1310,7 +1310,7 @@
+@@ -1310,7 +1310,7 @@ top:
  			goto top;
  		}
  		dmu_tx_abort(tx);
@@ -479,7 +471,7 @@
  		return (error);
  	}
  
-@@ -1327,7 +1327,7 @@
+@@ -1327,7 +1327,7 @@ top:
  
  	dmu_tx_commit(tx);
  
@@ -492,7 +484,7 @@
 index 125d58d..bbe53d9 100644
 --- a/module/zfs/zvol.c
 +++ b/module/zfs/zvol.c
-@@ -537,7 +537,7 @@
+@@ -537,7 +537,7 @@ zvol_write(void *arg)
  	uint64_t size = blk_rq_bytes(req);
  	int error = 0;
  	dmu_tx_t *tx;
@@ -501,7 +493,7 @@
  
  	if (req->cmd_flags & VDEV_REQ_FLUSH)
  		zil_commit(zv->zv_zilog, ZVOL_OBJ);
-@@ -550,7 +550,7 @@
+@@ -550,7 +550,7 @@ zvol_write(void *arg)
  		return;
  	}
  
@@ -510,7 +502,7 @@
  
  	tx = dmu_tx_create(zv->zv_objset);
  	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);
-@@ -559,7 +559,7 @@
+@@ -559,7 +559,7 @@ zvol_write(void *arg)
  	error = dmu_tx_assign(tx, TXG_WAIT);
  	if (error) {
  		dmu_tx_abort(tx);
@@ -519,7 +511,7 @@
  		blk_end_request(req, -error, size);
  		return;
  	}
-@@ -570,7 +570,7 @@
+@@ -570,7 +570,7 @@ zvol_write(void *arg)
  		    req->cmd_flags & VDEV_REQ_FUA);
  
  	dmu_tx_commit(tx);
@@ -528,7 +520,7 @@
  
  	if ((req->cmd_flags & VDEV_REQ_FUA) ||
  	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
-@@ -589,7 +589,7 @@
+@@ -589,7 +589,7 @@ zvol_discard(void *arg)
  	uint64_t offset = blk_rq_pos(req) << 9;
  	uint64_t size = blk_rq_bytes(req);
  	int error;
@@ -537,7 +529,7 @@
  
  	if (offset + size > zv->zv_volsize) {
  		blk_end_request(req, -EIO, size);
-@@ -601,7 +601,7 @@
+@@ -601,7 +601,7 @@ zvol_discard(void *arg)
  		return;
  	}
  
@@ -546,7 +538,7 @@
  
  	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);
  
-@@ -609,7 +609,7 @@
+@@ -609,7 +609,7 @@ zvol_discard(void *arg)
  	 * TODO: maybe we should add the operation to the log.
  	 */
  
@@ -555,7 +547,7 @@
  
  	blk_end_request(req, -error, size);
  }
-@@ -630,18 +630,18 @@
+@@ -630,18 +630,18 @@ zvol_read(void *arg)
  	uint64_t offset = blk_rq_pos(req) << 9;
  	uint64_t size = blk_rq_bytes(req);
  	int error;
@@ -577,7 +569,7 @@
  
  	/* convert checksum errors into IO errors */
  	if (error == ECKSUM)
-@@ -744,6 +744,7 @@
+@@ -744,6 +744,7 @@ zvol_get_done(zgd_t *zgd, int error)
  	if (error == 0 && zgd->zgd_bp)
  		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
  
@@ -585,7 +577,7 @@
  	kmem_free(zgd, sizeof (zgd_t));
  }
  
-@@ -766,7 +767,8 @@
+@@ -766,7 +767,8 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
  
  	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
  	zgd->zgd_zilog = zv->zv_zilog;
@@ -595,6 +587,3 @@
  
  	/*
  	 * Write records come in two flavors: immediate and indirect.
--- 
-1.7.10
-





Navigation:
Lists: gentoo-commits: < Prev By Thread Next > < Prev By Date Next >
Previous by thread:
gentoo-x86 commit in sys-fs/zfs/files: zfs-0.6.0_rc9-range-lock-caller-allocate.patch
Next by thread:
gentoo-x86 commit in sys-fs/zfs: zfs-0.6.0_rc9-r1.ebuild ChangeLog
Previous by date:
gentoo-x86 commit in sys-fs/zfs: zfs-0.6.0_rc9-r3.ebuild ChangeLog zfs-0.6.0_rc9-r2.ebuild
Next by date:
gentoo-x86 commit in sci-mathematics/p9m4: metadata.xml ChangeLog


Updated Jun 26, 2012

Summary: Archive of the gentoo-commits mailing list.

Donate to support our development efforts.

Copyright 2001-2013 Gentoo Foundation, Inc. Questions, Comments? Contact us.