Merge /home/trondmy/scm/kernel/git/torvalds/linux-2.6
diff --git a/fs/exec.c b/fs/exec.c
index a04a575..d2208f7 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -126,8 +126,7 @@
 	struct nameidata nd;
 	int error;
 
-	nd.intent.open.flags = FMODE_READ;
-	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+	error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ);
 	if (error)
 		goto out;
 
@@ -139,7 +138,7 @@
 	if (error)
 		goto exit;
 
-	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+	file = nameidata_to_filp(&nd, O_RDONLY);
 	error = PTR_ERR(file);
 	if (IS_ERR(file))
 		goto out;
@@ -167,6 +166,7 @@
 out:
   	return error;
 exit:
+	release_open_intent(&nd);
 	path_release(&nd);
 	goto out;
 }
@@ -490,8 +490,7 @@
 	int err;
 	struct file *file;
 
-	nd.intent.open.flags = FMODE_READ;
-	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+	err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);
 	file = ERR_PTR(err);
 
 	if (!err) {
@@ -504,7 +503,7 @@
 				err = -EACCES;
 			file = ERR_PTR(err);
 			if (!err) {
-				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+				file = nameidata_to_filp(&nd, O_RDONLY);
 				if (!IS_ERR(file)) {
 					err = deny_write_access(file);
 					if (err) {
@@ -516,6 +515,7 @@
 				return file;
 			}
 		}
+		release_open_intent(&nd);
 		path_release(&nd);
 	}
 	goto out;
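
Both fs/exec.c hunks above follow the same new calling convention: path_lookup_open() (or __user_path_lookup_open() for a user-space pathname) pre-allocates the struct file and fills in the open intent, and nameidata_to_filp() later consumes both the intent file and the nameidata references, on success and on failure alike. Only error paths taken before nameidata_to_filp() must clean up by hand, which is why the exit: labels gain release_open_intent(). A minimal sketch of the pattern (the helper name open_for_read is illustrative, not part of the patch):

static struct file *open_for_read(const char *name)
{
	struct nameidata nd;
	int error;

	error = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);
	if (error)
		return ERR_PTR(error);

	error = permission(nd.dentry->d_inode, MAY_READ, &nd);
	if (error) {
		/* The intent file was never consumed: drop it by hand. */
		release_open_intent(&nd);
		path_release(&nd);
		return ERR_PTR(error);
	}
	/* Consumes the intent file and the nameidata references,
	 * whether it succeeds or fails, so no cleanup afterwards. */
	return nameidata_to_filp(&nd, O_RDONLY);
}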
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 82c77df..c4c8601 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -173,11 +173,10 @@
 
 	/* If we've already created an RPC client, check whether
 	 * RPC rebind is required
-	 * Note: why keep rebinding if we're on a tcp connection?
 	 */
 	if ((clnt = host->h_rpcclnt) != NULL) {
 		xprt = clnt->cl_xprt;
-		if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
+		if (time_after_eq(jiffies, host->h_nextrebind)) {
 			clnt->cl_port = 0;
 			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
 			dprintk("lockd: next rebind in %ld jiffies\n",
@@ -189,7 +188,6 @@
 			goto forgetit;
 
 		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
-		xprt->nocong = 1;	/* No congestion control for NLM */
 		xprt->resvport = 1;	/* NLM requires a reserved port */
 
 		/* Existing NLM servers accept AUTH_UNIX only */
diff --git a/fs/locks.c b/fs/locks.c
index f7daa5f..a1e8b22 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -316,21 +316,22 @@
 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
 	   POSIX-2001 defines it. */
 	start += l->l_start;
-	end = start + l->l_len - 1;
-	if (l->l_len < 0) {
-		end = start - 1;
-		start += l->l_len;
-	}
-
 	if (start < 0)
 		return -EINVAL;
-	if (l->l_len > 0 && end < 0)
-		return -EOVERFLOW;
-
+	fl->fl_end = OFFSET_MAX;
+	if (l->l_len > 0) {
+		end = start + l->l_len - 1;
+		fl->fl_end = end;
+	} else if (l->l_len < 0) {
+		end = start - 1;
+		fl->fl_end = end;
+		start += l->l_len;
+		if (start < 0)
+			return -EINVAL;
+	}
 	fl->fl_start = start;	/* we record the absolute position */
-	fl->fl_end = end;
-	if (l->l_len == 0)
-		fl->fl_end = OFFSET_MAX;
+	if (fl->fl_end < fl->fl_start)
+		return -EOVERFLOW;
 	
 	fl->fl_owner = current->files;
 	fl->fl_pid = current->tgid;
@@ -362,14 +363,21 @@
 		return -EINVAL;
 	}
 
-	if (((start += l->l_start) < 0) || (l->l_len < 0))
+	start += l->l_start;
+	if (start < 0)
 		return -EINVAL;
-	fl->fl_end = start + l->l_len - 1;
-	if (l->l_len > 0 && fl->fl_end < 0)
-		return -EOVERFLOW;
+	fl->fl_end = OFFSET_MAX;
+	if (l->l_len > 0) {
+		fl->fl_end = start + l->l_len - 1;
+	} else if (l->l_len < 0) {
+		fl->fl_end = start - 1;
+		start += l->l_len;
+		if (start < 0)
+			return -EINVAL;
+	}
 	fl->fl_start = start;	/* we record the absolute position */
-	if (l->l_len == 0)
-		fl->fl_end = OFFSET_MAX;
+	if (fl->fl_end < fl->fl_start)
+		return -EOVERFLOW;
 	
 	fl->fl_owner = current->files;
 	fl->fl_pid = current->tgid;
@@ -829,12 +837,16 @@
 		/* Detect adjacent or overlapping regions (if same lock type)
 		 */
 		if (request->fl_type == fl->fl_type) {
+			/* In all comparisons of start vs end, use
+			 * "start - 1" rather than "end + 1". If end
+			 * is OFFSET_MAX, end + 1 will become negative.
+			 */
 			if (fl->fl_end < request->fl_start - 1)
 				goto next_lock;
 			/* If the next lock in the list has entirely bigger
 			 * addresses than the new one, insert the lock here.
 			 */
-			if (fl->fl_start > request->fl_end + 1)
+			if (fl->fl_start - 1 > request->fl_end)
 				break;
 
 			/* If we come here, the new and old lock are of the
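
The rewritten conversion helpers above spell out the POSIX byte-range rules: l_len > 0 locks [start, start + l_len - 1]; l_len == 0 locks from start to end of file (OFFSET_MAX internally); and the POSIX-2001 case l_len < 0 locks [start + l_len, start - 1], returning -EINVAL if the resulting start is negative and -EOVERFLOW if the range wraps. The negative-length case from user space (file name illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 100,	/* reference point */
		.l_len    = -10,	/* POSIX-2001: lock bytes 90..99 */
	};

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("F_SETLK");
		return 1;
	}
	/* The kernel computed fl_start = 100 + (-10) = 90 and
	 * fl_end = 100 - 1 = 99, i.e. the same lock as l_start = 90,
	 * l_len = 10. */
	printf("locked bytes 90..99\n");
	close(fd);
	return 0;
}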
diff --git a/fs/namei.c b/fs/namei.c
index aa62dbd..aaaa810 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -28,6 +28,7 @@
 #include <linux/syscalls.h>
 #include <linux/mount.h>
 #include <linux/audit.h>
+#include <linux/file.h>
 #include <asm/namei.h>
 #include <asm/uaccess.h>
 
@@ -317,6 +318,18 @@
 	mntput_no_expire(nd->mnt);
 }
 
+/**
+ * release_open_intent - free up open intent resources
+ * @nd: pointer to nameidata
+ */
+void release_open_intent(struct nameidata *nd)
+{
+	if (nd->intent.open.file->f_dentry == NULL)
+		put_filp(nd->intent.open.file);
+	else
+		fput(nd->intent.open.file);
+}
+
 /*
  * Internal lookup() using the new generic dcache.
  * SMP-safe
@@ -750,6 +763,7 @@
 		struct qstr this;
 		unsigned int c;
 
+		nd->flags |= LOOKUP_CONTINUE;
 		err = exec_permission_lite(inode, nd);
 		if (err == -EAGAIN) { 
 			err = permission(inode, MAY_EXEC, nd);
@@ -802,7 +816,6 @@
 			if (err < 0)
 				break;
 		}
-		nd->flags |= LOOKUP_CONTINUE;
 		/* This does the actual lookups.. */
 		err = do_lookup(nd, &this, &next);
 		if (err)
@@ -1052,6 +1065,70 @@
 	return retval;
 }
 
+static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags,
+		struct nameidata *nd, int open_flags, int create_mode)
+{
+	struct file *filp = get_empty_filp();
+	int err;
+
+	if (filp == NULL)
+		return -ENFILE;
+	nd->intent.open.file = filp;
+	nd->intent.open.flags = open_flags;
+	nd->intent.open.create_mode = create_mode;
+	err = path_lookup(name, lookup_flags|LOOKUP_OPEN, nd);
+	if (IS_ERR(nd->intent.open.file)) {
+		if (err == 0) {
+			err = PTR_ERR(nd->intent.open.file);
+			path_release(nd);
+		}
+	} else if (err != 0)
+		release_open_intent(nd);
+	return err;
+}
+
+/**
+ * path_lookup_open - lookup a file path with open intent
+ * @name: pointer to file name
+ * @lookup_flags: lookup intent flags
+ * @nd: pointer to nameidata
+ * @open_flags: open intent flags
+ */
+int path_lookup_open(const char *name, unsigned int lookup_flags,
+		struct nameidata *nd, int open_flags)
+{
+	return __path_lookup_intent_open(name, lookup_flags, nd,
+			open_flags, 0);
+}
+
+/**
+ * path_lookup_create - lookup a file path with open + create intent
+ * @name: pointer to file name
+ * @lookup_flags: lookup intent flags
+ * @nd: pointer to nameidata
+ * @open_flags: open intent flags
+ * @create_mode: create intent flags
+ */
+int path_lookup_create(const char *name, unsigned int lookup_flags,
+		struct nameidata *nd, int open_flags, int create_mode)
+{
+	return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd,
+			open_flags, create_mode);
+}
+
+int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags,
+		struct nameidata *nd, int open_flags)
+{
+	char *tmp = getname(name);
+	int err = PTR_ERR(tmp);
+
+	if (!IS_ERR(tmp)) {
+		err = __path_lookup_intent_open(tmp, lookup_flags, nd, open_flags, 0);
+		putname(tmp);
+	}
+	return err;
+}
+
 /*
  * Restricted form of lookup. Doesn't follow links, single-component only,
  * needs parent already locked. Doesn't follow mounts.
@@ -1416,27 +1493,27 @@
  */
 int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
 {
-	int acc_mode, error = 0;
+	int acc_mode, error;
 	struct path path;
 	struct dentry *dir;
 	int count = 0;
 
 	acc_mode = ACC_MODE(flag);
 
+	/* O_TRUNC implies we need access checks for write permissions */
+	if (flag & O_TRUNC)
+		acc_mode |= MAY_WRITE;
+
 	/* Allow the LSM permission hook to distinguish append 
 	   access from general write access. */
 	if (flag & O_APPEND)
 		acc_mode |= MAY_APPEND;
 
-	/* Fill in the open() intent data */
-	nd->intent.open.flags = flag;
-	nd->intent.open.create_mode = mode;
-
 	/*
 	 * The simplest case - just a plain lookup.
 	 */
 	if (!(flag & O_CREAT)) {
-		error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd);
+		error = path_lookup_open(pathname, lookup_flags(flag), nd, flag);
 		if (error)
 			return error;
 		goto ok;
@@ -1445,7 +1522,7 @@
 	/*
 	 * Create - we need to know the parent.
 	 */
-	error = path_lookup(pathname, LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE, nd);
+	error = path_lookup_create(pathname, LOOKUP_PARENT, nd, flag, mode);
 	if (error)
 		return error;
 
@@ -1520,6 +1597,8 @@
 exit_dput:
 	dput_path(&path, nd);
 exit:
+	if (!IS_ERR(nd->intent.open.file))
+		release_open_intent(nd);
 	path_release(nd);
 	return error;
 
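Two behavioural points in the fs/namei.c changes are worth noting. First, setting nd->flags |= LOOKUP_CONTINUE at the top of the link_path_walk() loop means the flag is already set when exec_permission_lite() runs, so permission checks can tell intermediate path components from the last one. Second, open_namei() now adds MAY_WRITE to acc_mode when O_TRUNC is set, making truncation subject to the ordinary write-permission check. From user space (path and modes illustrative; run unprivileged, since CAP_DAC_OVERRIDE bypasses the check):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	/* A file we may read but not write. */
	close(open("/tmp/ro-file", O_CREAT | O_WRONLY, 0644));
	chmod("/tmp/ro-file", 0444);

	/* With the MAY_WRITE check this fails with EACCES instead of
	 * truncating a file the caller has no write permission on. */
	if (open("/tmp/ro-file", O_RDONLY | O_TRUNC) < 0)
		perror("open(O_RDONLY|O_TRUNC)");
	return 0;
}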
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 4a36839..44135af 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -142,7 +142,7 @@
 /*
  * Basic procedure for returning a delegation to the server
  */
-int nfs_inode_return_delegation(struct inode *inode)
+int __nfs_inode_return_delegation(struct inode *inode)
 {
 	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
 	struct nfs_inode *nfsi = NFS_I(inode);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 3f6c45a..8017846 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -25,7 +25,7 @@
 
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
-int nfs_inode_return_delegation(struct inode *inode);
+int __nfs_inode_return_delegation(struct inode *inode);
 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
 
 struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle);
@@ -47,11 +47,25 @@
 		return 1;
 	return 0;
 }
+
+static inline int nfs_inode_return_delegation(struct inode *inode)
+{
+	int err = 0;
+
+	if (NFS_I(inode)->delegation != NULL)
+		err = __nfs_inode_return_delegation(inode);
+	return err;
+}
 #else
 static inline int nfs_have_delegation(struct inode *inode, int flags)
 {
 	return 0;
 }
+
+static inline int nfs_inode_return_delegation(struct inode *inode)
+{
+	return 0;
+}
 #endif
 
 #endif
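
The header now wraps __nfs_inode_return_delegation() in an inline fast path, so callers such as nfs_dentry_iput(), nfs_rename() and nfs_setattr() can call nfs_inode_return_delegation() unconditionally and only pay for the out-of-line function when a delegation is actually held; the !CONFIG_NFS_V4 stub keeps those call sites free of #ifdefs. The idiom in miniature, as self-contained C (types and names are illustrative, not the kernel's):

#include <stdio.h>

struct delegation { int unused; };
struct inode { struct delegation *delegation; };

/* Out-of-line slow path, analogous to __nfs_inode_return_delegation(). */
static int __return_delegation(struct inode *inode)
{
	printf("returning delegation to the server\n");
	inode->delegation = NULL;
	return 0;
}

/* Inline fast path: callers no longer test for a delegation themselves. */
static inline int return_delegation(struct inode *inode)
{
	if (inode->delegation == NULL)
		return 0;		/* common case: no call, no RPC */
	return __return_delegation(inode);
}

int main(void)
{
	struct delegation d;
	struct inode i = { .delegation = &d };

	return_delegation(&i);	/* slow path once */
	return_delegation(&i);	/* then a cheap inline no-op */
	return 0;
}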
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2df639f..eb50c19 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -565,8 +565,6 @@
 		}
 	}
 	unlock_kernel();
-	if (desc->error < 0)
-		return desc->error;
 	if (res < 0)
 		return res;
 	return 0;
@@ -803,6 +801,7 @@
  */
 static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
 {
+	nfs_inode_return_delegation(inode);
 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
 		lock_kernel();
 		inode->i_nlink--;
@@ -916,7 +915,6 @@
 static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
 	struct dentry *res = NULL;
-	struct inode *inode = NULL;
 	int error;
 
 	/* Check that we are indeed trying to open this file */
@@ -930,8 +928,10 @@
 	dentry->d_op = NFS_PROTO(dir)->dentry_ops;
 
 	/* Let vfs_create() deal with O_EXCL */
-	if (nd->intent.open.flags & O_EXCL)
-		goto no_entry;
+	if (nd->intent.open.flags & O_EXCL) {
+		d_add(dentry, NULL);
+		goto out;
+	}
 
 	/* Open the file on the server */
 	lock_kernel();
@@ -945,32 +945,30 @@
 
 	if (nd->intent.open.flags & O_CREAT) {
 		nfs_begin_data_update(dir);
-		inode = nfs4_atomic_open(dir, dentry, nd);
+		res = nfs4_atomic_open(dir, dentry, nd);
 		nfs_end_data_update(dir);
 	} else
-		inode = nfs4_atomic_open(dir, dentry, nd);
+		res = nfs4_atomic_open(dir, dentry, nd);
 	unlock_kernel();
-	if (IS_ERR(inode)) {
-		error = PTR_ERR(inode);
+	if (IS_ERR(res)) {
+		error = PTR_ERR(res);
 		switch (error) {
 			/* Make a negative dentry */
 			case -ENOENT:
-				inode = NULL;
-				break;
+				res = NULL;
+				goto out;
 			/* This turned out not to be a regular file */
+			case -EISDIR:
+			case -ENOTDIR:
+				goto no_open;
 			case -ELOOP:
 				if (!(nd->intent.open.flags & O_NOFOLLOW))
 					goto no_open;
-			/* case -EISDIR: */
 			/* case -EINVAL: */
 			default:
-				res = ERR_PTR(error);
 				goto out;
 		}
-	}
-no_entry:
-	res = d_add_unique(dentry, inode);
-	if (res != NULL)
+	} else if (res != NULL)
 		dentry = res;
 	nfs_renew_times(dentry);
 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -1014,7 +1012,7 @@
 	 */
 	lock_kernel();
 	verifier = nfs_save_change_attribute(dir);
-	ret = nfs4_open_revalidate(dir, dentry, openflags);
+	ret = nfs4_open_revalidate(dir, dentry, openflags, nd);
 	if (!ret)
 		nfs_set_verifier(dentry, verifier);
 	unlock_kernel();
@@ -1137,7 +1135,7 @@
 
 	lock_kernel();
 	nfs_begin_data_update(dir);
-	error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
+	error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, nd);
 	nfs_end_data_update(dir);
 	if (error != 0)
 		goto out_err;
@@ -1332,6 +1330,7 @@
 
 	nfs_begin_data_update(dir);
 	if (inode != NULL) {
+		nfs_inode_return_delegation(inode);
 		nfs_begin_data_update(inode);
 		error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
 		/* The VFS may want to delete this inode */
@@ -1512,9 +1511,11 @@
 	 */
 	if (!new_inode)
 		goto go_ahead;
-	if (S_ISDIR(new_inode->i_mode))
-		goto out;
-	else if (atomic_read(&new_dentry->d_count) > 2) {
+	if (S_ISDIR(new_inode->i_mode)) {
+		error = -EISDIR;
+		if (!S_ISDIR(old_inode->i_mode))
+			goto out;
+	} else if (atomic_read(&new_dentry->d_count) > 2) {
 		int err;
 		/* copy the target dentry's name */
 		dentry = d_alloc(new_dentry->d_parent,
@@ -1539,7 +1540,8 @@
 #endif
 			goto out;
 		}
-	}
+	} else
+		new_inode->i_nlink--;
 
 go_ahead:
 	/*
@@ -1549,6 +1551,7 @@
 		nfs_wb_all(old_inode);
 		shrink_dcache_parent(old_dentry);
 	}
+	nfs_inode_return_delegation(old_inode);
 
 	if (new_inode)
 		d_delete(new_dentry);
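
The nfs_rename() hunk above tightens the busy-file handling: a target directory is only acceptable when the source is also a directory (otherwise -EISDIR), and when a busy non-directory target will be unlinked by the rename, the client now decrements new_inode->i_nlink itself rather than waiting for a revalidation. The -EISDIR rule is standard rename(2) behaviour, demonstrable in user space (paths illustrative):

#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	mkdir("/tmp/target-dir", 0755);
	close(open("/tmp/source-file", O_CREAT | O_WRONLY, 0644));

	/* Renaming a non-directory over a directory must fail. */
	if (rename("/tmp/source-file", "/tmp/target-dir") < 0 &&
	    errno == EISDIR)
		printf("rename: EISDIR, as nfs_rename() now reports\n");
	return 0;
}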
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6bdcfa9..572d859 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -376,22 +376,31 @@
 
 static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
 {
+	struct file_lock *cfl;
 	struct inode *inode = filp->f_mapping->host;
 	int status = 0;
 
 	lock_kernel();
-	/* Use local locking if mounted with "-onolock" */
-	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
-		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
-	else {
-		struct file_lock *cfl = posix_test_lock(filp, fl);
-
-		fl->fl_type = F_UNLCK;
-		if (cfl != NULL)
-			memcpy(fl, cfl, sizeof(*fl));
+	/* Try local locking first */
+	cfl = posix_test_lock(filp, fl);
+	if (cfl != NULL) {
+		locks_copy_lock(fl, cfl);
+		goto out;
 	}
+
+	if (nfs_have_delegation(inode, FMODE_READ))
+		goto out_noconflict;
+
+	if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
+		goto out_noconflict;
+
+	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+out:
 	unlock_kernel();
 	return status;
+out_noconflict:
+	fl->fl_type = F_UNLCK;
+	goto out;
 }
 
 static int do_vfs_lock(struct file *file, struct file_lock *fl)
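
do_getlk() now tries to answer F_GETLK without a wire call: first against the local lock table, then by observing that a read delegation guarantees the server has granted no conflicting lock to another client, falling back to NFS_PROTO(inode)->lock() only when neither shortcut applies. Every no-conflict exit uses the standard F_GETLK convention of rewriting fl_type to F_UNLCK, which a caller observes like this (path illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/nfs/somefile", O_RDWR);
	struct flock fl = {
		.l_type   = F_WRLCK,	/* "could I take a write lock?" */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* whole file */
	};

	if (fd < 0 || fcntl(fd, F_GETLK, &fl) < 0)
		return 1;
	if (fl.l_type == F_UNLCK)
		printf("no conflicting lock\n");
	else
		printf("conflict: pid %d holds a lock\n", (int)fl.l_pid);
	close(fd);
	return 0;
}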
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d4eadee..65d5ab4 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -358,6 +358,35 @@
 	return no_root_error;
 }
 
+static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, unsigned int timeo, unsigned int retrans)
+{
+	to->to_initval = timeo * HZ / 10;
+	to->to_retries = retrans;
+	if (!to->to_retries)
+		to->to_retries = 2;
+
+	switch (proto) {
+	case IPPROTO_TCP:
+		if (!to->to_initval)
+			to->to_initval = 60 * HZ;
+		if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
+			to->to_initval = NFS_MAX_TCP_TIMEOUT;
+		to->to_increment = to->to_initval;
+		to->to_maxval = to->to_initval + (to->to_increment * to->to_retries);
+		to->to_exponential = 0;
+		break;
+	case IPPROTO_UDP:
+	default:
+		if (!to->to_initval)
+			to->to_initval = 11 * HZ / 10;
+		if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
+			to->to_initval = NFS_MAX_UDP_TIMEOUT;
+		to->to_maxval = NFS_MAX_UDP_TIMEOUT;
+		to->to_exponential = 1;
+		break;
+	}
+}
+
 /*
  * Create an RPC client handle.
  */
@@ -367,22 +396,12 @@
 	struct rpc_timeout	timeparms;
 	struct rpc_xprt		*xprt = NULL;
 	struct rpc_clnt		*clnt = NULL;
-	int			tcp   = (data->flags & NFS_MOUNT_TCP);
+	int			proto = (data->flags & NFS_MOUNT_TCP) ? IPPROTO_TCP : IPPROTO_UDP;
 
-	/* Initialize timeout values */
-	timeparms.to_initval = data->timeo * HZ / 10;
-	timeparms.to_retries = data->retrans;
-	timeparms.to_maxval  = tcp ? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT;
-	timeparms.to_exponential = 1;
-
-	if (!timeparms.to_initval)
-		timeparms.to_initval = (tcp ? 600 : 11) * HZ / 10;
-	if (!timeparms.to_retries)
-		timeparms.to_retries = 5;
+	nfs_init_timeout_values(&timeparms, proto, data->timeo, data->retrans);
 
 	/* create transport and client */
-	xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP,
-				 &server->addr, &timeparms);
+	xprt = xprt_create_proto(proto, &server->addr, &timeparms);
 	if (IS_ERR(xprt)) {
 		dprintk("%s: cannot create RPC transport. Error = %ld\n",
 				__FUNCTION__, PTR_ERR(xprt));
@@ -576,7 +595,6 @@
 		{ NFS_MOUNT_SOFT, ",soft", ",hard" },
 		{ NFS_MOUNT_INTR, ",intr", "" },
 		{ NFS_MOUNT_POSIX, ",posix", "" },
-		{ NFS_MOUNT_TCP, ",tcp", ",udp" },
 		{ NFS_MOUNT_NOCTO, ",nocto", "" },
 		{ NFS_MOUNT_NOAC, ",noac", "" },
 		{ NFS_MOUNT_NONLM, ",nolock", ",lock" },
@@ -585,6 +603,8 @@
 	};
 	struct proc_nfs_info *nfs_infop;
 	struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+	char buf[12];
+	char *proto;
 
 	seq_printf(m, ",v%d", nfss->rpc_ops->version);
 	seq_printf(m, ",rsize=%d", nfss->rsize);
@@ -603,6 +623,18 @@
 		else
 			seq_puts(m, nfs_infop->nostr);
 	}
+	switch (nfss->client->cl_xprt->prot) {
+		case IPPROTO_TCP:
+			proto = "tcp";
+			break;
+		case IPPROTO_UDP:
+			proto = "udp";
+			break;
+		default:
+			snprintf(buf, sizeof(buf), "%u", nfss->client->cl_xprt->prot);
+			proto = buf;
+	}
+	seq_printf(m, ",proto=%s", proto);
 	seq_puts(m, ",addr=");
 	seq_escape(m, nfss->hostname, " \t\n\\");
 	return 0;
@@ -821,6 +853,11 @@
 			filemap_fdatawait(inode->i_mapping);
 		nfs_wb_all(inode);
 	}
+	/*
+	 * Return any delegations if we're going to change ACLs
+	 */
+	if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
+		nfs_inode_return_delegation(inode);
 	error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
 	if (error == 0)
 		nfs_refresh_inode(inode, &fattr);
@@ -1639,8 +1676,7 @@
 	struct nfs_inode *nfsi = NFS_I(inode);
 
 	/* If we are holding a delegation, return it! */
-	if (nfsi->delegation != NULL)
-		nfs_inode_return_delegation(inode);
+	nfs_inode_return_delegation(inode);
 	/* First call standard NFS clear_inode() code */
 	nfs_clear_inode(inode);
 	/* Now clear out any remaining state */
@@ -1669,7 +1705,7 @@
 	struct rpc_clnt *clnt = NULL;
 	struct rpc_timeout timeparms;
 	rpc_authflavor_t authflavour;
-	int proto, err = -EIO;
+	int err = -EIO;
 
 	sb->s_blocksize_bits = 0;
 	sb->s_blocksize = 0;
@@ -1687,30 +1723,8 @@
 	server->acdirmax = data->acdirmax*HZ;
 
 	server->rpc_ops = &nfs_v4_clientops;
-	/* Initialize timeout values */
 
-	timeparms.to_initval = data->timeo * HZ / 10;
-	timeparms.to_retries = data->retrans;
-	timeparms.to_exponential = 1;
-	if (!timeparms.to_retries)
-		timeparms.to_retries = 5;
-
-	proto = data->proto;
-	/* Which IP protocol do we use? */
-	switch (proto) {
-	case IPPROTO_TCP:
-		timeparms.to_maxval  = RPC_MAX_TCP_TIMEOUT;
-		if (!timeparms.to_initval)
-			timeparms.to_initval = 600 * HZ / 10;
-		break;
-	case IPPROTO_UDP:
-		timeparms.to_maxval  = RPC_MAX_UDP_TIMEOUT;
-		if (!timeparms.to_initval)
-			timeparms.to_initval = 11 * HZ / 10;
-		break;
-	default:
-		return -EINVAL;
-	}
+	nfs_init_timeout_values(&timeparms, data->proto, data->timeo, data->retrans);
 
 	clp = nfs4_get_client(&server->addr.sin_addr);
 	if (!clp) {
@@ -1735,7 +1749,7 @@
 
 	down_write(&clp->cl_sem);
 	if (IS_ERR(clp->cl_rpcclient)) {
-		xprt = xprt_create_proto(proto, &server->addr, &timeparms);
+		xprt = xprt_create_proto(data->proto, &server->addr, &timeparms);
 		if (IS_ERR(xprt)) {
 			up_write(&clp->cl_sem);
 			err = PTR_ERR(xprt);
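
nfs_init_timeout_values() consolidates the timeout setup that the v2/v3 and v4 mount paths previously duplicated. The timeo mount option is in tenths of a second (hence timeo * HZ / 10 jiffies) and retrans defaults to 2. TCP backs off linearly, adding to_increment per retry up to to_initval + to_increment * to_retries; UDP backs off exponentially up to NFS_MAX_UDP_TIMEOUT. The arithmetic in plain C, with the caps in seconds (600 s TCP / 60 s UDP are assumptions based on the NFS_MAX_*_TIMEOUT values of this era):

#include <stdio.h>

#define MAX_TCP_SECS 600	/* assumed NFS_MAX_TCP_TIMEOUT / HZ */
#define MAX_UDP_SECS 60		/* assumed NFS_MAX_UDP_TIMEOUT / HZ */

int main(void)
{
	/* mount -o proto=tcp,timeo=600,retrans=2 */
	unsigned int timeo = 600, retrans = 2;
	double initval = timeo / 10.0;		/* 60.0 s */

	if (initval > MAX_TCP_SECS)
		initval = MAX_TCP_SECS;
	/* Linear backoff: waits of 60 s, 120 s, 180 s, then give up. */
	printf("TCP: initval=%.1fs increment=%.1fs maxval=%.1fs\n",
	       initval, initval, initval + initval * retrans);

	/* UDP with timeo unset defaults to 11 tenths = 1.1 s and doubles
	 * per retry (to_exponential = 1), capped at MAX_UDP_SECS. */
	printf("UDP: 1.1s, 2.2s, 4.4s, ... capped at %ds\n", MAX_UDP_SECS);
	return 0;
}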
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index edc9551..e4a1cd4 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -299,7 +299,7 @@
  */
 static int
 nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
-		 int flags)
+		 int flags, struct nameidata *nd)
 {
 	struct nfs_fh		fhandle;
 	struct nfs_fattr	fattr;
@@ -735,7 +735,7 @@
 static void
 nfs3_read_done(struct rpc_task *task)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
 
 	if (nfs3_async_handle_jukebox(task))
 		return;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index ec1a22d..78a53f5 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -93,25 +93,50 @@
 };
 
 /*
+ * struct rpc_sequence ensures that RPC calls are sent in the exact
+ * order that they appear on the list.
+ */
+struct rpc_sequence {
+	struct rpc_wait_queue	wait;	/* RPC call delay queue */
+	spinlock_t lock;		/* Protects the list */
+	struct list_head list;		/* Defines sequence of RPC calls */
+};
+
+#define NFS_SEQID_CONFIRMED 1
+struct nfs_seqid_counter {
+	struct rpc_sequence *sequence;
+	int flags;
+	u32 counter;
+};
+
+struct nfs_seqid {
+	struct nfs_seqid_counter *sequence;
+	struct list_head list;
+};
+
+static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status)
+{
+	if (seqid_mutating_err(-status))
+		seqid->flags |= NFS_SEQID_CONFIRMED;
+}
+
+/*
  * NFS4 state_owners and lock_owners are simply labels for ordered
  * sequences of RPC calls. Their sole purpose is to provide once-only
  * semantics by allowing the server to identify replayed requests.
- *
- * The ->so_sema is held during all state_owner seqid-mutating operations:
- * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize
- * so_seqid.
  */
 struct nfs4_state_owner {
+	spinlock_t	     so_lock;
 	struct list_head     so_list;	 /* per-clientid list of state_owners */
 	struct nfs4_client   *so_client;
 	u32                  so_id;      /* 32-bit identifier, unique */
-	struct semaphore     so_sema;
-	u32                  so_seqid;   /* protected by so_sema */
 	atomic_t	     so_count;
 
 	struct rpc_cred	     *so_cred;	 /* Associated cred */
 	struct list_head     so_states;
 	struct list_head     so_delegations;
+	struct nfs_seqid_counter so_seqid;
+	struct rpc_sequence  so_sequence;
 };
 
 /*
@@ -132,7 +157,7 @@
 	fl_owner_t		ls_owner;	/* POSIX lock owner */
 #define NFS_LOCK_INITIALIZED 1
 	int			ls_flags;
-	u32			ls_seqid;
+	struct nfs_seqid_counter	ls_seqid;
 	u32			ls_id;
 	nfs4_stateid		ls_stateid;
 	atomic_t		ls_count;
@@ -153,7 +178,6 @@
 	struct inode *inode;		/* Pointer to the inode */
 
 	unsigned long flags;		/* Do we hold any locks? */
-	struct semaphore lock_sema;	/* Serializes file locking operations */
 	spinlock_t state_lock;		/* Protects the lock_states list */
 
 	nfs4_stateid stateid;
@@ -191,8 +215,8 @@
 extern int nfs4_proc_async_renew(struct nfs4_client *);
 extern int nfs4_proc_renew(struct nfs4_client *);
 extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode);
-extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
-extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);
+extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
+extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
 
 extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
 extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;
@@ -224,12 +248,17 @@
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, mode_t);
 extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
-extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
 extern void nfs4_schedule_state_recovery(struct nfs4_client *);
+extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
 
+extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
+extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
+extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_free_seqid(struct nfs_seqid *seqid);
+
 extern const nfs4_stateid zero_stateid;
 
 /* nfs4xdr.c */
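
The nfs_seqid machinery declared above is what replaces the so_sema/so_seqid pair and state->lock_sema: each seqid-mutating RPC allocates an nfs_seqid, takes its turn on the owner's rpc_sequence, and advances the counter once the reply is seen. The synchronous usage pattern, as a hypothetical caller condensed from the _nfs4_open_reclaim() hunk in fs/nfs/nfs4proc.c below:

/* Hypothetical; msg, server and sp are set up as in _nfs4_open_reclaim(). */
static int sample_open_call(struct nfs_server *server,
			    struct nfs4_state_owner *sp,
			    struct rpc_message *msg,
			    struct nfs_openargs *o_arg)
{
	int status;

	o_arg->seqid = nfs_alloc_seqid(&sp->so_seqid);
	if (o_arg->seqid == NULL)
		return -ENOMEM;
	status = rpc_call_sync(server->client, msg, RPC_TASK_NOINTR);
	/* Mark the sequence established, then bump the counter: it advances
	 * on success and on seqid-mutating errors, not on e.g. timeouts. */
	nfs_confirm_seqid(&sp->so_seqid, status);
	nfs_increment_open_seqid(status, o_arg->seqid);
	nfs_free_seqid(o_arg->seqid);	/* unhooks us; wakes the next waiter */
	return status;
}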
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9701ca8..9c1da34 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -47,6 +47,7 @@
 #include <linux/nfs_page.h>
 #include <linux/smp_lock.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -56,10 +57,11 @@
 #define NFS4_POLL_RETRY_MIN	(1*HZ)
 #define NFS4_POLL_RETRY_MAX	(15*HZ)
 
+static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid);
 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
-static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *);
+static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *);
 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry);
-static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
+static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
 extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
 extern struct rpc_procinfo nfs4_procedures[];
 
@@ -189,12 +191,28 @@
 		nfsi->change_attr = cinfo->after;
 }
 
+/* Helper for asynchronous RPC calls */
+static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin,
+		rpc_action tk_exit, void *calldata)
+{
+	struct rpc_task *task;
+
+	if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC)))
+		return -ENOMEM;
+
+	task->tk_calldata = calldata;
+	task->tk_action = tk_begin;
+	rpc_execute(task);
+	return 0;
+}
+
 static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
 {
 	struct inode *inode = state->inode;
 
 	open_flags &= (FMODE_READ|FMODE_WRITE);
 	/* Protect against nfs4_find_state() */
+	spin_lock(&state->owner->so_lock);
 	spin_lock(&inode->i_lock);
 	state->state |= open_flags;
 	/* NB! List reordering - see the reclaim code for why.  */
@@ -204,12 +222,12 @@
 		state->nreaders++;
 	memcpy(&state->stateid, stateid, sizeof(state->stateid));
 	spin_unlock(&inode->i_lock);
+	spin_unlock(&state->owner->so_lock);
 }
 
 /*
  * OPEN_RECLAIM:
  * 	reclaim state on the server after a reboot.
- * 	Assumes caller is holding the sp->so_sem
  */
 static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
 {
@@ -218,7 +236,6 @@
 	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
 	struct nfs_openargs o_arg = {
 		.fh = NFS_FH(inode),
-		.seqid = sp->so_seqid,
 		.id = sp->so_id,
 		.open_flags = state->state,
 		.clientid = server->nfs4_state->cl_clientid,
@@ -245,8 +262,13 @@
 		}
 		o_arg.u.delegation_type = delegation->type;
 	}
+	o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+	if (o_arg.seqid == NULL)
+		return -ENOMEM;
 	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-	nfs4_increment_seqid(status, sp);
+	/* Confirm the sequence as being established */
+	nfs_confirm_seqid(&sp->so_seqid, status);
+	nfs_increment_open_seqid(status, o_arg.seqid);
 	if (status == 0) {
 		memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
 		if (o_res.delegation_type != 0) {
@@ -256,6 +278,7 @@
 				nfs_async_inode_return_delegation(inode, &o_res.stateid);
 		}
 	}
+	nfs_free_seqid(o_arg.seqid);
 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
 	/* Ensure we update the inode attributes */
 	NFS_CACHEINV(inode);
@@ -302,23 +325,35 @@
 	};
 	int status = 0;
 
-	down(&sp->so_sema);
 	if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
 		goto out;
 	if (state->state == 0)
 		goto out;
-	arg.seqid = sp->so_seqid;
+	arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+	status = -ENOMEM;
+	if (arg.seqid == NULL)
+		goto out;
 	arg.open_flags = state->state;
 	memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data));
 	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-	nfs4_increment_seqid(status, sp);
+	nfs_increment_open_seqid(status, arg.seqid);
+	if (status != 0)
+		goto out_free;
+	if (res.rflags & NFS4_OPEN_RESULT_CONFIRM) {
+		status = _nfs4_proc_open_confirm(server->client, NFS_FH(inode),
+				sp, &res.stateid, arg.seqid);
+		if (status != 0)
+			goto out_free;
+	}
+	nfs_confirm_seqid(&sp->so_seqid, 0);
 	if (status >= 0) {
 		memcpy(state->stateid.data, res.stateid.data,
 				sizeof(state->stateid.data));
 		clear_bit(NFS_DELEGATED_STATE, &state->flags);
 	}
+out_free:
+	nfs_free_seqid(arg.seqid);
 out:
-	up(&sp->so_sema);
 	dput(parent);
 	return status;
 }
@@ -345,11 +380,11 @@
 	return err;
 }
 
-static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid)
+static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid)
 {
 	struct nfs_open_confirmargs arg = {
 		.fh             = fh,
-		.seqid          = sp->so_seqid,
+		.seqid          = seqid,
 		.stateid	= *stateid,
 	};
 	struct nfs_open_confirmres res;
@@ -362,7 +397,9 @@
 	int status;
 
 	status = rpc_call_sync(clnt, &msg, RPC_TASK_NOINTR);
-	nfs4_increment_seqid(status, sp);
+	/* Confirm the sequence as being established */
+	nfs_confirm_seqid(&sp->so_seqid, status);
+	nfs_increment_open_seqid(status, seqid);
 	if (status >= 0)
 		memcpy(stateid, &res.stateid, sizeof(*stateid));
 	return status;
@@ -380,21 +417,37 @@
 	int status;
 
 	/* Update sequence id. The caller must serialize! */
-	o_arg->seqid = sp->so_seqid;
 	o_arg->id = sp->so_id;
 	o_arg->clientid = sp->so_client->cl_clientid;
 
 	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-	nfs4_increment_seqid(status, sp);
+	if (status == 0) {
+		/* OPEN on anything except a regular file is disallowed in NFSv4 */
+		switch (o_res->f_attr->mode & S_IFMT) {
+			case S_IFREG:
+				break;
+			case S_IFLNK:
+				status = -ELOOP;
+				break;
+			case S_IFDIR:
+				status = -EISDIR;
+				break;
+			default:
+				status = -ENOTDIR;
+		}
+	}
+
+	nfs_increment_open_seqid(status, o_arg->seqid);
 	if (status != 0)
 		goto out;
 	update_changeattr(dir, &o_res->cinfo);
 	if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
 		status = _nfs4_proc_open_confirm(server->client, &o_res->fh,
-				sp, &o_res->stateid);
+				sp, &o_res->stateid, o_arg->seqid);
 		if (status != 0)
 			goto out;
 	}
+	nfs_confirm_seqid(&sp->so_seqid, 0);
 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
 		status = server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr);
 out:
@@ -465,6 +518,10 @@
 		set_bit(NFS_DELEGATED_STATE, &state->flags);
 		goto out;
 	}
+	o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+	status = -ENOMEM;
+	if (o_arg.seqid == NULL)
+		goto out;
 	status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
 	if (status != 0)
 		goto out_nodeleg;
@@ -490,6 +547,7 @@
 			nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
 	}
 out_nodeleg:
+	nfs_free_seqid(o_arg.seqid);
 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
 out:
 	dput(parent);
@@ -564,7 +622,6 @@
 		dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__);
 		goto out_err;
 	}
-	down(&sp->so_sema);
 	state = nfs4_get_open_state(inode, sp);
 	if (state == NULL)
 		goto out_err;
@@ -589,7 +646,6 @@
 	set_bit(NFS_DELEGATED_STATE, &state->flags);
 	update_open_stateid(state, &delegation->stateid, open_flags);
 out_ok:
-	up(&sp->so_sema);
 	nfs4_put_state_owner(sp);
 	up_read(&nfsi->rwsem);
 	up_read(&clp->cl_sem);
@@ -600,11 +656,12 @@
 	if (sp != NULL) {
 		if (state != NULL)
 			nfs4_put_open_state(state);
-		up(&sp->so_sema);
 		nfs4_put_state_owner(sp);
 	}
 	up_read(&nfsi->rwsem);
 	up_read(&clp->cl_sem);
+	if (err != -EACCES)
+		nfs_inode_return_delegation(inode);
 	return err;
 }
 
@@ -665,8 +722,10 @@
 	} else
 		o_arg.u.attrs = sattr;
 	/* Serialization for the sequence id */
-	down(&sp->so_sema);
 
+	o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+	if (o_arg.seqid == NULL)
+		return -ENOMEM;
 	status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
 	if (status != 0)
 		goto out_err;
@@ -681,7 +740,7 @@
 	update_open_stateid(state, &o_res.stateid, flags);
 	if (o_res.delegation_type != 0)
 		nfs_inode_set_delegation(inode, cred, &o_res);
-	up(&sp->so_sema);
+	nfs_free_seqid(o_arg.seqid);
 	nfs4_put_state_owner(sp);
 	up_read(&clp->cl_sem);
 	*res = state;
@@ -690,7 +749,7 @@
 	if (sp != NULL) {
 		if (state != NULL)
 			nfs4_put_open_state(state);
-		up(&sp->so_sema);
+		nfs_free_seqid(o_arg.seqid);
 		nfs4_put_state_owner(sp);
 	}
 	/* Note: clp->cl_sem must be released before nfs4_put_open_state()! */
@@ -718,7 +777,7 @@
 		 * It is actually a sign of a bug on the client or on the server.
 		 *
 		 * If we receive a BAD_SEQID error in the particular case of
-		 * doing an OPEN, we assume that nfs4_increment_seqid() will
+		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
 		 * have unhashed the old state_owner for us, and that we can
 		 * therefore safely retry using a new one. We should still warn
 		 * the user though...
@@ -728,6 +787,16 @@
 			exception.retry = 1;
 			continue;
 		}
+		/*
+		 * BAD_STATEID on OPEN means that the server cancelled our
+		 * state before it received the OPEN_CONFIRM.
+		 * Recover by retrying the request as per the discussion
+		 * on Page 181 of RFC3530.
+		 */
+		if (status == -NFS4ERR_BAD_STATEID) {
+			exception.retry = 1;
+			continue;
+		}
 		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
 					status, &exception));
 	} while (exception.retry);
@@ -789,17 +858,27 @@
 	struct nfs_closeres res;
 };
 
+static void nfs4_free_closedata(struct nfs4_closedata *calldata)
+{
+	struct nfs4_state *state = calldata->state;
+	struct nfs4_state_owner *sp = state->owner;
+
+	nfs4_put_open_state(calldata->state);
+	nfs_free_seqid(calldata->arg.seqid);
+	nfs4_put_state_owner(sp);
+	kfree(calldata);
+}
+
 static void nfs4_close_done(struct rpc_task *task)
 {
 	struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
 	struct nfs4_state *state = calldata->state;
-	struct nfs4_state_owner *sp = state->owner;
 	struct nfs_server *server = NFS_SERVER(calldata->inode);
 
         /* hmm. we are done with the inode, and in the process of freeing
 	 * the state_owner. we keep this around to process errors
 	 */
-	nfs4_increment_seqid(task->tk_status, sp);
+	nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
 	switch (task->tk_status) {
 		case 0:
 			memcpy(&state->stateid, &calldata->res.stateid,
@@ -817,24 +896,46 @@
 			}
 	}
 	state->state = calldata->arg.open_flags;
-	nfs4_put_open_state(state);
-	up(&sp->so_sema);
-	nfs4_put_state_owner(sp);
-	up_read(&server->nfs4_state->cl_sem);
-	kfree(calldata);
+	nfs4_free_closedata(calldata);
 }
 
-static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *calldata)
+static void nfs4_close_begin(struct rpc_task *task)
 {
+	struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+	struct nfs4_state *state = calldata->state;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
 		.rpc_argp = &calldata->arg,
 		.rpc_resp = &calldata->res,
-		.rpc_cred = calldata->state->owner->so_cred,
+		.rpc_cred = state->owner->so_cred,
 	};
-	if (calldata->arg.open_flags != 0)
+	int mode = 0;
+	int status;
+
+	status = nfs_wait_on_sequence(calldata->arg.seqid, task);
+	if (status != 0)
+		return;
+	/* Don't reorder reads */
+	smp_rmb();
+	/* Recalculate the new open mode in case someone reopened the file
+	 * while we were waiting in line to be scheduled.
+	 */
+	if (state->nreaders != 0)
+		mode |= FMODE_READ;
+	if (state->nwriters != 0)
+		mode |= FMODE_WRITE;
+	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
+		state->state = mode;
+	if (mode == state->state) {
+		nfs4_free_closedata(calldata);
+		task->tk_exit = NULL;
+		rpc_exit(task, 0);
+		return;
+	}
+	if (mode != 0)
 		msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
-	return rpc_call_async(clnt, &msg, 0, nfs4_close_done, calldata);
+	calldata->arg.open_flags = mode;
+	rpc_call_setup(task, &msg, 0);
 }
 
 /* 
@@ -851,39 +952,52 @@
 int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode) 
 {
 	struct nfs4_closedata *calldata;
-	int status;
+	int status = -ENOMEM;
 
-	/* Tell caller we're done */
-	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
-		state->state = mode;
-		return 0;
-	}
-	calldata = (struct nfs4_closedata *)kmalloc(sizeof(*calldata), GFP_KERNEL);
+	calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
 	if (calldata == NULL)
-		return -ENOMEM;
+		goto out;
 	calldata->inode = inode;
 	calldata->state = state;
 	calldata->arg.fh = NFS_FH(inode);
+	calldata->arg.stateid = &state->stateid;
 	/* Serialization for the sequence id */
-	calldata->arg.seqid = state->owner->so_seqid;
-	calldata->arg.open_flags = mode;
-	memcpy(&calldata->arg.stateid, &state->stateid,
-			sizeof(calldata->arg.stateid));
-	status = nfs4_close_call(NFS_SERVER(inode)->client, calldata);
-	/*
-	 * Return -EINPROGRESS on success in order to indicate to the
-	 * caller that an asynchronous RPC call has been launched, and
-	 * that it will release the semaphores on completion.
-	 */
-	return (status == 0) ? -EINPROGRESS : status;
+	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
+	if (calldata->arg.seqid == NULL)
+		goto out_free_calldata;
+
+	status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_close_begin,
+			nfs4_close_done, calldata);
+	if (status == 0)
+		goto out;
+
+	nfs_free_seqid(calldata->arg.seqid);
+out_free_calldata:
+	kfree(calldata);
+out:
+	return status;
 }
 
-struct inode *
+static void nfs4_intent_set_file(struct nameidata *nd, struct dentry *dentry, struct nfs4_state *state)
+{
+	struct file *filp;
+
+	filp = lookup_instantiate_filp(nd, dentry, NULL);
+	if (!IS_ERR(filp)) {
+		struct nfs_open_context *ctx;
+		ctx = (struct nfs_open_context *)filp->private_data;
+		ctx->state = state;
+	} else
+		nfs4_close_state(state, nd->intent.open.flags);
+}
+
+struct dentry *
 nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
 	struct iattr attr;
 	struct rpc_cred *cred;
 	struct nfs4_state *state;
+	struct dentry *res;
 
 	if (nd->flags & LOOKUP_CREATE) {
 		attr.ia_mode = nd->intent.open.create_mode;
@@ -897,16 +1011,23 @@
 
 	cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
 	if (IS_ERR(cred))
-		return (struct inode *)cred;
+		return (struct dentry *)cred;
 	state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred);
 	put_rpccred(cred);
-	if (IS_ERR(state))
-		return (struct inode *)state;
-	return state->inode;
+	if (IS_ERR(state)) {
+		if (PTR_ERR(state) == -ENOENT)
+			d_add(dentry, NULL);
+		return (struct dentry *)state;
+	}
+	res = d_add_unique(dentry, state->inode);
+	if (res != NULL)
+		dentry = res;
+	nfs4_intent_set_file(nd, dentry, state);
+	return res;
 }
 
 int
-nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags)
+nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
 {
 	struct rpc_cred *cred;
 	struct nfs4_state *state;
@@ -919,18 +1040,30 @@
 	if (IS_ERR(state))
 		state = nfs4_do_open(dir, dentry, openflags, NULL, cred);
 	put_rpccred(cred);
-	if (state == ERR_PTR(-ENOENT) && dentry->d_inode == 0)
-		return 1;
-	if (IS_ERR(state))
-		return 0;
+	if (IS_ERR(state)) {
+		switch (PTR_ERR(state)) {
+			case -EPERM:
+			case -EACCES:
+			case -EDQUOT:
+			case -ENOSPC:
+			case -EROFS:
+				lookup_instantiate_filp(nd, (struct dentry *)state, NULL);
+				return 1;
+			case -ENOENT:
+				if (dentry->d_inode == NULL)
+					return 1;
+		}
+		goto out_drop;
+	}
 	inode = state->inode;
+	iput(inode);
 	if (inode == dentry->d_inode) {
-		iput(inode);
+		nfs4_intent_set_file(nd, dentry, state);
 		return 1;
 	}
-	d_drop(dentry);
 	nfs4_close_state(state, openflags);
-	iput(inode);
+out_drop:
+	d_drop(dentry);
 	return 0;
 }
 
@@ -1431,7 +1564,7 @@
 
 static int
 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
-                 int flags)
+                 int flags, struct nameidata *nd)
 {
 	struct nfs4_state *state;
 	struct rpc_cred *cred;
@@ -1453,13 +1586,13 @@
 		struct nfs_fattr fattr;
 		status = nfs4_do_setattr(NFS_SERVER(dir), &fattr,
 		                     NFS_FH(state->inode), sattr, state);
-		if (status == 0) {
+		if (status == 0)
 			nfs_setattr_update_inode(state->inode, sattr);
-			goto out;
-		}
-	} else if (flags != 0)
-		goto out;
-	nfs4_close_state(state, flags);
+	}
+	if (status == 0 && nd != NULL && (nd->flags & LOOKUP_OPEN))
+		nfs4_intent_set_file(nd, dentry, state);
+	else
+		nfs4_close_state(state, flags);
 out:
 	return status;
 }
@@ -2106,65 +2239,6 @@
 	return 0;
 }
 
-/*
- * We will need to arrange for the VFS layer to provide an atomic open.
- * Until then, this open method is prone to inefficiency and race conditions
- * due to the lookup, potential create, and open VFS calls from sys_open()
- * placed on the wire.
- */
-static int
-nfs4_proc_file_open(struct inode *inode, struct file *filp)
-{
-	struct dentry *dentry = filp->f_dentry;
-	struct nfs_open_context *ctx;
-	struct nfs4_state *state = NULL;
-	struct rpc_cred *cred;
-	int status = -ENOMEM;
-
-	dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n",
-	                       (int)dentry->d_parent->d_name.len,
-	                       dentry->d_parent->d_name.name,
-	                       (int)dentry->d_name.len, dentry->d_name.name);
-
-
-	/* Find our open stateid */
-	cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
-	if (IS_ERR(cred))
-		return PTR_ERR(cred);
-	ctx = alloc_nfs_open_context(dentry, cred);
-	put_rpccred(cred);
-	if (unlikely(ctx == NULL))
-		return -ENOMEM;
-	status = -EIO; /* ERACE actually */
-	state = nfs4_find_state(inode, cred, filp->f_mode);
-	if (unlikely(state == NULL))
-		goto no_state;
-	ctx->state = state;
-	nfs4_close_state(state, filp->f_mode);
-	ctx->mode = filp->f_mode;
-	nfs_file_set_open_context(filp, ctx);
-	put_nfs_open_context(ctx);
-	if (filp->f_mode & FMODE_WRITE)
-		nfs_begin_data_update(inode);
-	return 0;
-no_state:
-	printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__);
-	put_nfs_open_context(ctx);
-	return status;
-}
-
-/*
- * Release our state
- */
-static int
-nfs4_proc_file_release(struct inode *inode, struct file *filp)
-{
-	if (filp->f_mode & FMODE_WRITE)
-		nfs_end_data_update(inode);
-	nfs_file_clear_open_context(filp);
-	return 0;
-}
-
 static inline int nfs4_server_supports_acls(struct nfs_server *server)
 {
 	return (server->caps & NFS_CAP_ACLS)
@@ -2285,7 +2359,7 @@
 			return -ENOMEM;
 		args.acl_pages[0] = localpage;
 		args.acl_pgbase = 0;
-		args.acl_len = PAGE_SIZE;
+		resp_len = args.acl_len = PAGE_SIZE;
 	} else {
 		resp_buf = buf;
 		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
@@ -2345,6 +2419,7 @@
 
 	if (!nfs4_server_supports_acls(server))
 		return -EOPNOTSUPP;
+	nfs_inode_return_delegation(inode);
 	buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
 	ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0);
 	if (ret == 0)
@@ -2353,7 +2428,7 @@
 }
 
 static int
-nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
 {
 	struct nfs4_client *clp = server->nfs4_state;
 
@@ -2431,7 +2506,7 @@
 /* This is the error handling routine for processes that are allowed
  * to sleep.
  */
-int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
 {
 	struct nfs4_client *clp = server->nfs4_state;
 	int ret = errorcode;
@@ -2632,7 +2707,6 @@
 
 	down_read(&clp->cl_sem);
 	nlo.clientid = clp->cl_clientid;
-	down(&state->lock_sema);
 	status = nfs4_set_lock_state(state, request);
 	if (status != 0)
 		goto out;
@@ -2659,7 +2733,6 @@
 		status = 0;
 	}
 out:
-	up(&state->lock_sema);
 	up_read(&clp->cl_sem);
 	return status;
 }
@@ -2696,67 +2769,125 @@
 	return res;
 }
 
-static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+struct nfs4_unlockdata {
+	struct nfs_lockargs arg;
+	struct nfs_locku_opargs luargs;
+	struct nfs_lockres res;
+	struct nfs4_lock_state *lsp;
+	struct nfs_open_context *ctx;
+	atomic_t refcount;
+	struct completion completion;
+};
+
+static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata)
 {
-	struct inode *inode = state->inode;
-	struct nfs_server *server = NFS_SERVER(inode);
-	struct nfs4_client *clp = server->nfs4_state;
-	struct nfs_lockargs arg = {
-		.fh = NFS_FH(inode),
-		.type = nfs4_lck_type(cmd, request),
-		.offset = request->fl_start,
-		.length = nfs4_lck_length(request),
-	};
-	struct nfs_lockres res = {
-		.server = server,
-	};
+	if (atomic_dec_and_test(&calldata->refcount)) {
+		nfs_free_seqid(calldata->luargs.seqid);
+		nfs4_put_lock_state(calldata->lsp);
+		put_nfs_open_context(calldata->ctx);
+		kfree(calldata);
+	}
+}
+
+static void nfs4_locku_complete(struct nfs4_unlockdata *calldata)
+{
+	complete(&calldata->completion);
+	nfs4_locku_release_calldata(calldata);
+}
+
+static void nfs4_locku_done(struct rpc_task *task)
+{
+	struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
+
+	nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid);
+	switch (task->tk_status) {
+		case 0:
+			memcpy(calldata->lsp->ls_stateid.data,
+					calldata->res.u.stateid.data,
+					sizeof(calldata->lsp->ls_stateid.data));
+			break;
+		case -NFS4ERR_STALE_STATEID:
+		case -NFS4ERR_EXPIRED:
+			nfs4_schedule_state_recovery(calldata->res.server->nfs4_state);
+			break;
+		default:
+			if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) {
+				rpc_restart_call(task);
+				return;
+			}
+	}
+	nfs4_locku_complete(calldata);
+}
+
+static void nfs4_locku_begin(struct rpc_task *task)
+{
+	struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
 	struct rpc_message msg = {
 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
-		.rpc_argp       = &arg,
-		.rpc_resp       = &res,
-		.rpc_cred	= state->owner->so_cred,
+		.rpc_argp       = &calldata->arg,
+		.rpc_resp       = &calldata->res,
+		.rpc_cred	= calldata->lsp->ls_state->owner->so_cred,
 	};
-	struct nfs4_lock_state *lsp;
-	struct nfs_locku_opargs luargs;
 	int status;
-			
-	down_read(&clp->cl_sem);
-	down(&state->lock_sema);
-	status = nfs4_set_lock_state(state, request);
-	if (status != 0)
-		goto out;
-	lsp = request->fl_u.nfs4_fl.owner;
-	/* We might have lost the locks! */
-	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
-		goto out;
-	luargs.seqid = lsp->ls_seqid;
-	memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
-	arg.u.locku = &luargs;
-	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-	nfs4_increment_lock_seqid(status, lsp);
 
-	if (status == 0)
-		memcpy(&lsp->ls_stateid,  &res.u.stateid, 
-				sizeof(lsp->ls_stateid));
-out:
-	up(&state->lock_sema);
-	if (status == 0)
-		do_vfs_lock(request->fl_file, request);
-	up_read(&clp->cl_sem);
-	return status;
+	status = nfs_wait_on_sequence(calldata->luargs.seqid, task);
+	if (status != 0)
+		return;
+	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
+		nfs4_locku_complete(calldata);
+		task->tk_exit = NULL;
+		rpc_exit(task, 0);
+		return;
+	}
+	rpc_call_setup(task, &msg, 0);
 }
 
 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
-	struct nfs4_exception exception = { };
-	int err;
+	struct nfs4_unlockdata *calldata;
+	struct inode *inode = state->inode;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_lock_state *lsp;
+	int status;
 
-	do {
-		err = nfs4_handle_exception(NFS_SERVER(state->inode),
-				_nfs4_proc_unlck(state, cmd, request),
-				&exception);
-	} while (exception.retry);
-	return err;
+	status = nfs4_set_lock_state(state, request);
+	if (status != 0)
+		return status;
+	lsp = request->fl_u.nfs4_fl.owner;
+	/* We might have lost the locks! */
+	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
+		return 0;
+	calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
+	if (calldata == NULL)
+		return -ENOMEM;
+	calldata->luargs.seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+	if (calldata->luargs.seqid == NULL) {
+		kfree(calldata);
+		return -ENOMEM;
+	}
+	calldata->luargs.stateid = &lsp->ls_stateid;
+	calldata->arg.fh = NFS_FH(inode);
+	calldata->arg.type = nfs4_lck_type(cmd, request);
+	calldata->arg.offset = request->fl_start;
+	calldata->arg.length = nfs4_lck_length(request);
+	calldata->arg.u.locku = &calldata->luargs;
+	calldata->res.server = server;
+	calldata->lsp = lsp;
+	atomic_inc(&lsp->ls_count);
+
+	/* Ensure we don't close the file until we're done freeing locks! */
+	calldata->ctx = get_nfs_open_context((struct nfs_open_context*)request->fl_file->private_data);
+
+	atomic_set(&calldata->refcount, 2);
+	init_completion(&calldata->completion);
+
+	status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin,
+			nfs4_locku_done, calldata);
+	if (status == 0)
+		wait_for_completion_interruptible(&calldata->completion);
+	do_vfs_lock(request->fl_file, request);
+	nfs4_locku_release_calldata(calldata);
+	return status;
 }
 
 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim)
@@ -2764,11 +2895,23 @@
 	struct inode *inode = state->inode;
 	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
+	struct nfs_lock_opargs largs = {
+		.lock_stateid = &lsp->ls_stateid,
+		.open_stateid = &state->stateid,
+		.lock_owner = {
+			.clientid = server->nfs4_state->cl_clientid,
+			.id = lsp->ls_id,
+		},
+		.reclaim = reclaim,
+	};
 	struct nfs_lockargs arg = {
 		.fh = NFS_FH(inode),
 		.type = nfs4_lck_type(cmd, request),
 		.offset = request->fl_start,
 		.length = nfs4_lck_length(request),
+		.u = {
+			.lock = &largs,
+		},
 	};
 	struct nfs_lockres res = {
 		.server = server,
@@ -2779,53 +2922,39 @@
 		.rpc_resp       = &res,
 		.rpc_cred	= state->owner->so_cred,
 	};
-	struct nfs_lock_opargs largs = {
-		.reclaim = reclaim,
-		.new_lock_owner = 0,
-	};
-	int status;
+	int status = -ENOMEM;
 
-	if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
+	largs.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+	if (largs.lock_seqid == NULL)
+		return -ENOMEM;
+	if (!(lsp->ls_seqid.flags & NFS_SEQID_CONFIRMED)) {
 		struct nfs4_state_owner *owner = state->owner;
-		struct nfs_open_to_lock otl = {
-			.lock_owner = {
-				.clientid = server->nfs4_state->cl_clientid,
-			},
-		};
 
-		otl.lock_seqid = lsp->ls_seqid;
-		otl.lock_owner.id = lsp->ls_id;
-		memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid));
-		largs.u.open_lock = &otl;
+		largs.open_seqid = nfs_alloc_seqid(&owner->so_seqid);
+		if (largs.open_seqid == NULL)
+			goto out;
 		largs.new_lock_owner = 1;
-		arg.u.lock = &largs;
-		down(&owner->so_sema);
-		otl.open_seqid = owner->so_seqid;
 		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-		/* increment open_owner seqid on success, and 
-		* seqid mutating errors */
-		nfs4_increment_seqid(status, owner);
-		up(&owner->so_sema);
-		if (status == 0) {
-			lsp->ls_flags |= NFS_LOCK_INITIALIZED;
-			lsp->ls_seqid++;
+		/* increment the open seqid on success and on seqid-mutating errors */
+		if (largs.new_lock_owner != 0) {
+			nfs_increment_open_seqid(status, largs.open_seqid);
+			if (status == 0)
+				nfs_confirm_seqid(&lsp->ls_seqid, 0);
 		}
-	} else {
-		struct nfs_exist_lock el = {
-			.seqid = lsp->ls_seqid,
-		};
-		memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
-		largs.u.exist_lock = &el;
-		arg.u.lock = &largs;
+		nfs_free_seqid(largs.open_seqid);
+	} else
 		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-		/* increment seqid on success, and * seqid mutating errors*/
-		nfs4_increment_lock_seqid(status, lsp);
-	}
+	/* increment the lock seqid on success and on seqid-mutating errors */
+	nfs_increment_lock_seqid(status, largs.lock_seqid);
 	/* save the returned stateid. */
-	if (status == 0)
-		memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
-	else if (status == -NFS4ERR_DENIED)
+	if (status == 0) {
+		memcpy(lsp->ls_stateid.data, res.u.stateid.data,
+				sizeof(lsp->ls_stateid.data));
+		lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+	} else if (status == -NFS4ERR_DENIED)
 		status = -EAGAIN;
+out:
+	nfs_free_seqid(largs.lock_seqid);
 	return status;
 }
 
@@ -2865,11 +2994,9 @@
 	int status;
 
 	down_read(&clp->cl_sem);
-	down(&state->lock_sema);
 	status = nfs4_set_lock_state(state, request);
 	if (status == 0)
 		status = _nfs4_do_setlk(state, cmd, request, 0);
-	up(&state->lock_sema);
 	if (status == 0) {
 		/* Note: we always want to sleep here! */
 		request->fl_flags |= FL_SLEEP;
@@ -3024,8 +3151,8 @@
 	.read_setup	= nfs4_proc_read_setup,
 	.write_setup	= nfs4_proc_write_setup,
 	.commit_setup	= nfs4_proc_commit_setup,
-	.file_open      = nfs4_proc_file_open,
-	.file_release   = nfs4_proc_file_release,
+	.file_open      = nfs_open,
+	.file_release   = nfs_release,
 	.lock		= nfs4_proc_lock,
 	.clear_acl_cache = nfs4_zap_acl_attr,
 };
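
CLOSE/OPEN_DOWNGRADE and LOCKU now go through the nfs4_call_async() helper added near the top of this file. The begin callback is where the seqid ordering is enforced: nfs_wait_on_sequence() either lets the task proceed or puts it to sleep on the rpc_sequence wait queue until the preceding seqid is freed, and a request that has become unnecessary can exit without touching the wire. The common shape, sketched with hypothetical names (compare nfs4_close_begin() and nfs4_locku_begin() above):

/* Hypothetical calldata; real users embed their argument structs here. */
struct sample_calldata {
	struct {
		struct nfs_seqid *seqid;
	} arg;
};

static int sample_call_is_noop(struct sample_calldata *calldata)
{
	return 0;	/* placeholder, e.g. "mode unchanged" in nfs4_close_begin() */
}

static void sample_begin(struct rpc_task *task)
{
	struct sample_calldata *calldata = (struct sample_calldata *)task->tk_calldata;
	struct rpc_message msg = {
		/* .rpc_proc, .rpc_argp, .rpc_resp, .rpc_cred as usual */
	};

	/* Sleep until our seqid reaches the head of the sequence list;
	 * when woken, this callback simply runs again. */
	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;
	if (sample_call_is_noop(calldata)) {
		task->tk_exit = NULL;	/* skip the done callback */
		rpc_exit(task, 0);
		return;
	}
	rpc_call_setup(task, &msg, 0);	/* arm the real RPC and proceed */
}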
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index afe587d..2d5a6a2 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -264,13 +264,16 @@
 {
 	struct nfs4_state_owner *sp;
 
-	sp = kmalloc(sizeof(*sp),GFP_KERNEL);
+	sp = kzalloc(sizeof(*sp),GFP_KERNEL);
 	if (!sp)
 		return NULL;
-	init_MUTEX(&sp->so_sema);
-	sp->so_seqid = 0;                 /* arbitrary */
+	spin_lock_init(&sp->so_lock);
 	INIT_LIST_HEAD(&sp->so_states);
 	INIT_LIST_HEAD(&sp->so_delegations);
+	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
+	sp->so_seqid.sequence = &sp->so_sequence;
+	spin_lock_init(&sp->so_sequence.lock);
+	INIT_LIST_HEAD(&sp->so_sequence.list);
 	atomic_set(&sp->so_count, 1);
 	return sp;
 }
@@ -359,7 +362,6 @@
 	memset(state->stateid.data, 0, sizeof(state->stateid.data));
 	atomic_set(&state->count, 1);
 	INIT_LIST_HEAD(&state->lock_states);
-	init_MUTEX(&state->lock_sema);
 	spin_lock_init(&state->state_lock);
 	return state;
 }
@@ -437,21 +439,23 @@
 	if (state)
 		goto out;
 	new = nfs4_alloc_open_state();
+	spin_lock(&owner->so_lock);
 	spin_lock(&inode->i_lock);
 	state = __nfs4_find_state_byowner(inode, owner);
 	if (state == NULL && new != NULL) {
 		state = new;
-		/* Caller *must* be holding owner->so_sem */
-		/* Note: The reclaim code dictates that we add stateless
-		 * and read-only stateids to the end of the list */
-		list_add_tail(&state->open_states, &owner->so_states);
 		state->owner = owner;
 		atomic_inc(&owner->so_count);
 		list_add(&state->inode_states, &nfsi->open_states);
 		state->inode = igrab(inode);
 		spin_unlock(&inode->i_lock);
+		/* Note: The reclaim code dictates that we add stateless
+		 * and read-only stateids to the end of the list */
+		list_add_tail(&state->open_states, &owner->so_states);
+		spin_unlock(&owner->so_lock);
 	} else {
 		spin_unlock(&inode->i_lock);
+		spin_unlock(&owner->so_lock);
 		if (new)
 			nfs4_free_open_state(new);
 	}
@@ -461,19 +465,21 @@
 
 /*
  * Beware! Caller must be holding exactly one
- * reference to clp->cl_sem and owner->so_sema!
+ * reference to clp->cl_sem!
  */
 void nfs4_put_open_state(struct nfs4_state *state)
 {
 	struct inode *inode = state->inode;
 	struct nfs4_state_owner *owner = state->owner;
 
-	if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
+	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
 		return;
+	spin_lock(&inode->i_lock);
 	if (!list_empty(&state->inode_states))
 		list_del(&state->inode_states);
-	spin_unlock(&inode->i_lock);
 	list_del(&state->open_states);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&owner->so_lock);
 	iput(inode);
 	BUG_ON (state->state != 0);
 	nfs4_free_open_state(state);
@@ -481,20 +487,17 @@
 }
 
 /*
- * Beware! Caller must be holding no references to clp->cl_sem!
- * of owner->so_sema!
+ * Close the current file.
  */
 void nfs4_close_state(struct nfs4_state *state, mode_t mode)
 {
 	struct inode *inode = state->inode;
 	struct nfs4_state_owner *owner = state->owner;
-	struct nfs4_client *clp = owner->so_client;
 	int newstate;
 
 	atomic_inc(&owner->so_count);
-	down_read(&clp->cl_sem);
-	down(&owner->so_sema);
 	/* Protect against nfs4_find_state() */
+	spin_lock(&owner->so_lock);
 	spin_lock(&inode->i_lock);
 	if (mode & FMODE_READ)
 		state->nreaders--;
@@ -507,6 +510,7 @@
 		list_move_tail(&state->open_states, &owner->so_states);
 	}
 	spin_unlock(&inode->i_lock);
+	spin_unlock(&owner->so_lock);
 	newstate = 0;
 	if (state->state != 0) {
 		if (state->nreaders)
@@ -515,14 +519,16 @@
 			newstate |= FMODE_WRITE;
 		if (state->state == newstate)
 			goto out;
-		if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+			state->state = newstate;
+			goto out;
+		}
+		if (nfs4_do_close(inode, state, newstate) == 0)
 			return;
 	}
 out:
 	nfs4_put_open_state(state);
-	up(&owner->so_sema);
 	nfs4_put_state_owner(owner);
-	up_read(&clp->cl_sem);
 }
 
 /*
@@ -546,19 +552,16 @@
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
  *
- * The caller must be holding state->lock_sema
  */
 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
 {
 	struct nfs4_lock_state *lsp;
 	struct nfs4_client *clp = state->owner->so_client;
 
-	lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
+	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
 	if (lsp == NULL)
 		return NULL;
-	lsp->ls_flags = 0;
-	lsp->ls_seqid = 0;	/* arbitrary */
-	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
+	lsp->ls_seqid.sequence = &state->owner->so_sequence;
 	atomic_set(&lsp->ls_count, 1);
 	lsp->ls_owner = fl_owner;
 	spin_lock(&clp->cl_lock);
@@ -572,7 +575,7 @@
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
  *
- * The caller must be holding state->lock_sema and clp->cl_sem
+ * The caller must be holding clp->cl_sem
  */
 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
 {
@@ -605,7 +608,7 @@
  * Release reference to lock_state, and free it if we see that
  * it is no longer in use
  */
-static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 {
 	struct nfs4_state *state;
 
@@ -673,29 +676,94 @@
 	nfs4_put_lock_state(lsp);
 }
 
-/*
-* Called with state->lock_sema and clp->cl_sem held.
-*/
-void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
 {
-	if (status == NFS_OK || seqid_mutating_err(-status))
-		lsp->ls_seqid++;
+	struct nfs_seqid *new;
+
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	if (new != NULL) {
+		new->sequence = counter;
+		INIT_LIST_HEAD(&new->list);
+	}
+	return new;
+}
+
+void nfs_free_seqid(struct nfs_seqid *seqid)
+{
+	struct rpc_sequence *sequence = seqid->sequence->sequence;
+
+	if (!list_empty(&seqid->list)) {
+		spin_lock(&sequence->lock);
+		list_del(&seqid->list);
+		spin_unlock(&sequence->lock);
+	}
+	rpc_wake_up_next(&sequence->wait);
+	kfree(seqid);
 }
 
 /*
-* Called with sp->so_sema and clp->cl_sem held.
-*
-* Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
-* failed with a seqid incrementing error -
-* see comments nfs_fs.h:seqid_mutating_error()
-*/
-void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
+ * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
+ * failed with a seqid-incrementing error -
+ * see the comments in nfs_fs.h:seqid_mutating_error()
+ */
+static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 {
-	if (status == NFS_OK || seqid_mutating_err(-status))
-		sp->so_seqid++;
-	/* If the server returns BAD_SEQID, unhash state_owner here */
-	if (status == -NFS4ERR_BAD_SEQID)
+	switch (status) {
+		case 0:
+			break;
+		case -NFS4ERR_BAD_SEQID:
+		case -NFS4ERR_STALE_CLIENTID:
+		case -NFS4ERR_STALE_STATEID:
+		case -NFS4ERR_BAD_STATEID:
+		case -NFS4ERR_BADXDR:
+		case -NFS4ERR_RESOURCE:
+		case -NFS4ERR_NOFILEHANDLE:
+			/* Non-seqid mutating errors */
+			return;
+	};
+	/*
+	 * Note: no locking needed as we are guaranteed to be first
+	 * on the sequence list
+	 */
+	seqid->sequence->counter++;
+}
+
+void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
+{
+	if (status == -NFS4ERR_BAD_SEQID) {
+		struct nfs4_state_owner *sp = container_of(seqid->sequence,
+				struct nfs4_state_owner, so_seqid);
 		nfs4_drop_state_owner(sp);
+	}
+	return nfs_increment_seqid(status, seqid);
+}
+
+/*
+ * Increment the seqid if the LOCK/LOCKU succeeded, or
+ * failed with a seqid-incrementing error -
+ * see the comments in nfs_fs.h:seqid_mutating_error()
+ */
+void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
+{
+	return nfs_increment_seqid(status, seqid);
+}
+
+int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
+{
+	struct rpc_sequence *sequence = seqid->sequence->sequence;
+	int status = 0;
+
+	if (sequence->list.next == &seqid->list)
+		goto out;
+	spin_lock(&sequence->lock);
+	if (!list_empty(&sequence->list)) {
+		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
+		status = -EAGAIN;
+	} else
+		list_add(&seqid->list, &sequence->list);
+	spin_unlock(&sequence->lock);
+out:
+	return status;
 }
 
 static int reclaimer(void *);
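
nfs_wait_on_sequence() and nfs_free_seqid() above replace the old so_sema/lock_sema semaphores: only the seqid currently on the rpc_sequence list may transmit, later callers sleep on the wait queue, and releasing a seqid wakes the next waiter. A minimal single-threaded, compilable model of that admission rule (hypothetical names, no locking shown):

	#include <assert.h>
	#include <stddef.h>

	struct sequence_model { const void *owner; };

	static int wait_on_sequence(struct sequence_model *q, const void *seqid)
	{
		if (q->owner == seqid)
			return 0;		/* already first in line */
		if (q->owner != NULL)
			return -11;		/* -EAGAIN: caller sleeps, retries */
		q->owner = seqid;
		return 0;
	}

	static void free_seqid(struct sequence_model *q, const void *seqid)
	{
		if (q->owner == seqid)
			q->owner = NULL;	/* rpc_wake_up_next() analogue */
	}

	int main(void)
	{
		struct sequence_model q = { NULL };
		int a, b;

		assert(wait_on_sequence(&q, &a) == 0);
		assert(wait_on_sequence(&q, &b) == -11);	/* a holds the sequence */
		free_seqid(&q, &a);
		assert(wait_on_sequence(&q, &b) == 0);		/* b now admitted */
		return 0;
	}
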
@@ -791,8 +859,6 @@
 		if (state->state == 0)
 			continue;
 		status = ops->recover_open(sp, state);
-		list_for_each_entry(lock, &state->lock_states, ls_locks)
-			lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
 		if (status >= 0) {
 			status = nfs4_reclaim_locks(ops, state);
 			if (status < 0)
@@ -831,6 +897,28 @@
 	return status;
 }
 
+static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
+{
+	struct nfs4_state_owner *sp;
+	struct nfs4_state *state;
+	struct nfs4_lock_state *lock;
+
+	/* Reset all sequence ids to zero */
+	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+		sp->so_seqid.counter = 0;
+		sp->so_seqid.flags = 0;
+		spin_lock(&sp->so_lock);
+		list_for_each_entry(state, &sp->so_states, open_states) {
+			list_for_each_entry(lock, &state->lock_states, ls_locks) {
+				lock->ls_seqid.counter = 0;
+				lock->ls_seqid.flags = 0;
+				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
+			}
+		}
+		spin_unlock(&sp->so_lock);
+	}
+}
+
 static int reclaimer(void *ptr)
 {
 	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
@@ -864,6 +952,7 @@
 		default:
 			ops = &nfs4_network_partition_recovery_ops;
 	};
+	nfs4_state_mark_reclaim(clp);
 	status = __nfs4_init_client(clp);
 	if (status)
 		goto out_error;
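
The mutating-error rule that nfs_increment_seqid() centralizes, and that nfs4_state_mark_reclaim() depends on when it zeroes every counter before recovery, can be checked in isolation. A compilable model using a subset of the non-mutating list, with error values as defined by the NFSv4 specification:

	#include <assert.h>

	enum {
		NFS4ERR_DENIED		= 10010,
		NFS4ERR_RESOURCE	= 10018,
		NFS4ERR_STALE_CLIENTID	= 10022,
		NFS4ERR_BAD_SEQID	= 10026,
	};

	static void increment_seqid(int status, unsigned int *counter)
	{
		switch (status) {
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_RESOURCE:
			return;			/* non-mutating: counter stays put */
		}
		(*counter)++;			/* success and all other errors mutate */
	}

	int main(void)
	{
		unsigned int c = 0;

		increment_seqid(0, &c);			/* success */
		increment_seqid(-NFS4ERR_DENIED, &c);	/* mutating error */
		increment_seqid(-NFS4ERR_BAD_SEQID, &c);/* non-mutating */
		assert(c == 2);
		return 0;
	}
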
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 6c564ef..cd76264 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -602,10 +602,10 @@
 {
 	uint32_t *p;
 
-	RESERVE_SPACE(8+sizeof(arg->stateid.data));
+	RESERVE_SPACE(8+sizeof(arg->stateid->data));
 	WRITE32(OP_CLOSE);
-	WRITE32(arg->seqid);
-	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+	WRITE32(arg->seqid->sequence->counter);
+	WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
 	
 	return 0;
 }
@@ -729,22 +729,18 @@
 	WRITE64(arg->length);
 	WRITE32(opargs->new_lock_owner);
 	if (opargs->new_lock_owner){
-		struct nfs_open_to_lock *ol = opargs->u.open_lock;
-
 		RESERVE_SPACE(40);
-		WRITE32(ol->open_seqid);
-		WRITEMEM(&ol->open_stateid, sizeof(ol->open_stateid));
-		WRITE32(ol->lock_seqid);
-		WRITE64(ol->lock_owner.clientid);
+		WRITE32(opargs->open_seqid->sequence->counter);
+		WRITEMEM(opargs->open_stateid->data, sizeof(opargs->open_stateid->data));
+		WRITE32(opargs->lock_seqid->sequence->counter);
+		WRITE64(opargs->lock_owner.clientid);
 		WRITE32(4);
-		WRITE32(ol->lock_owner.id);
+		WRITE32(opargs->lock_owner.id);
 	}
 	else {
-		struct nfs_exist_lock *el = opargs->u.exist_lock;
-
 		RESERVE_SPACE(20);
-		WRITEMEM(&el->stateid, sizeof(el->stateid));
-		WRITE32(el->seqid);
+		WRITEMEM(opargs->lock_stateid->data, sizeof(opargs->lock_stateid->data));
+		WRITE32(opargs->lock_seqid->sequence->counter);
 	}
 
 	return 0;
@@ -775,8 +771,8 @@
 	RESERVE_SPACE(44);
 	WRITE32(OP_LOCKU);
 	WRITE32(arg->type);
-	WRITE32(opargs->seqid);
-	WRITEMEM(&opargs->stateid, sizeof(opargs->stateid));
+	WRITE32(opargs->seqid->sequence->counter);
+	WRITEMEM(opargs->stateid->data, sizeof(opargs->stateid->data));
 	WRITE64(arg->offset);
 	WRITE64(arg->length);
 
@@ -826,7 +822,7 @@
  */
 	RESERVE_SPACE(8);
 	WRITE32(OP_OPEN);
-	WRITE32(arg->seqid);
+	WRITE32(arg->seqid->sequence->counter);
 	encode_share_access(xdr, arg->open_flags);
 	RESERVE_SPACE(16);
 	WRITE64(arg->clientid);
@@ -941,7 +937,7 @@
 	RESERVE_SPACE(8+sizeof(arg->stateid.data));
 	WRITE32(OP_OPEN_CONFIRM);
 	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
-	WRITE32(arg->seqid);
+	WRITE32(arg->seqid->sequence->counter);
 
 	return 0;
 }
@@ -950,10 +946,10 @@
 {
 	uint32_t *p;
 
-	RESERVE_SPACE(8+sizeof(arg->stateid.data));
+	RESERVE_SPACE(8+sizeof(arg->stateid->data));
 	WRITE32(OP_OPEN_DOWNGRADE);
-	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
-	WRITE32(arg->seqid);
+	WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
+	WRITE32(arg->seqid->sequence->counter);
 	encode_share_access(xdr, arg->open_flags);
 	return 0;
 }
@@ -1437,6 +1433,9 @@
 	};
 	int status;
 
+	status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+	if (status != 0)
+		goto out;
 	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 	encode_compound_hdr(&xdr, &hdr);
 	status = encode_putfh(&xdr, args->fh);
@@ -1464,6 +1463,9 @@
 	};
 	int status;
 
+	status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+	if (status != 0)
+		goto out;
 	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 	encode_compound_hdr(&xdr, &hdr);
 	status = encode_putfh(&xdr, args->fh);
@@ -1485,6 +1487,9 @@
 	};
 	int status;
 
+	status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+	if (status != 0)
+		goto out;
 	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 	encode_compound_hdr(&xdr, &hdr);
 	status = encode_putfh(&xdr, args->fh);
@@ -1525,8 +1530,15 @@
 	struct compound_hdr hdr = {
 		.nops   = 2,
 	};
+	struct nfs_lock_opargs *opargs = args->u.lock;
 	int status;
 
+	status = nfs_wait_on_sequence(opargs->lock_seqid, req->rq_task);
+	if (status != 0)
+		goto out;
+	/* Do we need to do an open_to_lock_owner? */
+	if (opargs->lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)
+		opargs->new_lock_owner = 0;
 	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 	encode_compound_hdr(&xdr, &hdr);
 	status = encode_putfh(&xdr, args->fh);
@@ -2890,8 +2902,8 @@
 
 	status = decode_op_hdr(xdr, OP_LOCK);
 	if (status == 0) {
-		READ_BUF(sizeof(nfs4_stateid));
-		COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+		READ_BUF(sizeof(res->u.stateid.data));
+		COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
 	} else if (status == -NFS4ERR_DENIED)
 		return decode_lock_denied(xdr, &res->u.denied);
 	return status;
@@ -2913,8 +2925,8 @@
 
 	status = decode_op_hdr(xdr, OP_LOCKU);
 	if (status == 0) {
-		READ_BUF(sizeof(nfs4_stateid));
-		COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+		READ_BUF(sizeof(res->u.stateid.data));
+		COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
 	}
 	return status;
 }
@@ -3243,7 +3255,8 @@
 		if (attrlen <= *acl_len)
 			xdr_read_pages(xdr, attrlen);
 		*acl_len = attrlen;
-	}
+	} else
+		status = -EOPNOTSUPP;
 
 out:
 	return status;
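
Two details in the encode routines above are easy to miss: nfs_wait_on_sequence() runs before any XDR bytes are written, so a task that is not first in line backs off with -EAGAIN without touching the send buffer; and the LOCK encoder clears new_lock_owner once the lock owner has been confirmed, so only the first LOCK for an owner carries the open_to_lock_owner arm. A sketch of that decision (the flag value is assumed, not taken from this hunk):

	#define NFS_SEQID_CONFIRMED 1	/* assumed flag bit */

	static int choose_lock_form(unsigned long seqid_flags, int new_lock_owner)
	{
		if (seqid_flags & NFS_SEQID_CONFIRMED)
			new_lock_owner = 0;	/* encode lock stateid + lock seqid only */
		return new_lock_owner;
	}
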
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index be23c3f..8fef865 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -216,7 +216,7 @@
 
 static int
 nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
-		int flags)
+		int flags, struct nameidata *nd)
 {
 	struct nfs_fh		fhandle;
 	struct nfs_fattr	fattr;
diff --git a/fs/open.c b/fs/open.c
index f0d90cf..8d06ec9 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -739,7 +739,8 @@
 }
 
 static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
-					int flags, struct file *f)
+					int flags, struct file *f,
+					int (*open)(struct inode *, struct file *))
 {
 	struct inode *inode;
 	int error;
@@ -761,11 +762,14 @@
 	f->f_op = fops_get(inode->i_fop);
 	file_move(f, &inode->i_sb->s_files);
 
-	if (f->f_op && f->f_op->open) {
-		error = f->f_op->open(inode,f);
+	if (!open && f->f_op)
+		open = f->f_op->open;
+	if (open) {
+		error = open(inode, f);
 		if (error)
 			goto cleanup_all;
 	}
+
 	f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
 
 	file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
@@ -814,28 +818,75 @@
 {
 	int namei_flags, error;
 	struct nameidata nd;
-	struct file *f;
 
 	namei_flags = flags;
 	if ((namei_flags+1) & O_ACCMODE)
 		namei_flags++;
-	if (namei_flags & O_TRUNC)
-		namei_flags |= 2;
-
-	error = -ENFILE;
-	f = get_empty_filp();
-	if (f == NULL)
-		return ERR_PTR(error);
 
 	error = open_namei(filename, namei_flags, mode, &nd);
 	if (!error)
-		return __dentry_open(nd.dentry, nd.mnt, flags, f);
+		return nameidata_to_filp(&nd, flags);
 
-	put_filp(f);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL(filp_open);
 
+/**
+ * lookup_instantiate_filp - instantiates the open intent filp
+ * @nd: pointer to nameidata
+ * @dentry: pointer to dentry
+ * @open: open callback
+ *
+ * Helper for filesystems that want to use lookup open intents and pass back
+ * a fully instantiated struct file to the caller.
+ * This function is meant to be called from within a filesystem's
+ * lookup method.
+ * Note that in case of error, nd->intent.open.file is destroyed, but the
+ * path information remains valid.
+ * If the open callback is set to NULL, then the standard f_op->open()
+ * filesystem callback is substituted.
+ */
+struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+		int (*open)(struct inode *, struct file *))
+{
+	if (IS_ERR(nd->intent.open.file))
+		goto out;
+	if (IS_ERR(dentry))
+		goto out_err;
+	nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt),
+					     nd->intent.open.flags - 1,
+					     nd->intent.open.file,
+					     open);
+out:
+	return nd->intent.open.file;
+out_err:
+	release_open_intent(nd);
+	nd->intent.open.file = (struct file *)dentry;
+	goto out;
+}
+EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
+
+/**
+ * nameidata_to_filp - convert a nameidata to an open filp.
+ * @nd: pointer to nameidata
+ * @flags: open flags
+ *
+ * Note that this function destroys the original nameidata
+ */
+struct file *nameidata_to_filp(struct nameidata *nd, int flags)
+{
+	struct file *filp;
+
+	/* Pick up the filp from the open intent */
+	filp = nd->intent.open.file;
+	/* Has the filesystem initialised the file for us? */
+	if (filp->f_dentry == NULL)
+		filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL);
+	else
+		path_release(nd);
+	return filp;
+}
+
 struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
 {
 	int error;
@@ -846,7 +897,7 @@
 	if (f == NULL)
 		return ERR_PTR(error);
 
-	return __dentry_open(dentry, mnt, flags, f);
+	return __dentry_open(dentry, mnt, flags, f, NULL);
 }
 EXPORT_SYMBOL(dentry_open);
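
For filesystems that implement their own atomic open, the intended calling sequence for the helpers above is: the VFS stores a preallocated struct file in nd->intent.open.file, the filesystem's lookup (or create) method calls lookup_instantiate_filp() against the dentry it resolved, and nameidata_to_filp() later hands that same filp back to the open path. A hedged sketch of a lookup method using it; the myfs_* naming is hypothetical:

	static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
	{
		/* ...normal lookup work, d_add(dentry, inode), etc... */
		if (nd != NULL && (nd->flags & LOOKUP_OPEN)) {
			/* NULL open callback: fall back to f_op->open() */
			struct file *filp = lookup_instantiate_filp(nd, dentry, NULL);

			if (IS_ERR(filp))
				return ERR_PTR(PTR_ERR(filp));
		}
		return NULL;
	}
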
 
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 7db67b0..1c975d0 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -8,6 +8,7 @@
 struct open_intent {
 	int	flags;
 	int	create_mode;
+	struct file *file;
 };
 
 enum { MAX_NESTED_LINKS = 5 };
@@ -65,6 +66,13 @@
 extern void path_release(struct nameidata *);
 extern void path_release_on_umount(struct nameidata *);
 
+extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags);
+extern int path_lookup_open(const char *, unsigned lookup_flags, struct nameidata *, int open_flags);
+extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+		int (*open)(struct inode *, struct file *));
+extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
+extern void release_open_intent(struct nameidata *);
+
 extern struct dentry * lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry * lookup_hash(struct qstr *, struct dentry *);
 
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 9a6047f..7bac278 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -41,6 +41,10 @@
 #define NFS_MAX_FILE_IO_BUFFER_SIZE	32768
 #define NFS_DEF_FILE_IO_BUFFER_SIZE	4096
 
+/* Default timeout values */
+#define NFS_MAX_UDP_TIMEOUT	(60*HZ)
+#define NFS_MAX_TCP_TIMEOUT	(600*HZ)
+
 /*
  * superblock magic number for NFS
  */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index a2bf691..60086da 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -96,12 +96,13 @@
 	u64			after;
 };
 
+struct nfs_seqid;
 /*
  * Arguments to the open call.
  */
 struct nfs_openargs {
 	const struct nfs_fh *	fh;
-	__u32                   seqid;
+	struct nfs_seqid *	seqid;
 	int			open_flags;
 	__u64                   clientid;
 	__u32                   id;
@@ -136,7 +137,7 @@
 struct nfs_open_confirmargs {
 	const struct nfs_fh *	fh;
 	nfs4_stateid            stateid;
-	__u32                   seqid;
+	struct nfs_seqid *	seqid;
 };
 
 struct nfs_open_confirmres {
@@ -148,8 +149,8 @@
  */
 struct nfs_closeargs {
 	struct nfs_fh *         fh;
-	nfs4_stateid            stateid;
-	__u32                   seqid;
+	nfs4_stateid *		stateid;
+	struct nfs_seqid *	seqid;
 	int			open_flags;
 };
 
@@ -164,30 +165,19 @@
 	u32                     id;
 };
 
-struct nfs_open_to_lock {
-	__u32                   open_seqid;
-	nfs4_stateid            open_stateid;
-	__u32                   lock_seqid;
-	struct nfs_lowner       lock_owner;
-};
-
-struct nfs_exist_lock {
-	nfs4_stateid            stateid;
-	__u32                   seqid;
-};
-
 struct nfs_lock_opargs {
+	struct nfs_seqid *	lock_seqid;
+	nfs4_stateid *		lock_stateid;
+	struct nfs_seqid *	open_seqid;
+	nfs4_stateid *		open_stateid;
+	struct nfs_lowner       lock_owner;
 	__u32                   reclaim;
 	__u32                   new_lock_owner;
-	union {
-		struct nfs_open_to_lock *open_lock;
-		struct nfs_exist_lock   *exist_lock;
-	} u;
 };
 
 struct nfs_locku_opargs {
-	__u32                   seqid;
-	nfs4_stateid            stateid;
+	struct nfs_seqid *	seqid;
+	nfs4_stateid *		stateid;
 };
 
 struct nfs_lockargs {
@@ -722,7 +712,7 @@
 	int	(*write)   (struct nfs_write_data *);
 	int	(*commit)  (struct nfs_write_data *);
 	int	(*create)  (struct inode *, struct dentry *,
-			    struct iattr *, int);
+			    struct iattr *, int, struct nameidata *);
 	int	(*remove)  (struct inode *, struct qstr *);
 	int	(*unlink_setup)  (struct rpc_message *,
 			    struct dentry *, struct qstr *);
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 04ebc24..b68c11a 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -66,7 +66,12 @@
 
 struct rpc_auth {
 	unsigned int		au_cslack;	/* call cred size estimate */
-	unsigned int		au_rslack;	/* reply verf size guess */
+				/* guess at number of u32's auth adds before
+				 * reply data; normally the verifier size: */
+	unsigned int		au_rslack;
+				/* for gss, used to calculate au_rslack: */
+	unsigned int		au_verfsize;
+
 	unsigned int		au_flags;	/* various flags */
 	struct rpc_authops *	au_ops;		/* operations */
 	rpc_authflavor_t	au_flavor;	/* pseudoflavor (note may
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index eadb31e..1a42d90 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -32,6 +32,7 @@
 #define RPCDBG_AUTH		0x0010
 #define RPCDBG_PMAP		0x0020
 #define RPCDBG_SCHED		0x0040
+#define RPCDBG_TRANS		0x0080
 #define RPCDBG_SVCSOCK		0x0100
 #define RPCDBG_SVCDSP		0x0200
 #define RPCDBG_MISC		0x0400
@@ -94,6 +95,8 @@
 	CTL_NLMDEBUG,
 	CTL_SLOTTABLE_UDP,
 	CTL_SLOTTABLE_TCP,
+	CTL_MIN_RESVPORT,
+	CTL_MAX_RESVPORT,
 };
 
 #endif /* _LINUX_SUNRPC_DEBUG_H_ */
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 689262f..9b8bcf1 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -40,14 +40,21 @@
 		struct gss_ctx		**ctx_id);
 u32 gss_get_mic(
 		struct gss_ctx		*ctx_id,
-		u32			qop,
 		struct xdr_buf		*message,
 		struct xdr_netobj	*mic_token);
 u32 gss_verify_mic(
 		struct gss_ctx		*ctx_id,
 		struct xdr_buf		*message,
-		struct xdr_netobj	*mic_token,
-		u32			*qstate);
+		struct xdr_netobj	*mic_token);
+u32 gss_wrap(
+		struct gss_ctx		*ctx_id,
+		int			offset,
+		struct xdr_buf		*outbuf,
+		struct page		**inpages);
+u32 gss_unwrap(
+		struct gss_ctx		*ctx_id,
+		int			offset,
+		struct xdr_buf		*inbuf);
 u32 gss_delete_sec_context(
 		struct gss_ctx		**ctx_id);
 
@@ -56,7 +63,6 @@
 
 struct pf_desc {
 	u32	pseudoflavor;
-	u32	qop;
 	u32	service;
 	char	*name;
 	char	*auth_domain_name;
@@ -85,14 +91,21 @@
 			struct gss_ctx		*ctx_id);
 	u32 (*gss_get_mic)(
 			struct gss_ctx		*ctx_id,
-			u32			qop, 
 			struct xdr_buf		*message,
 			struct xdr_netobj	*mic_token);
 	u32 (*gss_verify_mic)(
 			struct gss_ctx		*ctx_id,
 			struct xdr_buf		*message,
-			struct xdr_netobj	*mic_token,
-			u32			*qstate);
+			struct xdr_netobj	*mic_token);
+	u32 (*gss_wrap)(
+			struct gss_ctx		*ctx_id,
+			int			offset,
+			struct xdr_buf		*outbuf,
+			struct page		**inpages);
+	u32 (*gss_unwrap)(
+			struct gss_ctx		*ctx_id,
+			int			offset,
+			struct xdr_buf		*buf);
 	void (*gss_delete_sec_context)(
 			void			*internal_ctx_id);
 };
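
With the qop parameters gone, the per-message API reduces to two symmetric pairs. A hedged sketch (not from this patch) of how a privacy-service caller is expected to drive gss_wrap()/gss_unwrap(): everything from offset onward is wrapped, leaving the RPC header in the clear:

	static u32 privacy_round_trip(struct gss_ctx *ctx, int offset,
				      struct xdr_buf *snd, struct page **inpages,
				      struct xdr_buf *rcv)
	{
		u32 maj_stat;

		maj_stat = gss_wrap(ctx, offset, snd, inpages);
		if (maj_stat != GSS_S_COMPLETE)
			return maj_stat;
		/* ...transmit snd, receive the reply into rcv... */
		return gss_unwrap(ctx, offset, rcv);
	}
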
diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h
index 92608a2..a680786 100644
--- a/include/linux/sunrpc/gss_err.h
+++ b/include/linux/sunrpc/gss_err.h
@@ -66,16 +66,6 @@
 
 
 /*
- * Define the default Quality of Protection for per-message services.  Note
- * that an implementation that offers multiple levels of QOP may either reserve
- * a value (for example zero, as assumed here) to mean "default protection", or
- * alternatively may simply equate GSS_C_QOP_DEFAULT to a specific explicit
- * QOP value.  However a value of 0 should always be interpreted by a GSSAPI
- * implementation as a request for the default protection level.
- */
-#define GSS_C_QOP_DEFAULT 0
-
-/*
  * Expiration time of 2^32-1 seconds means infinite lifetime for a
  * credential or security context
  */
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index ffe31d2..2c3601d 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -116,18 +116,22 @@
 
 s32
 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
-		   struct xdr_netobj *cksum);
+		   int body_offset, struct xdr_netobj *cksum);
+
+u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+		struct xdr_netobj *);
+
+u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+		struct xdr_netobj *);
 
 u32
-krb5_make_token(struct krb5_ctx *context_handle, int qop_req,
-	struct xdr_buf *input_message_buffer,
-	struct xdr_netobj *output_message_buffer, int toktype);
+gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
+		struct xdr_buf *outbuf, struct page **pages);
 
 u32
-krb5_read_token(struct krb5_ctx *context_handle,
-	  struct xdr_netobj *input_token_buffer,
-	  struct xdr_buf *message_buffer,
-	  int *qop_state, int toktype);
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+		struct xdr_buf *buf);
+
 
 u32
 krb5_encrypt(struct crypto_tfm * key,
@@ -137,6 +141,13 @@
 krb5_decrypt(struct crypto_tfm * key,
 	     void *iv, void *in, void *out, int length); 
 
+int
+gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset,
+		struct page **pages);
+
+int
+gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset);
+
 s32
 krb5_make_seq_num(struct crypto_tfm * key,
 		int direction,
diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h
index b5c9968..0beb2cf 100644
--- a/include/linux/sunrpc/gss_spkm3.h
+++ b/include/linux/sunrpc/gss_spkm3.h
@@ -41,9 +41,9 @@
 #define SPKM_WRAP_TOK	5
 #define SPKM_DEL_TOK	6
 
-u32 spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
+u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
 
-u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int *qop_state, int toktype);
+u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype);
 
 #define CKSUMTYPE_RSA_MD5            0x0007
 
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 15f1153..f43f237 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -76,5 +76,30 @@
 
 #define RPC_MAXNETNAMELEN	256
 
+/*
+ * From RFC 1831:
+ *
+ * "A record is composed of one or more record fragments.  A record
+ *  fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of
+ *  fragment data.  The bytes encode an unsigned binary number; as with
+ *  XDR integers, the byte order is from highest to lowest.  The number
+ *  encodes two values -- a boolean which indicates whether the fragment
+ *  is the last fragment of the record (bit value 1 implies the fragment
+ *  is the last fragment) and a 31-bit unsigned binary value which is the
+ *  length in bytes of the fragment's data.  The boolean value is the
+ *  highest-order bit of the header; the length is the 31 low-order bits.
+ *  (Note that this record specification is NOT in XDR standard form!)"
+ *
+ * The Linux RPC client always sends its requests in a single record
+ * fragment, limiting the maximum payload size for stream transports to
+ * 2GB.
+ */
+
+typedef u32	rpc_fraghdr;
+
+#define	RPC_LAST_STREAM_FRAGMENT	(1U << 31)
+#define	RPC_FRAGMENT_SIZE_MASK		(~RPC_LAST_STREAM_FRAGMENT)
+#define	RPC_MAX_FRAGMENT_SIZE		((1U << 31) - 1)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_MSGPROT_H_ */
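
The framing described in the comment above is simple to exercise. A compilable user-space model of building and parsing one record-marking header (big-endian on the wire, top bit = last fragment, low 31 bits = fragment length):

	#include <assert.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	#define LAST_FRAG	(1U << 31)	/* RPC_LAST_STREAM_FRAGMENT */
	#define SIZE_MASK	(~LAST_FRAG)	/* RPC_FRAGMENT_SIZE_MASK */

	int main(void)
	{
		uint32_t wire = htonl(LAST_FRAG | 1024);	/* encode */
		uint32_t hdr = ntohl(wire);			/* decode */

		assert(hdr & LAST_FRAG);			/* last fragment */
		assert((hdr & SIZE_MASK) == 1024);		/* 31-bit length */
		return 0;
	}
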
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 23448d0..5da9687 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -161,14 +161,10 @@
 
 typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
 
+extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
 extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
 		skb_reader_t *, skb_read_actor_t);
 
-struct socket;
-struct sockaddr;
-extern int xdr_sendpages(struct socket *, struct sockaddr *, int,
-		struct xdr_buf *, unsigned int, int);
-
 extern int xdr_encode_word(struct xdr_buf *, int, u32);
 extern int xdr_decode_word(struct xdr_buf *, int, u32 *);
 
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e618c16..3b8b6e8 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -1,5 +1,5 @@
 /*
- *  linux/include/linux/sunrpc/clnt_xprt.h
+ *  linux/include/linux/sunrpc/xprt.h
  *
  *  Declarations for the RPC transport interface.
  *
@@ -15,20 +15,6 @@
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/xdr.h>
 
-/*
- * The transport code maintains an estimate on the maximum number of out-
- * standing RPC requests, using a smoothed version of the congestion
- * avoidance implemented in 44BSD. This is basically the Van Jacobson
- * congestion algorithm: If a retransmit occurs, the congestion window is
- * halved; otherwise, it is incremented by 1/cwnd when
- *
- *	-	a reply is received and
- *	-	a full number of requests are outstanding and
- *	-	the congestion window hasn't been updated recently.
- *
- * Upper procedures may check whether a request would block waiting for
- * a free RPC slot by using the RPC_CONGESTED() macro.
- */
 extern unsigned int xprt_udp_slot_table_entries;
 extern unsigned int xprt_tcp_slot_table_entries;
 
@@ -36,36 +22,25 @@
 #define RPC_DEF_SLOT_TABLE	(16U)
 #define RPC_MAX_SLOT_TABLE	(128U)
 
-#define RPC_CWNDSHIFT		(8U)
-#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
-#define RPC_INITCWND		RPC_CWNDSCALE
-#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
-#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-
-/* Default timeout values */
-#define RPC_MAX_UDP_TIMEOUT	(60*HZ)
-#define RPC_MAX_TCP_TIMEOUT	(600*HZ)
-
 /*
- * Wait duration for an RPC TCP connection to be established.  Solaris
- * NFS over TCP uses 60 seconds, for example, which is in line with how
- * long a server takes to reboot.
- */
-#define RPC_CONNECT_TIMEOUT	(60*HZ)
-
-/*
- * Delay an arbitrary number of seconds before attempting to reconnect
- * after an error.
- */
-#define RPC_REESTABLISH_TIMEOUT	(15*HZ)
-
-/* RPC call and reply header size as number of 32bit words (verifier
+ * RPC call and reply header size as number of 32bit words (verifier
  * size computed separately)
  */
 #define RPC_CALLHDRSIZE		6
 #define RPC_REPHDRSIZE		4
 
 /*
+ * Parameters for choosing a free port
+ */
+extern unsigned int xprt_min_resvport;
+extern unsigned int xprt_max_resvport;
+
+#define RPC_MIN_RESVPORT	(1U)
+#define RPC_MAX_RESVPORT	(65535U)
+#define RPC_DEF_MIN_RESVPORT	(650U)
+#define RPC_DEF_MAX_RESVPORT	(1023U)
+
+/*
  * This describes a timeout strategy
  */
 struct rpc_timeout {
@@ -76,6 +51,9 @@
 	unsigned char		to_exponential;
 };
 
+struct rpc_task;
+struct rpc_xprt;
+
 /*
  * This describes a complete RPC request
  */
@@ -95,7 +73,10 @@
 	int			rq_cong;	/* has incremented xprt->cong */
 	int			rq_received;	/* receive completed */
 	u32			rq_seqno;	/* gss seq no. used on req. */
-
+	int			rq_enc_pages_num;
+	struct page		**rq_enc_pages;	/* scratch pages for use by
+						   gss privacy code */
+	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
 	struct list_head	rq_list;
 
 	struct xdr_buf		rq_private_buf;		/* The receive buffer
@@ -121,12 +102,21 @@
 #define rq_svec			rq_snd_buf.head
 #define rq_slen			rq_snd_buf.len
 
-#define XPRT_LAST_FRAG		(1 << 0)
-#define XPRT_COPY_RECM		(1 << 1)
-#define XPRT_COPY_XID		(1 << 2)
-#define XPRT_COPY_DATA		(1 << 3)
+struct rpc_xprt_ops {
+	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
+	int		(*reserve_xprt)(struct rpc_task *task);
+	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+	void		(*connect)(struct rpc_task *task);
+	int		(*send_request)(struct rpc_task *task);
+	void		(*set_retrans_timeout)(struct rpc_task *task);
+	void		(*timer)(struct rpc_task *task);
+	void		(*release_request)(struct rpc_task *task);
+	void		(*close)(struct rpc_xprt *xprt);
+	void		(*destroy)(struct rpc_xprt *xprt);
+};
 
 struct rpc_xprt {
+	struct rpc_xprt_ops *	ops;		/* transport methods */
 	struct socket *		sock;		/* BSD socket layer */
 	struct sock *		inet;		/* INET layer */
 
@@ -137,11 +127,13 @@
 	unsigned long		cong;		/* current congestion */
 	unsigned long		cwnd;		/* congestion window */
 
-	unsigned int		rcvsize,	/* socket receive buffer size */
-				sndsize;	/* socket send buffer size */
+	size_t			rcvsize,	/* transport rcv buffer size */
+				sndsize;	/* transport send buffer size */
 
 	size_t			max_payload;	/* largest RPC payload size,
 						   in bytes */
+	unsigned int		tsh_size;	/* size of transport specific
+						   header */
 
 	struct rpc_wait_queue	sending;	/* requests waiting to send */
 	struct rpc_wait_queue	resend;		/* requests waiting to resend */
@@ -150,11 +142,9 @@
 	struct list_head	free;		/* free slots */
 	struct rpc_rqst *	slot;		/* slot table storage */
 	unsigned int		max_reqs;	/* total slots */
-	unsigned long		sockstate;	/* Socket state */
+	unsigned long		state;		/* transport state */
 	unsigned char		shutdown   : 1,	/* being shut down */
-				nocong	   : 1,	/* no congestion control */
-				resvport   : 1, /* use a reserved port */
-				stream     : 1;	/* TCP */
+				resvport   : 1; /* use a reserved port */
 
 	/*
 	 * XID
@@ -171,22 +161,27 @@
 	unsigned long		tcp_copied,	/* copied to request */
 				tcp_flags;
 	/*
-	 * Connection of sockets
+	 * Connection of transports
 	 */
-	struct work_struct	sock_connect;
+	unsigned long		connect_timeout,
+				bind_timeout,
+				reestablish_timeout;
+	struct work_struct	connect_worker;
 	unsigned short		port;
+
 	/*
-	 * Disconnection of idle sockets
+	 * Disconnection of idle transports
 	 */
 	struct work_struct	task_cleanup;
 	struct timer_list	timer;
-	unsigned long		last_used;
+	unsigned long		last_used,
+				idle_timeout;
 
 	/*
 	 * Send stuff
 	 */
-	spinlock_t		sock_lock;	/* lock socket info */
-	spinlock_t		xprt_lock;	/* lock xprt info */
+	spinlock_t		transport_lock;	/* lock transport info */
+	spinlock_t		reserve_lock;	/* lock slot table */
 	struct rpc_task *	snd_task;	/* Task blocked in send */
 
 	struct list_head	recv;
@@ -195,37 +190,111 @@
 	void			(*old_data_ready)(struct sock *, int);
 	void			(*old_state_change)(struct sock *);
 	void			(*old_write_space)(struct sock *);
-
-	wait_queue_head_t	cong_wait;
 };
 
+#define XPRT_LAST_FRAG		(1 << 0)
+#define XPRT_COPY_RECM		(1 << 1)
+#define XPRT_COPY_XID		(1 << 2)
+#define XPRT_COPY_DATA		(1 << 3)
+
 #ifdef __KERNEL__
 
-struct rpc_xprt *	xprt_create_proto(int proto, struct sockaddr_in *addr,
-					struct rpc_timeout *toparms);
-int			xprt_destroy(struct rpc_xprt *);
-void			xprt_set_timeout(struct rpc_timeout *, unsigned int,
-					unsigned long);
+/*
+ * Transport operations used by ULPs
+ */
+struct rpc_xprt *	xprt_create_proto(int proto, struct sockaddr_in *addr, struct rpc_timeout *to);
+void			xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr);
 
-void			xprt_reserve(struct rpc_task *);
-int			xprt_prepare_transmit(struct rpc_task *);
-void			xprt_transmit(struct rpc_task *);
-void			xprt_receive(struct rpc_task *);
+/*
+ * Generic internal transport functions
+ */
+void			xprt_connect(struct rpc_task *task);
+void			xprt_reserve(struct rpc_task *task);
+int			xprt_reserve_xprt(struct rpc_task *task);
+int			xprt_reserve_xprt_cong(struct rpc_task *task);
+int			xprt_prepare_transmit(struct rpc_task *task);
+void			xprt_transmit(struct rpc_task *task);
+void			xprt_abort_transmit(struct rpc_task *task);
 int			xprt_adjust_timeout(struct rpc_rqst *req);
-void			xprt_release(struct rpc_task *);
-void			xprt_connect(struct rpc_task *);
-void			xprt_sock_setbufsize(struct rpc_xprt *);
+void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void			xprt_release(struct rpc_task *task);
+int			xprt_destroy(struct rpc_xprt *xprt);
 
-#define XPRT_LOCKED	0
-#define XPRT_CONNECT	1
-#define XPRT_CONNECTING	2
+static inline u32 *xprt_skip_transport_header(struct rpc_xprt *xprt, u32 *p)
+{
+	return p + xprt->tsh_size;
+}
 
-#define xprt_connected(xp)		(test_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_set_connected(xp)		(set_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_test_and_set_connected(xp)	(test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_test_and_clear_connected(xp) \
-					(test_and_clear_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_clear_connected(xp)	(clear_bit(XPRT_CONNECT, &(xp)->sockstate))
+/*
+ * Transport switch helper functions
+ */
+void			xprt_set_retrans_timeout_def(struct rpc_task *task);
+void			xprt_set_retrans_timeout_rtt(struct rpc_task *task);
+void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
+void			xprt_wait_for_buffer_space(struct rpc_task *task);
+void			xprt_write_space(struct rpc_xprt *xprt);
+void			xprt_update_rtt(struct rpc_task *task);
+void			xprt_adjust_cwnd(struct rpc_task *task, int result);
+struct rpc_rqst *	xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid);
+void			xprt_complete_rqst(struct rpc_task *task, int copied);
+void			xprt_release_rqst_cong(struct rpc_task *task);
+void			xprt_disconnect(struct rpc_xprt *xprt);
+
+/*
+ * Socket transport setup operations
+ */
+int			xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+int			xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+
+/*
+ * Reserved bit positions in xprt->state
+ */
+#define XPRT_LOCKED		(0)
+#define XPRT_CONNECTED		(1)
+#define XPRT_CONNECTING		(2)
+
+static inline void xprt_set_connected(struct rpc_xprt *xprt)
+{
+	set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connected(struct rpc_xprt *xprt)
+{
+	clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_connected(struct rpc_xprt *xprt)
+{
+	return test_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
+{
+	return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
+{
+	return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(XPRT_CONNECTING, &xprt->state);
+	smp_mb__after_clear_bit();
+}
+
+static inline int xprt_connecting(struct rpc_xprt *xprt)
+{
+	return test_bit(XPRT_CONNECTING, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
+{
+	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
+}
 
 #endif /* __KERNEL__*/
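
The rpc_xprt_ops switch is the heart of this reorganization: everything socket-specific moves behind the method table, and the generic helpers exported above (xprt_reserve_xprt_cong(), xprt_release_rqst_cong(), and friends) exist so transports can share common behaviour. A hedged sketch of what a UDP transport's table might look like; the xs_* entries are illustrative stand-ins, not declarations from this header:

	static struct rpc_xprt_ops xs_udp_ops = {
		.set_buffer_size	= xs_udp_set_buffer_size,
		.reserve_xprt		= xprt_reserve_xprt_cong,
		.release_xprt		= xprt_release_xprt_cong,
		.connect		= xs_connect,
		.send_request		= xs_udp_send_request,
		.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
		.timer			= xs_udp_timer,
		.release_request	= xprt_release_rqst_cong,
		.close			= xs_close,
		.destroy		= xs_destroy,
	};
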
 
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 46a2ce0..cdcab9c 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -6,7 +6,7 @@
 obj-$(CONFIG_SUNRPC) += sunrpc.o
 obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
 
-sunrpc-y := clnt.o xprt.o sched.o \
+sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
 	    auth.o auth_null.o auth_unix.o \
 	    svc.o svcsock.o svcauth.o svcauth_unix.o \
 	    pmap_clnt.o timer.o xdr.o \
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 505e2d4..a415d99 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -11,7 +11,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
-#include <linux/socket.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/spinlock.h>
 
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index fe1b874..f3431a7 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -10,7 +10,7 @@
 obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
 
 rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
-	gss_krb5_seqnum.o
+	gss_krb5_seqnum.o gss_krb5_wrap.o
 
 obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
 
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 2f7b867..f44f46f 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -42,9 +42,8 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/socket.h>
-#include <linux/in.h>
 #include <linux/sched.h>
+#include <linux/pagemap.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/auth.h>
 #include <linux/sunrpc/auth_gss.h>
@@ -846,10 +845,8 @@
 
 	/* We compute the checksum for the verifier over the xdr-encoded bytes
 	 * starting with the xid and ending at the end of the credential: */
-	iov.iov_base = req->rq_snd_buf.head[0].iov_base;
-	if (task->tk_client->cl_xprt->stream)
-		/* See clnt.c:call_header() */
-		iov.iov_base += 4;
+	iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
+					req->rq_snd_buf.head[0].iov_base);
 	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
 	xdr_buf_from_iov(&iov, &verf_buf);
 
@@ -857,9 +854,7 @@
 	*p++ = htonl(RPC_AUTH_GSS);
 
 	mic.data = (u8 *)(p + 1);
-	maj_stat = gss_get_mic(ctx->gc_gss_ctx,
-			       GSS_C_QOP_DEFAULT, 
-			       &verf_buf, &mic);
+	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
 		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
 	} else if (maj_stat != 0) {
@@ -890,10 +885,8 @@
 gss_validate(struct rpc_task *task, u32 *p)
 {
 	struct rpc_cred *cred = task->tk_msg.rpc_cred;
-	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
-						gc_base);
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
-	u32		seq, qop_state;
+	u32		seq;
 	struct kvec	iov;
 	struct xdr_buf	verf_buf;
 	struct xdr_netobj mic;
@@ -914,23 +907,14 @@
 	mic.data = (u8 *)p;
 	mic.len = len;
 
-	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state);
+	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
 	if (maj_stat)
 		goto out_bad;
-       switch (gss_cred->gc_service) {
-       case RPC_GSS_SVC_NONE:
-	       /* verifier data, flavor, length: */
-	       task->tk_auth->au_rslack = XDR_QUADLEN(len) + 2;
-	       break;
-       case RPC_GSS_SVC_INTEGRITY:
-	       /* verifier data, flavor, length, length, sequence number: */
-	       task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4;
-	       break;
-       case RPC_GSS_SVC_PRIVACY:
-	       goto out_bad;
-       }
+	/* We leave it to unwrap to calculate au_rslack. For now we just
+	 * calculate the length of the verifier: */
+	task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2;
 	gss_put_ctx(ctx);
 	dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n",
 			task->tk_pid);
@@ -975,8 +959,7 @@
 	p = iov->iov_base + iov->iov_len;
 	mic.data = (u8 *)(p + 1);
 
-	maj_stat = gss_get_mic(ctx->gc_gss_ctx,
-			GSS_C_QOP_DEFAULT, &integ_buf, &mic);
+	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
 	status = -EIO; /* XXX? */
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
@@ -990,6 +973,113 @@
 	return 0;
 }
 
+static void
+priv_release_snd_buf(struct rpc_rqst *rqstp)
+{
+	int i;
+
+	for (i=0; i < rqstp->rq_enc_pages_num; i++)
+		__free_page(rqstp->rq_enc_pages[i]);
+	kfree(rqstp->rq_enc_pages);
+}
+
+static int
+alloc_enc_pages(struct rpc_rqst *rqstp)
+{
+	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+	int first, last, i;
+
+	if (snd_buf->page_len == 0) {
+		rqstp->rq_enc_pages_num = 0;
+		return 0;
+	}
+
+	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
+	rqstp->rq_enc_pages_num = last - first + 1 + 1;
+	rqstp->rq_enc_pages
+		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
+				GFP_NOFS);
+	if (!rqstp->rq_enc_pages)
+		goto out;
+	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
+		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
+		if (rqstp->rq_enc_pages[i] == NULL)
+			goto out_free;
+	}
+	rqstp->rq_release_snd_buf = priv_release_snd_buf;
+	return 0;
+out_free:
+	for (i--; i >= 0; i--) {
+		__free_page(rqstp->rq_enc_pages[i]);
+	}
+out:
+	return -EAGAIN;
+}
+
+static inline int
+gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+		kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj)
+{
+	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
+	u32		offset;
+	u32             maj_stat;
+	int		status;
+	u32		*opaque_len;
+	struct page	**inpages;
+	int		first;
+	int		pad;
+	struct kvec	*iov;
+	char		*tmp;
+
+	opaque_len = p++;
+	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
+	*p++ = htonl(rqstp->rq_seqno);
+
+	status = encode(rqstp, p, obj);
+	if (status)
+		return status;
+
+	status = alloc_enc_pages(rqstp);
+	if (status)
+		return status;
+	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+	inpages = snd_buf->pages + first;
+	snd_buf->pages = rqstp->rq_enc_pages;
+	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
+	/* Give the tail its own page, in case we need extra space in the
+	 * head when wrapping: */
+	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
+		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
+		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
+		snd_buf->tail[0].iov_base = tmp;
+	}
+	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
+	/* RPC_SLACK_SPACE should prevent this from ever happening: */
+	BUG_ON(snd_buf->len > snd_buf->buflen);
+	status = -EIO;
+	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
+	 * done anyway, so it's safe to put the request on the wire: */
+	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
+	else if (maj_stat)
+		return status;
+
+	*opaque_len = htonl(snd_buf->len - offset);
+	/* guess whether we're in the head or the tail: */
+	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
+		iov = snd_buf->tail;
+	else
+		iov = snd_buf->head;
+	p = iov->iov_base + iov->iov_len;
+	pad = 3 - ((snd_buf->len - offset - 1) & 3);
+	memset(p, 0, pad);
+	iov->iov_len += pad;
+	snd_buf->len += pad;
+
+	return 0;
+}
+
 static int
 gss_wrap_req(struct rpc_task *task,
 	     kxdrproc_t encode, void *rqstp, u32 *p, void *obj)
@@ -1017,6 +1107,8 @@
 								rqstp, p, obj);
 			break;
        		case RPC_GSS_SVC_PRIVACY:
+			status = gss_wrap_req_priv(cred, ctx, encode,
+					rqstp, p, obj);
 			break;
 	}
 out:
@@ -1054,8 +1146,7 @@
 	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
 		return status;
 
-	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf,
-			&mic, NULL);
+	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
 	if (maj_stat != GSS_S_COMPLETE)
@@ -1063,6 +1154,35 @@
 	return 0;
 }
 
+static inline int
+gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+		struct rpc_rqst *rqstp, u32 **p)
+{
+	struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
+	u32 offset;
+	u32 opaque_len;
+	u32 maj_stat;
+	int status = -EIO;
+
+	opaque_len = ntohl(*(*p)++);
+	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
+	if (offset + opaque_len > rcv_buf->len)
+		return status;
+	/* remove padding: */
+	rcv_buf->len = offset + opaque_len;
+
+	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
+	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
+	if (maj_stat != GSS_S_COMPLETE)
+		return status;
+	if (ntohl(*(*p)++) != rqstp->rq_seqno)
+		return status;
+
+	return 0;
+}
+
+
 static int
 gss_unwrap_resp(struct rpc_task *task,
 		kxdrproc_t decode, void *rqstp, u32 *p, void *obj)
@@ -1071,6 +1191,9 @@
 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
 			gc_base);
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
+	u32		*savedp = p;
+	struct kvec	*head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
+	int		savedlen = head->iov_len;
 	int             status = -EIO;
 
 	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
@@ -1084,8 +1207,14 @@
 				goto out;
 			break;
        		case RPC_GSS_SVC_PRIVACY:
+			status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
+			if (status)
+				goto out;
 			break;
 	}
+	/* take into account extra slack for integrity and privacy cases: */
+	task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp)
+						+ (savedlen - head->iov_len);
 out_decode:
 	status = decode(rqstp, p, obj);
 out:
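
One bit of arithmetic in gss_wrap_req_priv() deserves a check: the wrapped body is emitted as an XDR opaque, so it must be padded to a 4-byte boundary, and pad = 3 - ((len - 1) & 3) produces exactly the bytes required. (au_rslack is then recomputed at unwrap time as au_verfsize plus whatever words the integrity or privacy headers consumed.) A compilable test of the padding identity:

	#include <assert.h>

	static int xdr_pad(int opaque_len)
	{
		return 3 - ((opaque_len - 1) & 3);
	}

	int main(void)
	{
		assert(xdr_pad(1) == 3);
		assert(xdr_pad(2) == 2);
		assert(xdr_pad(3) == 1);
		assert(xdr_pad(4) == 0);	/* already aligned */
		assert(xdr_pad(5) == 3);
		return 0;
	}
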
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index ee6ae74..3f3d543 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -139,17 +139,91 @@
 	sg->length = len;
 }
 
+static int
+process_xdr_buf(struct xdr_buf *buf, int offset, int len,
+		int (*actor)(struct scatterlist *, void *), void *data)
+{
+	int i, page_len, thislen, page_offset, ret = 0;
+	struct scatterlist	sg[1];
+
+	if (offset >= buf->head[0].iov_len) {
+		offset -= buf->head[0].iov_len;
+	} else {
+		thislen = buf->head[0].iov_len - offset;
+		if (thislen > len)
+			thislen = len;
+		buf_to_sg(sg, buf->head[0].iov_base + offset, thislen);
+		ret = actor(sg, data);
+		if (ret)
+			goto out;
+		offset = 0;
+		len -= thislen;
+	}
+	if (len == 0)
+		goto out;
+
+	if (offset >= buf->page_len) {
+		offset -= buf->page_len;
+	} else {
+		page_len = buf->page_len - offset;
+		if (page_len > len)
+			page_len = len;
+		len -= page_len;
+		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
+		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
+		thislen = PAGE_CACHE_SIZE - page_offset;
+		do {
+			if (thislen > page_len)
+				thislen = page_len;
+			sg->page = buf->pages[i];
+			sg->offset = page_offset;
+			sg->length = thislen;
+			ret = actor(sg, data);
+			if (ret)
+				goto out;
+			page_len -= thislen;
+			i++;
+			page_offset = 0;
+			thislen = PAGE_CACHE_SIZE;
+		} while (page_len != 0);
+		offset = 0;
+	}
+	if (len == 0)
+		goto out;
+
+	if (offset < buf->tail[0].iov_len) {
+		thislen = buf->tail[0].iov_len - offset;
+		if (thislen > len)
+			thislen = len;
+		buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen);
+		ret = actor(sg, data);
+		len -= thislen;
+	}
+	if (len != 0)
+		ret = -EINVAL;
+out:
+	return ret;
+}
+
+static int
+checksummer(struct scatterlist *sg, void *data)
+{
+	struct crypto_tfm *tfm = (struct crypto_tfm *)data;
+
+	crypto_digest_update(tfm, sg, 1);
+
+	return 0;
+}
+
 /* checksum the plaintext data and hdrlen bytes of the token header */
 s32
 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
-		   struct xdr_netobj *cksum)
+		   int body_offset, struct xdr_netobj *cksum)
 {
 	char                            *cksumname;
 	struct crypto_tfm               *tfm = NULL; /* XXX add to ctx? */
 	struct scatterlist              sg[1];
 	u32                             code = GSS_S_FAILURE;
-	int				len, thislen, offset;
-	int				i;
 
 	switch (cksumtype) {
 		case CKSUMTYPE_RSA_MD5:
@@ -169,33 +243,8 @@
 	crypto_digest_init(tfm);
 	buf_to_sg(sg, header, hdrlen);
 	crypto_digest_update(tfm, sg, 1);
-	if (body->head[0].iov_len) {
-		buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len);
-		crypto_digest_update(tfm, sg, 1);
-	}
-
-	len = body->page_len;
-	if (len != 0) {
-		offset = body->page_base & (PAGE_CACHE_SIZE - 1);
-		i = body->page_base >> PAGE_CACHE_SHIFT;
-		thislen = PAGE_CACHE_SIZE - offset;
-		do {
-			if (thislen > len)
-				thislen = len;
-			sg->page = body->pages[i];
-			sg->offset = offset;
-			sg->length = thislen;
-			crypto_digest_update(tfm, sg, 1);
-			len -= thislen;
-			i++;
-			offset = 0;
-			thislen = PAGE_CACHE_SIZE;
-		} while(len != 0);
-	}
-	if (body->tail[0].iov_len) {
-		buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len);
-		crypto_digest_update(tfm, sg, 1);
-	}
+	process_xdr_buf(body, body_offset, body->len - body_offset,
+			checksummer, tfm);
 	crypto_digest_final(tfm, cksum->data);
 	code = 0;
 out:
@@ -204,3 +253,154 @@
 }
 
 EXPORT_SYMBOL(make_checksum);
+
+struct encryptor_desc {
+	u8 iv[8]; /* XXX hard-coded blocksize */
+	struct crypto_tfm *tfm;
+	int pos;
+	struct xdr_buf *outbuf;
+	struct page **pages;
+	struct scatterlist infrags[4];
+	struct scatterlist outfrags[4];
+	int fragno;
+	int fraglen;
+};
+
+static int
+encryptor(struct scatterlist *sg, void *data)
+{
+	struct encryptor_desc *desc = data;
+	struct xdr_buf *outbuf = desc->outbuf;
+	struct page *in_page;
+	int thislen = desc->fraglen + sg->length;
+	int fraglen, ret;
+	int page_pos;
+
+	/* Worst case is 4 fragments: head, end of page 1, start
+	 * of page 2, tail.  Anything more is a bug. */
+	BUG_ON(desc->fragno > 3);
+	desc->infrags[desc->fragno] = *sg;
+	desc->outfrags[desc->fragno] = *sg;
+
+	page_pos = desc->pos - outbuf->head[0].iov_len;
+	if (page_pos >= 0 && page_pos < outbuf->page_len) {
+		/* pages are not in place: */
+		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+		in_page = desc->pages[i];
+	} else {
+		in_page = sg->page;
+	}
+	desc->infrags[desc->fragno].page = in_page;
+	desc->fragno++;
+	desc->fraglen += sg->length;
+	desc->pos += sg->length;
+
+	fraglen = thislen & 7; /* XXX hardcoded blocksize */
+	thislen -= fraglen;
+
+	if (thislen == 0)
+		return 0;
+
+	ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
+					thislen, desc->iv);
+	if (ret)
+		return ret;
+	if (fraglen) {
+		desc->outfrags[0].page = sg->page;
+		desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
+		desc->outfrags[0].length = fraglen;
+		desc->infrags[0] = desc->outfrags[0];
+		desc->infrags[0].page = in_page;
+		desc->fragno = 1;
+		desc->fraglen = fraglen;
+	} else {
+		desc->fragno = 0;
+		desc->fraglen = 0;
+	}
+	return 0;
+}
+
+int
+gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
+		struct page **pages)
+{
+	int ret;
+	struct encryptor_desc desc;
+
+	BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+
+	memset(desc.iv, 0, sizeof(desc.iv));
+	desc.tfm = tfm;
+	desc.pos = offset;
+	desc.outbuf = buf;
+	desc.pages = pages;
+	desc.fragno = 0;
+	desc.fraglen = 0;
+
+	ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
+	return ret;
+}
+
+EXPORT_SYMBOL(gss_encrypt_xdr_buf);
+
+struct decryptor_desc {
+	u8 iv[8]; /* XXX hard-coded blocksize */
+	struct crypto_tfm *tfm;
+	struct scatterlist frags[4];
+	int fragno;
+	int fraglen;
+};
+
+static int
+decryptor(struct scatterlist *sg, void *data)
+{
+	struct decryptor_desc *desc = data;
+	int thislen = desc->fraglen + sg->length;
+	int fraglen, ret;
+
+	/* Worst case is 4 fragments: head, end of page 1, start
+	 * of page 2, tail.  Anything more is a bug. */
+	BUG_ON(desc->fragno > 3);
+	desc->frags[desc->fragno] = *sg;
+	desc->fragno++;
+	desc->fraglen += sg->length;
+
+	fraglen = thislen & 7; /* XXX hardcoded blocksize */
+	thislen -= fraglen;
+
+	if (thislen == 0)
+		return 0;
+
+	ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
+					thislen, desc->iv);
+	if (ret)
+		return ret;
+	if (fraglen) {
+		desc->frags[0].page = sg->page;
+		desc->frags[0].offset = sg->offset + sg->length - fraglen;
+		desc->frags[0].length = fraglen;
+		desc->fragno = 1;
+		desc->fraglen = fraglen;
+	} else {
+		desc->fragno = 0;
+		desc->fraglen = 0;
+	}
+	return 0;
+}
+
+int
+gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
+{
+	struct decryptor_desc desc;
+
+	/* XXXJBF: */
+	BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+
+	memset(desc.iv, 0, sizeof(desc.iv));
+	desc.tfm = tfm;
+	desc.fragno = 0;
+	desc.fraglen = 0;
+	return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+}
+
+EXPORT_SYMBOL(gss_decrypt_xdr_buf);
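
process_xdr_buf() factors the head/pages/tail traversal out of make_checksum() so that checksumming, encryption and decryption all share one walker and differ only in the actor they pass in. A compilable user-space model of the callback shape:

	#include <assert.h>
	#include <stddef.h>

	struct chunk { const void *base; size_t length; };

	static int walk_chunks(const struct chunk *c, int n,
			       int (*actor)(const struct chunk *, void *), void *data)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = actor(&c[i], data);	/* non-zero aborts the walk */
			if (ret)
				return ret;
		}
		return 0;
	}

	static int byte_counter(const struct chunk *c, void *data)
	{
		*(size_t *)data += c->length;
		return 0;
	}

	int main(void)
	{
		struct chunk bufs[3] = { { "head", 4 }, { "page", 4 }, { "tail", 4 } };
		size_t total = 0;

		walk_chunks(bufs, 3, byte_counter, &total);
		assert(total == 12);
		return 0;
	}
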
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 606a8a8..5f1f806 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -39,7 +39,6 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/auth.h>
-#include <linux/in.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/crypto.h>
@@ -191,43 +190,12 @@
 	kfree(kctx);
 }
 
-static u32
-gss_verify_mic_kerberos(struct gss_ctx		*ctx,
-			struct xdr_buf		*message,
-			struct xdr_netobj	*mic_token,
-			u32			*qstate) {
-	u32 maj_stat = 0;
-	int qop_state;
-	struct krb5_ctx *kctx = ctx->internal_ctx_id;
-
-	maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state,
-				   KG_TOK_MIC_MSG);
-	if (!maj_stat && qop_state)
-	    *qstate = qop_state;
-
-	dprintk("RPC:      gss_verify_mic_kerberos returning %d\n", maj_stat);
-	return maj_stat;
-}
-
-static u32
-gss_get_mic_kerberos(struct gss_ctx	*ctx,
-		     u32		qop,
-		     struct xdr_buf 	*message,
-		     struct xdr_netobj	*mic_token) {
-	u32 err = 0;
-	struct krb5_ctx *kctx = ctx->internal_ctx_id;
-
-	err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG);
-
-	dprintk("RPC:      gss_get_mic_kerberos returning %d\n",err);
-
-	return err;
-}
-
 static struct gss_api_ops gss_kerberos_ops = {
 	.gss_import_sec_context	= gss_import_sec_context_kerberos,
 	.gss_get_mic		= gss_get_mic_kerberos,
 	.gss_verify_mic		= gss_verify_mic_kerberos,
+	.gss_wrap		= gss_wrap_kerberos,
+	.gss_unwrap		= gss_unwrap_kerberos,
 	.gss_delete_sec_context	= gss_delete_sec_context_kerberos,
 };
 
@@ -242,6 +210,11 @@
 		.service = RPC_GSS_SVC_INTEGRITY,
 		.name = "krb5i",
 	},
+	[2] = {
+		.pseudoflavor = RPC_AUTH_GSS_KRB5P,
+		.service = RPC_GSS_SVC_PRIVACY,
+		.name = "krb5p",
+	},
 };
 
 static struct gss_api_mech gss_kerberos_mech = {
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index afeeb87..13f8ae9 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,22 +70,13 @@
 # define RPCDBG_FACILITY        RPCDBG_AUTH
 #endif
 
-static inline int
-gss_krb5_padding(int blocksize, int length) {
-	/* Most of the code is block-size independent but in practice we
-	 * use only 8: */
-	BUG_ON(blocksize != 8);
-	return 8 - (length & 7);
-}
-
 u32
-krb5_make_token(struct krb5_ctx *ctx, int qop_req,
-		   struct xdr_buf *text, struct xdr_netobj *token,
-		   int toktype)
+gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
+		struct xdr_netobj *token)
 {
+	struct krb5_ctx		*ctx = gss_ctx->internal_ctx_id;
 	s32			checksum_type;
 	struct xdr_netobj	md5cksum = {.len = 0, .data = NULL};
-	int			blocksize = 0, tmsglen;
 	unsigned char		*ptr, *krb5_hdr, *msg_start;
 	s32			now;
 
@@ -93,9 +84,6 @@
 
 	now = get_seconds();
 
-	if (qop_req != 0)
-		goto out_err;
-
 	switch (ctx->signalg) {
 		case SGN_ALG_DES_MAC_MD5:
 			checksum_type = CKSUMTYPE_RSA_MD5;
@@ -111,21 +99,13 @@
 		goto out_err;
 	}
 
-	if (toktype == KG_TOK_WRAP_MSG) {
-		blocksize = crypto_tfm_alg_blocksize(ctx->enc);
-		tmsglen = blocksize + text->len
-			+ gss_krb5_padding(blocksize, blocksize + text->len);
-	} else {
-		tmsglen = 0;
-	}
-
-	token->len = g_token_size(&ctx->mech_used, 22 + tmsglen);
+	token->len = g_token_size(&ctx->mech_used, 22);
 
 	ptr = token->data;
-	g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr);
+	g_make_token_header(&ctx->mech_used, 22, &ptr);
 
-	*ptr++ = (unsigned char) ((toktype>>8)&0xff);
-	*ptr++ = (unsigned char) (toktype&0xff);
+	*ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
+	*ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
 
 	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
 	krb5_hdr = ptr - 2;
@@ -133,17 +113,9 @@
 
 	*(u16 *)(krb5_hdr + 2) = htons(ctx->signalg);
 	memset(krb5_hdr + 4, 0xff, 4);
-	if (toktype == KG_TOK_WRAP_MSG)
-		*(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg);
 
-	if (toktype == KG_TOK_WRAP_MSG) {
-		/* XXX removing support for now */
-		goto out_err;
-	} else { /* Sign only.  */
-		if (make_checksum(checksum_type, krb5_hdr, 8, text,
-				       &md5cksum))
+	if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
 			goto out_err;
-	}
 
 	switch (ctx->signalg) {
 	case SGN_ALG_DES_MAC_MD5:
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 8767fc5..2030475 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -68,21 +68,14 @@
 #endif
 
 
-/* message_buffer is an input if toktype is MIC and an output if it is WRAP:
- * If toktype is MIC: read_token is a mic token, and message_buffer is the
- *   data that the mic was supposedly taken over.
- * If toktype is WRAP: read_token is a wrap token, and message_buffer is used
- *   to return the decrypted data.
- */
+/* read_token is a mic token, and message_buffer is the data that the mic was
+ * supposedly taken over. */
 
-/* XXX will need to change prototype and/or just split into a separate function
- * when we add privacy (because read_token will be in pages too). */
 u32
-krb5_read_token(struct krb5_ctx *ctx,
-		struct xdr_netobj *read_token,
-		struct xdr_buf *message_buffer,
-		int *qop_state, int toktype)
+gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
+		struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
 {
+	struct krb5_ctx		*ctx = gss_ctx->internal_ctx_id;
 	int			signalg;
 	int			sealalg;
 	s32			checksum_type;
@@ -100,16 +93,12 @@
 					read_token->len))
 		goto out;
 
-	if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff)))
+	if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
+	    (*ptr++ != ( KG_TOK_MIC_MSG    &0xff))   )
 		goto out;
 
 	/* XXX sanity-check bodysize?? */
 
-	if (toktype == KG_TOK_WRAP_MSG) {
-		/* XXX gone */
-		goto out;
-	}
-
 	/* get the sign and seal algorithms */
 
 	signalg = ptr[0] + (ptr[1] << 8);
@@ -120,14 +109,7 @@
 	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
 		goto out;
 
-	if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) ||
-	    ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff)))
-		goto out;
-
-	/* in the current spec, there is only one valid seal algorithm per
-	   key type, so a simple comparison is ok */
-
-	if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg))
+	if (sealalg != 0xffff)
 		goto out;
 
 	/* there are several mappings of seal algorithms to sign algorithms,
@@ -154,7 +136,7 @@
 	switch (signalg) {
 	case SGN_ALG_DES_MAC_MD5:
 		ret = make_checksum(checksum_type, ptr - 2, 8,
-					 message_buffer, &md5cksum);
+					 message_buffer, 0, &md5cksum);
 		if (ret)
 			goto out;
 
@@ -175,9 +157,6 @@
 
 	/* it got through unscathed.  Make sure the context is unexpired */
 
-	if (qop_state)
-		*qop_state = GSS_C_QOP_DEFAULT;
-
 	now = get_seconds();
 
 	ret = GSS_S_CONTEXT_EXPIRED;
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
new file mode 100644
index 0000000..af777cf
--- /dev/null
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -0,0 +1,363 @@
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/sunrpc/gss_krb5.h>
+#include <linux/random.h>
+#include <linux/pagemap.h>
+#include <asm/scatterlist.h>
+#include <linux/crypto.h>
+
+#ifdef RPC_DEBUG
+# define RPCDBG_FACILITY	RPCDBG_AUTH
+#endif
+
+static inline int
+gss_krb5_padding(int blocksize, int length)
+{
+	/* Most of the code is block-size independent but currently we
+	 * use only 8: */
+	BUG_ON(blocksize != 8);
+	return 8 - (length & 7);
+}
+
+static inline void
+gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
+{
+	int padding = gss_krb5_padding(blocksize, buf->len - offset);
+	char *p;
+	struct kvec *iov;
+
+	if (buf->page_len || buf->tail[0].iov_len)
+		iov = &buf->tail[0];
+	else
+		iov = &buf->head[0];
+	p = iov->iov_base + iov->iov_len;
+	iov->iov_len += padding;
+	buf->len += padding;
+	memset(p, padding, padding);
+}
+
+static inline int
+gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
+{
+	u8 *ptr;
+	u8 pad;
+	int len = buf->len;
+
+	if (len <= buf->head[0].iov_len) {
+		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
+		if (pad > buf->head[0].iov_len)
+			return -EINVAL;
+		buf->head[0].iov_len -= pad;
+		goto out;
+	} else
+		len -= buf->head[0].iov_len;
+	if (len <= buf->page_len) {
+		int last = (buf->page_base + len - 1)
+					>> PAGE_CACHE_SHIFT;
+		int offset = (buf->page_base + len - 1)
+					& (PAGE_CACHE_SIZE - 1);
+		ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA);
+		pad = *(ptr + offset);
+		kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA);
+		goto out;
+	} else
+		len -= buf->page_len;
+	BUG_ON(len > buf->tail[0].iov_len);
+	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
+out:
+	/* XXX: NOTE: we do not adjust the page lengths--they represent
+	 * a range of data in the real filesystem page cache, and we need
+	 * to know that range so the xdr code can properly place read data.
+	 * However adjusting the head length, as we do above, is harmless.
+	 * In the case of a request that fits into a single page, the server
+	 * also uses length and head length together to determine the original
+ * start of the request to copy the request for deferral; so it's
+	 * easier on the server if we adjust head and tail length in tandem.
+	 * It's not really a problem that we don't fool with the page and
+	 * tail lengths, though--at worst badly formed xdr might lead the
+	 * server to attempt to parse the padding.
+	 * XXX: Document all these weird requirements for gss mechanism
+	 * wrap/unwrap functions. */
+	if (pad > blocksize)
+		return -EINVAL;
+	if (buf->len > pad)
+		buf->len -= pad;
+	else
+		return -EINVAL;
+	return 0;
+}
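Worked numbers for the two padding helpers above (each pad byte stores the pad count, so the unwrap side only needs to read the final byte):

	int len = 13;
	int pad = 8 - (len & 7);	/* = 3: append 0x03 0x03 0x03 */

A length that is already a multiple of 8 gets a full block of eight 0x08 bytes, so a final byte greater than the blocksize can only mean corruption; that is the -EINVAL check at the end of gss_krb5_remove_padding().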
+
+static inline void
+make_confounder(char *p, int blocksize)
+{
+	static u64 i = 0;
+	u64 *q = (u64 *)p;
+
+	/* rfc1964 claims this should be "random".  But all that's really
+	 * necessary is that it be unique.  And not even that is necessary in
+	 * our case since our "gssapi" implementation exists only to support
+	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
+	 * already begin with a unique sequence number.  Just to hedge my bets
+	 * I'll make a half-hearted attempt at something unique, but ensuring
+	 * uniqueness would mean worrying about atomicity and rollover, and I
+	 * don't care enough. */
+
+	BUG_ON(blocksize != 8);
+	*q = i++;
+}
+
+/* Assumptions: the head and tail of inbuf are ours to play with.
+ * The pages, however, may be real pages in the page cache and we replace
+ * them with scratch pages from **pages before writing to them. */
+/* XXX: obviously the above should be documentation of wrap interface,
+ * and shouldn't be in this kerberos-specific file. */
+
+/* XXX factor out common code with seal/unseal. */
+
+u32
+gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
+		struct xdr_buf *buf, struct page **pages)
+{
+	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
+	s32			checksum_type;
+	struct xdr_netobj	md5cksum = {.len = 0, .data = NULL};
+	int			blocksize = 0, plainlen;
+	unsigned char		*ptr, *krb5_hdr, *msg_start;
+	s32			now;
+	int			headlen;
+	struct page		**tmp_pages;
+
+	dprintk("RPC:     gss_wrap_kerberos\n");
+
+	now = get_seconds();
+
+	switch (kctx->signalg) {
+		case SGN_ALG_DES_MAC_MD5:
+			checksum_type = CKSUMTYPE_RSA_MD5;
+			break;
+		default:
+			dprintk("RPC:      gss_krb5_seal: kctx->signalg %d not"
+				" supported\n", kctx->signalg);
+			goto out_err;
+	}
+	if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
+		dprintk("RPC:      gss_krb5_seal: kctx->sealalg %d not supported\n",
+			kctx->sealalg);
+		goto out_err;
+	}
+
+	blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+	gss_krb5_add_padding(buf, offset, blocksize);
+	BUG_ON((buf->len - offset) % blocksize);
+	plainlen = blocksize + buf->len - offset;
+
+	headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
+						(buf->len - offset);
+
+	ptr = buf->head[0].iov_base + offset;
+	/* shift data to make room for header. */
+	/* XXX Would be cleverer to encrypt while copying. */
+	/* XXX bounds checking, slack, etc. */
+	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
+	buf->head[0].iov_len += headlen;
+	buf->len += headlen;
+	BUG_ON((buf->len - offset - headlen) % blocksize);
+
+	g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);
+
+	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
+	*ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff);
+
+	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
+	krb5_hdr = ptr - 2;
+	msg_start = krb5_hdr + 24;
+	/* XXXJBF: */
+	BUG_ON(buf->head[0].iov_base + offset + headlen !=
+						msg_start + blocksize);
+
+	*(u16 *)(krb5_hdr + 2) = htons(kctx->signalg);
+	memset(krb5_hdr + 4, 0xff, 4);
+	*(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
+
+	make_confounder(msg_start, blocksize);
+
+	/* XXXJBF: UGH!: */
+	tmp_pages = buf->pages;
+	buf->pages = pages;
+	if (make_checksum(checksum_type, krb5_hdr, 8, buf,
+				offset + headlen - blocksize, &md5cksum))
+		goto out_err;
+	buf->pages = tmp_pages;
+
+	switch (kctx->signalg) {
+	case SGN_ALG_DES_MAC_MD5:
+		if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+				  md5cksum.data, md5cksum.len))
+			goto out_err;
+		memcpy(krb5_hdr + 16,
+		       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+		       KRB5_CKSUM_LENGTH);
+
+		dprintk("RPC:      make_seal_token: cksum data: \n");
+		print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
+		break;
+	default:
+		BUG();
+	}
+
+	kfree(md5cksum.data);
+
+	/* XXX would probably be more efficient to compute checksum
+	 * and encrypt at the same time: */
+	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
+			       kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8)))
+		goto out_err;
+
+	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
+									pages))
+		goto out_err;
+
+	kctx->seq_send++;
+
+	return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
+out_err:
+	kfree(md5cksum.data);
+	return GSS_S_FAILURE;
+}
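Reading the pointer arithmetic back out, the token that gss_wrap_kerberos() assembles matches the layout of RFC 1964, sections 1.2.1-1.2.2 (offsets relative to krb5_hdr):

	/*  0.. 1  TOK_ID     (KG_TOK_WRAP_MSG)
	 *  2.. 3  SGN_ALG    (here SGN_ALG_DES_MAC_MD5)
	 *  4.. 5  SEAL_ALG   (0xffff would mean sign-only)
	 *  6.. 7  filler     (0xff 0xff)
	 *  8..15  SND_SEQ    (sequence number, encrypted in place)
	 * 16..23  SGN_CKSUM  (last 8 bytes of the encrypted MD5)
	 * 24..    confounder (one cipher block), then the padded payload;
	 *         gss_encrypt_xdr_buf() encrypts from the confounder on. */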
+
+u32
+gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
+{
+	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
+	int			signalg;
+	int			sealalg;
+	s32			checksum_type;
+	struct xdr_netobj	md5cksum = {.len = 0, .data = NULL};
+	s32			now;
+	int			direction;
+	s32			seqnum;
+	unsigned char		*ptr;
+	int			bodysize;
+	u32			ret = GSS_S_DEFECTIVE_TOKEN;
+	void			*data_start, *orig_start;
+	int			data_len;
+	int			blocksize;
+
+	dprintk("RPC:      gss_unwrap_kerberos\n");
+
+	ptr = (u8 *)buf->head[0].iov_base + offset;
+	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
+					buf->len - offset))
+		goto out;
+
+	if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
+	    (*ptr++ !=  (KG_TOK_WRAP_MSG    &0xff))   )
+		goto out;
+
+	/* XXX sanity-check bodysize?? */
+
+	/* get the sign and seal algorithms */
+
+	signalg = ptr[0] + (ptr[1] << 8);
+	sealalg = ptr[2] + (ptr[3] << 8);
+
+	/* Sanity checks */
+
+	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
+		goto out;
+
+	if (sealalg == 0xffff)
+		goto out;
+
+	/* in the current spec, there is only one valid seal algorithm per
+	   key type, so a simple comparison is ok */
+
+	if (sealalg != kctx->sealalg)
+		goto out;
+
+	/* there are several mappings of seal algorithms to sign algorithms,
+	   but few enough that we can try them all. */
+
+	if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
+	    (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
+	    (kctx->sealalg == SEAL_ALG_DES3KD &&
+	     signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
+		goto out;
+
+	if (gss_decrypt_xdr_buf(kctx->enc, buf,
+			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
+		goto out;
+
+	/* compute the checksum of the message */
+
+	/* initialize the cksum */
+	switch (signalg) {
+	case SGN_ALG_DES_MAC_MD5:
+		checksum_type = CKSUMTYPE_RSA_MD5;
+		break;
+	default:
+		ret = GSS_S_DEFECTIVE_TOKEN;
+		goto out;
+	}
+
+	switch (signalg) {
+	case SGN_ALG_DES_MAC_MD5:
+		ret = make_checksum(checksum_type, ptr - 2, 8, buf,
+			 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
+		if (ret)
+			goto out;
+
+		ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+				   md5cksum.data, md5cksum.len);
+		if (ret)
+			goto out;
+
+		if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
+			ret = GSS_S_BAD_SIG;
+			goto out;
+		}
+		break;
+	default:
+		ret = GSS_S_DEFECTIVE_TOKEN;
+		goto out;
+	}
+
+	/* it got through unscathed.  Make sure the context is unexpired */
+
+	now = get_seconds();
+
+	ret = GSS_S_CONTEXT_EXPIRED;
+	if (now > kctx->endtime)
+		goto out;
+
+	/* do sequencing checks */
+
+	if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
+				    &seqnum)))
+		goto out;
+
+	/* A successful krb5_get_seq_num() leaves ret == 0 (GSS_S_COMPLETE),
+	 * so reset it before the direction check can take the error exit: */
+	ret = GSS_S_BAD_SIG;
+	if ((kctx->initiate && direction != 0xff) ||
+	    (!kctx->initiate && direction != 0))
+		goto out;
+
+	/* Copy the data back to the right position.  XXX: Would probably be
+	 * better to copy and encrypt at the same time. */
+
+	blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+	data_start = ptr + 22 + blocksize;
+	orig_start = buf->head[0].iov_base + offset;
+	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
+	memmove(orig_start, data_start, data_len);
+	buf->head[0].iov_len -= (data_start - orig_start);
+	buf->len -= (data_start - orig_start);
+
+	ret = GSS_S_DEFECTIVE_TOKEN;
+	if (gss_krb5_remove_padding(buf, blocksize))
+		goto out;
+
+	ret = GSS_S_COMPLETE;
+out:
+	kfree(md5cksum.data);
+	return ret;
+}
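The closing memmove is easiest to check as arithmetic. After the two TOK_ID bytes are consumed, ptr sits at krb5_hdr + 2, so:

	data_start = ptr + 22 + blocksize;	/* krb5_hdr + 24, past the confounder */

Everything from data_start to the end of head[0] is decrypted payload; shifting it back to "offset" hides the token header, and head[0].iov_len and buf->len shrink by the same amount, so the caller sees plain XDR data where the wrap token used to be.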
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 9dfb683..b048bf67 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -35,7 +35,6 @@
 
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/socket.h>
 #include <linux/module.h>
 #include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/gss_asn1.h>
@@ -251,13 +250,11 @@
 
 u32
 gss_get_mic(struct gss_ctx	*context_handle,
-	    u32			qop,
 	    struct xdr_buf	*message,
 	    struct xdr_netobj	*mic_token)
 {
 	 return context_handle->mech_type->gm_ops
 		->gss_get_mic(context_handle,
-			      qop,
 			      message,
 			      mic_token);
 }
@@ -267,16 +264,34 @@
 u32
 gss_verify_mic(struct gss_ctx		*context_handle,
 	       struct xdr_buf		*message,
-	       struct xdr_netobj	*mic_token,
-	       u32			*qstate)
+	       struct xdr_netobj	*mic_token)
 {
 	return context_handle->mech_type->gm_ops
 		->gss_verify_mic(context_handle,
 				 message,
-				 mic_token,
-				 qstate);
+				 mic_token);
 }
 
+u32
+gss_wrap(struct gss_ctx	*ctx_id,
+	 int		offset,
+	 struct xdr_buf	*buf,
+	 struct page	**inpages)
+{
+	return ctx_id->mech_type->gm_ops
+		->gss_wrap(ctx_id, offset, buf, inpages);
+}
+
+u32
+gss_unwrap(struct gss_ctx	*ctx_id,
+	   int			offset,
+	   struct xdr_buf	*buf)
+{
+	return ctx_id->mech_type->gm_ops
+		->gss_unwrap(ctx_id, offset, buf);
+}
+
 /* gss_delete_sec_context: free all resources associated with context_handle.
  * Note this differs from the RFC 2744-specified prototype in that we don't
  * bother returning an output token, since it would never be used anyway. */
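For a mechanism, privacy support comes down to filling in the two ops that gss_wrap() and gss_unwrap() above indirect through. A hedged sketch mirroring gss_kerberos_ops; the *_example names are placeholders, not real symbols:

	static struct gss_api_ops gss_example_ops = {
		.gss_import_sec_context	= gss_import_sec_context_example,
		.gss_get_mic		= gss_get_mic_example,
		.gss_verify_mic		= gss_verify_mic_example,
		.gss_wrap		= gss_wrap_example,	/* privacy */
		.gss_unwrap		= gss_unwrap_example,	/* privacy */
		.gss_delete_sec_context	= gss_delete_sec_context_example,
	};

A mechanism without privacy simply leaves .gss_wrap/.gss_unwrap unset; spkm3, below, gains no such entry points in this patch.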
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 6c97d61..39b3edc 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -224,18 +224,13 @@
 static u32
 gss_verify_mic_spkm3(struct gss_ctx		*ctx,
 			struct xdr_buf		*signbuf,
-			struct xdr_netobj	*checksum,
-			u32		*qstate) {
+			struct xdr_netobj	*checksum)
+{
 	u32 maj_stat = 0;
-	int qop_state = 0;
 	struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
 	dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
-	maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state,
-				   SPKM_MIC_TOK);
-
-	if (!maj_stat && qop_state)
-	    *qstate = qop_state;
+	maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
 
 	dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
 	return maj_stat;
@@ -243,15 +238,15 @@
 
 static u32
 gss_get_mic_spkm3(struct gss_ctx	*ctx,
-		     u32		qop,
 		     struct xdr_buf	*message_buffer,
-		     struct xdr_netobj	*message_token) {
+		     struct xdr_netobj	*message_token)
+{
 	u32 err = 0;
 	struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
 	dprintk("RPC: gss_get_mic_spkm3\n");
 
-	err = spkm3_make_token(sctx, qop, message_buffer,
+	err = spkm3_make_token(sctx, message_buffer,
 			      message_token, SPKM_MIC_TOK);
 	return err;
 }
@@ -264,8 +259,8 @@
 };
 
 static struct pf_desc gss_spkm3_pfs[] = {
-	{RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"},
-	{RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
+	{RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"},
+	{RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
 };
 
 static struct gss_api_mech gss_spkm3_mech = {
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index 2533986..148201e 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -51,7 +51,7 @@
  */
 
 u32
-spkm3_make_token(struct spkm3_ctx *ctx, int qop_req,
+spkm3_make_token(struct spkm3_ctx *ctx,
 		   struct xdr_buf * text, struct xdr_netobj * token,
 		   int toktype)
 {
@@ -68,8 +68,6 @@
 	dprintk("RPC: spkm3_make_token\n");
 
 	now = jiffies;
-	if (qop_req != 0)
-		goto out_err;
 
 	if (ctx->ctx_id.len != 16) {
 		dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 65ce81b..c3c0d95 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -52,7 +52,7 @@
 spkm3_read_token(struct spkm3_ctx *ctx,
 		struct xdr_netobj *read_token,    /* checksum */
 		struct xdr_buf *message_buffer, /* signbuf */
-		int *qop_state, int toktype)
+		int toktype)
 {
 	s32			code;
 	struct xdr_netobj	wire_cksum = {.len =0, .data = NULL};
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e330819..e4ada15 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -566,8 +566,7 @@
 
 	if (rqstp->rq_deferred) /* skip verification of revisited request */
 		return SVC_OK;
-	if (gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL)
-							!= GSS_S_COMPLETE) {
+	if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) {
 		*authp = rpcsec_gsserr_credproblem;
 		return SVC_DENIED;
 	}
@@ -604,7 +603,7 @@
 	xdr_buf_from_iov(&iov, &verf_data);
 	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
 	mic.data = (u8 *)(p + 1);
-	maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic);
+	maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
 	if (maj_stat != GSS_S_COMPLETE)
 		return -1;
 	*p++ = htonl(mic.len);
@@ -710,7 +709,7 @@
 		goto out;
 	if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
 		goto out;
-	maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL);
+	maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
 	if (maj_stat != GSS_S_COMPLETE)
 		goto out;
 	if (ntohl(svc_getu32(&buf->head[0])) != seq)
@@ -1012,7 +1011,7 @@
 			resv = &resbuf->tail[0];
 		}
 		mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
-		if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic))
+		if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
 			goto out_err;
 		svc_putu32(resv, htonl(mic.len));
 		memset(mic.data + mic.len, 0,
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 9b72d3a..f56767a 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -7,9 +7,7 @@
  */
 
 #include <linux/types.h>
-#include <linux/socket.h>
 #include <linux/module.h>
-#include <linux/in.h>
 #include <linux/utsname.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sched.h>
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4ff297a..890fb5e 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -9,8 +9,6 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/module.h>
-#include <linux/socket.h>
-#include <linux/in.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/auth.h>
 
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f17e615..702ede3 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1,5 +1,5 @@
 /*
- *  linux/net/sunrpc/rpcclnt.c
+ *  linux/net/sunrpc/clnt.c
  *
  *  This file contains the high-level RPC interface.
  *  It is modeled as a finite state machine to support both synchronous
@@ -27,7 +27,6 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/in.h>
 #include <linux/utsname.h>
 
 #include <linux/sunrpc/clnt.h>
@@ -53,6 +52,7 @@
 static void	call_encode(struct rpc_task *task);
 static void	call_decode(struct rpc_task *task);
 static void	call_bind(struct rpc_task *task);
+static void	call_bind_status(struct rpc_task *task);
 static void	call_transmit(struct rpc_task *task);
 static void	call_status(struct rpc_task *task);
 static void	call_refresh(struct rpc_task *task);
@@ -517,15 +517,8 @@
 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
 {
 	struct rpc_xprt *xprt = clnt->cl_xprt;
-
-	xprt->sndsize = 0;
-	if (sndsize)
-		xprt->sndsize = sndsize + RPC_SLACK_SPACE;
-	xprt->rcvsize = 0;
-	if (rcvsize)
-		xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
-	if (xprt_connected(xprt))
-		xprt_sock_setbufsize(xprt);
+	if (xprt->ops->set_buffer_size)
+		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
 }
 
 /*
@@ -685,13 +678,11 @@
 static void
 call_encode(struct rpc_task *task)
 {
-	struct rpc_clnt	*clnt = task->tk_client;
 	struct rpc_rqst	*req = task->tk_rqstp;
 	struct xdr_buf *sndbuf = &req->rq_snd_buf;
 	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
 	unsigned int	bufsiz;
 	kxdrproc_t	encode;
-	int		status;
 	u32		*p;
 
 	dprintk("RPC: %4d call_encode (status %d)\n", 
@@ -719,11 +710,15 @@
 		rpc_exit(task, -EIO);
 		return;
 	}
-	if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
-						 task->tk_msg.rpc_argp)) < 0) {
-		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
-				clnt->cl_protname, -status);
-		rpc_exit(task, status);
+	if (encode == NULL)
+		return;
+
+	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
+			task->tk_msg.rpc_argp);
+	if (task->tk_status == -ENOMEM) {
+		/* XXX: Is this sane? */
+		rpc_delay(task, 3*HZ);
+		task->tk_status = -EAGAIN;
 	}
 }
 
@@ -734,43 +729,95 @@
 call_bind(struct rpc_task *task)
 {
 	struct rpc_clnt	*clnt = task->tk_client;
-	struct rpc_xprt *xprt = clnt->cl_xprt;
 
-	dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid,
-			xprt, (xprt_connected(xprt) ? "is" : "is not"));
+	dprintk("RPC: %4d call_bind (status %d)\n",
+				task->tk_pid, task->tk_status);
 
-	task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect;
-
+	task->tk_action = call_connect;
 	if (!clnt->cl_port) {
-		task->tk_action = call_connect;
-		task->tk_timeout = RPC_CONNECT_TIMEOUT;
+		task->tk_action = call_bind_status;
+		task->tk_timeout = task->tk_xprt->bind_timeout;
 		rpc_getport(task, clnt);
 	}
 }
 
 /*
- * 4a.	Connect to the RPC server (TCP case)
+ * 4a.	Sort out bind result
+ */
+static void
+call_bind_status(struct rpc_task *task)
+{
+	int status = -EACCES;
+
+	if (task->tk_status >= 0) {
+		dprintk("RPC: %4d call_bind_status (status %d)\n",
+					task->tk_pid, task->tk_status);
+		task->tk_status = 0;
+		task->tk_action = call_connect;
+		return;
+	}
+
+	switch (task->tk_status) {
+	case -EACCES:
+		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
+				task->tk_pid);
+		rpc_delay(task, 3*HZ);
+		goto retry_bind;
+	case -ETIMEDOUT:
+		dprintk("RPC: %4d rpcbind request timed out\n",
+				task->tk_pid);
+		if (RPC_IS_SOFT(task)) {
+			status = -EIO;
+			break;
+		}
+		goto retry_bind;
+	case -EPFNOSUPPORT:
+		dprintk("RPC: %4d remote rpcbind service unavailable\n",
+				task->tk_pid);
+		break;
+	case -EPROTONOSUPPORT:
+		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
+				task->tk_pid);
+		break;
+	default:
+		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
+				task->tk_pid, -task->tk_status);
+		status = -EIO;
+		break;
+	}
+
+	rpc_exit(task, status);
+	return;
+
+retry_bind:
+	task->tk_status = 0;
+	task->tk_action = call_bind;
+	return;
+}
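In table form, the policy the switch above implements (a recap, not code from the patch):

	/*  -EACCES            program/version not registered: wait 3s, rebind
	 *  -ETIMEDOUT         hard tasks rebind again; soft tasks fail -EIO
	 *  -EPFNOSUPPORT,
	 *  -EPROTONOSUPPORT   rpcbind itself unusable: fail with -EACCES
	 *  anything else      treated as fatal, task exits with -EIO      */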
+
+/*
+ * 4b.	Connect to the RPC server
  */
 static void
 call_connect(struct rpc_task *task)
 {
-	struct rpc_clnt *clnt = task->tk_client;
+	struct rpc_xprt *xprt = task->tk_xprt;
 
-	dprintk("RPC: %4d call_connect status %d\n",
-				task->tk_pid, task->tk_status);
+	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
+			task->tk_pid, xprt,
+			(xprt_connected(xprt) ? "is" : "is not"));
 
-	if (xprt_connected(clnt->cl_xprt)) {
-		task->tk_action = call_transmit;
-		return;
+	task->tk_action = call_transmit;
+	if (!xprt_connected(xprt)) {
+		task->tk_action = call_connect_status;
+		if (task->tk_status < 0)
+			return;
+		xprt_connect(task);
 	}
-	task->tk_action = call_connect_status;
-	if (task->tk_status < 0)
-		return;
-	xprt_connect(task);
 }
 
 /*
- * 4b. Sort out connect result
+ * 4c.	Sort out connect result
  */
 static void
 call_connect_status(struct rpc_task *task)
@@ -778,6 +825,9 @@
 	struct rpc_clnt *clnt = task->tk_client;
 	int status = task->tk_status;
 
+	dprintk("RPC: %5u call_connect_status (status %d)\n", 
+				task->tk_pid, task->tk_status);
+
 	task->tk_status = 0;
 	if (status >= 0) {
 		clnt->cl_stats->netreconn++;
@@ -785,17 +835,19 @@
 		return;
 	}
 
-	/* Something failed: we may have to rebind */
+	/* Something failed: remote service port may have changed */
 	if (clnt->cl_autobind)
 		clnt->cl_port = 0;
+
 	switch (status) {
 	case -ENOTCONN:
 	case -ETIMEDOUT:
 	case -EAGAIN:
-		task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
+		task->tk_action = call_bind;
 		break;
 	default:
 		rpc_exit(task, -EIO);
+		break;
 	}
 }
 
@@ -815,10 +867,12 @@
 	if (task->tk_status != 0)
 		return;
 	/* Encode here so that rpcsec_gss can use correct sequence number. */
-	if (!task->tk_rqstp->rq_bytes_sent)
+	if (task->tk_rqstp->rq_bytes_sent == 0) {
 		call_encode(task);
-	if (task->tk_status < 0)
-		return;
+		/* Did the encode result in an error condition? */
+		if (task->tk_status != 0)
+			goto out_nosend;
+	}
 	xprt_transmit(task);
 	if (task->tk_status < 0)
 		return;
@@ -826,6 +880,10 @@
 		task->tk_action = NULL;
 		rpc_wake_up_task(task);
 	}
+	return;
+out_nosend:
+	/* release socket write lock before attempting to handle error */
+	xprt_abort_transmit(task);
 }
 
 /*
@@ -1020,13 +1078,12 @@
 call_header(struct rpc_task *task)
 {
 	struct rpc_clnt *clnt = task->tk_client;
-	struct rpc_xprt *xprt = clnt->cl_xprt;
 	struct rpc_rqst	*req = task->tk_rqstp;
 	u32		*p = req->rq_svec[0].iov_base;
 
 	/* FIXME: check buffer size? */
-	if (xprt->stream)
-		*p++ = 0;		/* fill in later */
+
+	p = xprt_skip_transport_header(task->tk_xprt, p);
 	*p++ = req->rq_xid;		/* XID */
 	*p++ = htonl(RPC_CALL);		/* CALL */
 	*p++ = htonl(RPC_VERSION);	/* RPC version */
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 4e81f27..a398575 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -26,7 +26,7 @@
 #define PMAP_GETPORT		3
 
 static struct rpc_procinfo	pmap_procedures[];
-static struct rpc_clnt *	pmap_create(char *, struct sockaddr_in *, int);
+static struct rpc_clnt *	pmap_create(char *, struct sockaddr_in *, int, int);
 static void			pmap_getport_done(struct rpc_task *);
 static struct rpc_program	pmap_program;
 static DEFINE_SPINLOCK(pmap_lock);
@@ -65,7 +65,7 @@
 	map->pm_binding = 1;
 	spin_unlock(&pmap_lock);
 
-	pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot);
+	pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0);
 	if (IS_ERR(pmap_clnt)) {
 		task->tk_status = PTR_ERR(pmap_clnt);
 		goto bailout;
@@ -112,7 +112,7 @@
 			NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
 
 	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
-	pmap_clnt = pmap_create(hostname, sin, prot);
+	pmap_clnt = pmap_create(hostname, sin, prot, 0);
 	if (IS_ERR(pmap_clnt))
 		return PTR_ERR(pmap_clnt);
 
@@ -171,7 +171,7 @@
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-	pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP);
+	pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
 	if (IS_ERR(pmap_clnt)) {
 		error = PTR_ERR(pmap_clnt);
 		dprintk("RPC: couldn't create pmap client. Error = %d\n", error);
@@ -198,7 +198,7 @@
 }
 
 static struct rpc_clnt *
-pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto)
+pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
 {
 	struct rpc_xprt	*xprt;
 	struct rpc_clnt	*clnt;
@@ -208,6 +208,8 @@
 	if (IS_ERR(xprt))
 		return (struct rpc_clnt *)xprt;
 	xprt->addr.sin_port = htons(RPC_PMAP_PORT);
+	if (!privileged)
+		xprt->resvport = 0;
 
 	/* printk("pmap: create clnt\n"); */
 	clnt = rpc_new_client(xprt, hostname,
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
new file mode 100644
index 0000000..8f97e90
--- /dev/null
+++ b/net/sunrpc/socklib.c
@@ -0,0 +1,175 @@
+/*
+ * linux/net/sunrpc/socklib.c
+ *
+ * Common socket helper routines for RPC client and server
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/udp.h>
+#include <linux/sunrpc/xdr.h>
+
+
+/**
+ * skb_read_bits - copy some data bits from skb to internal buffer
+ * @desc: sk_buff copy helper
+ * @to: copy destination
+ * @len: number of bytes to copy
+ *
+ * Possibly called several times to iterate over an sk_buff and copy
+ * data out of it.
+ */
+static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
+{
+	if (len > desc->count)
+		len = desc->count;
+	if (skb_copy_bits(desc->skb, desc->offset, to, len))
+		return 0;
+	desc->count -= len;
+	desc->offset += len;
+	return len;
+}
+
+/**
+ * skb_read_and_csum_bits - copy and checksum from skb to buffer
+ * @desc: sk_buff copy helper
+ * @to: copy destination
+ * @len: number of bytes to copy
+ *
+ * Same as skb_read_bits, but calculate a checksum at the same time.
+ */
+static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
+{
+	unsigned int	csum2, pos;
+
+	if (len > desc->count)
+		len = desc->count;
+	pos = desc->offset;
+	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
+	desc->csum = csum_block_add(desc->csum, csum2, pos);
+	desc->count -= len;
+	desc->offset += len;
+	return len;
+}
+
+/**
+ * xdr_partial_copy_from_skb - copy data out of an skb
+ * @xdr: target XDR buffer
+ * @base: starting offset
+ * @desc: sk_buff copy helper
+ * @copy_actor: virtual method for copying data
+ *
+ */
+ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor)
+{
+	struct page	**ppage = xdr->pages;
+	unsigned int	len, pglen = xdr->page_len;
+	ssize_t		copied = 0;
+	int		ret;
+
+	len = xdr->head[0].iov_len;
+	if (base < len) {
+		len -= base;
+		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
+		copied += ret;
+		if (ret != len || !desc->count)
+			goto out;
+		base = 0;
+	} else
+		base -= len;
+
+	if (unlikely(pglen == 0))
+		goto copy_tail;
+	if (unlikely(base >= pglen)) {
+		base -= pglen;
+		goto copy_tail;
+	}
+	if (base || xdr->page_base) {
+		pglen -= base;
+		base += xdr->page_base;
+		ppage += base >> PAGE_CACHE_SHIFT;
+		base &= ~PAGE_CACHE_MASK;
+	}
+	do {
+		char *kaddr;
+
+		/* ACL likes to be lazy in allocating pages - ACLs
+		 * are small by default but can get huge. */
+		if (unlikely(*ppage == NULL)) {
+			*ppage = alloc_page(GFP_ATOMIC);
+			if (unlikely(*ppage == NULL)) {
+				if (copied == 0)
+					copied = -ENOMEM;
+				goto out;
+			}
+		}
+
+		len = PAGE_CACHE_SIZE;
+		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
+		if (base) {
+			len -= base;
+			if (pglen < len)
+				len = pglen;
+			ret = copy_actor(desc, kaddr + base, len);
+			base = 0;
+		} else {
+			if (pglen < len)
+				len = pglen;
+			ret = copy_actor(desc, kaddr, len);
+		}
+		flush_dcache_page(*ppage);
+		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
+		copied += ret;
+		if (ret != len || !desc->count)
+			goto out;
+		ppage++;
+	} while ((pglen -= len) != 0);
+copy_tail:
+	len = xdr->tail[0].iov_len;
+	if (base < len)
+		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
+out:
+	return copied;
+}
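The walk above is the canonical traversal of an xdr_buf's three regions: head kvec, then page array (offset by page_base), then tail kvec. An illustrative helper, not in the patch, that classifies an offset the same way:

	enum xdr_region { XDR_HEAD, XDR_PAGES, XDR_TAIL };

	static enum xdr_region xdr_offset_region(struct xdr_buf *xdr,
						 unsigned int base)
	{
		if (base < xdr->head[0].iov_len)
			return XDR_HEAD;	/* inline header bytes */
		base -= xdr->head[0].iov_len;
		if (base < xdr->page_len)
			return XDR_PAGES;	/* payload pages */
		return XDR_TAIL;		/* trailing inline bytes */
	}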
+
+/**
+ * csum_partial_copy_to_xdr - checksum and copy data
+ * @xdr: target XDR buffer
+ * @skb: source skb
+ *
+ * We have set things up such that we perform the checksum of the UDP
+ * packet in parallel with the copies into the RPC client iovec.  -DaveM
+ */
+int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
+{
+	skb_reader_t	desc;
+
+	desc.skb = skb;
+	desc.offset = sizeof(struct udphdr);
+	desc.count = skb->len - desc.offset;
+
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		goto no_checksum;
+
+	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+		return -1;
+	if (desc.offset != skb->len) {
+		unsigned int csum2;
+		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
+		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
+	}
+	if (desc.count)
+		return -1;
+	if ((unsigned short)csum_fold(desc.csum))
+		return -1;
+	return 0;
+no_checksum:
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+		return -1;
+	if (desc.count)
+		return -1;
+	return 0;
+}
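The acceptance test deserves spelling out: desc.csum accumulates the ones-complement sum over the UDP header area, every copied byte, and (via skb_checksum) any bytes that were not copied, so on an intact datagram csum_fold() returns 0. A non-zero fold means the packet was damaged in flight and the caller should discard the reply, e.g. (sketch):

	if (csum_partial_copy_to_xdr(xdr, skb))
		goto dropit;	/* bad checksum or short copy: discard */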
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index ed48ff0..2387e7b 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 
 #include <linux/types.h>
-#include <linux/socket.h>
 #include <linux/sched.h>
 #include <linux/uio.h>
 #include <linux/unistd.h>
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 691dea4..f16e7cd 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -548,9 +548,6 @@
 /*
  * Receive a datagram from a UDP socket.
  */
-extern int
-csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb);
-
 static int
 svc_udp_recvfrom(struct svc_rqst *rqstp)
 {
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 1b9616a..d0c9f46 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -119,8 +119,18 @@
 	return 0;
 }
 
+unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
+unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
+unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
+EXPORT_SYMBOL(xprt_min_resvport);
+unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
+EXPORT_SYMBOL(xprt_max_resvport);
+
 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
+static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
+static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
 
 static ctl_table debug_table[] = {
 	{
@@ -177,6 +187,28 @@
 		.extra1		= &min_slot_table_size,
 		.extra2		= &max_slot_table_size
 	},
+	{
+		.ctl_name	= CTL_MIN_RESVPORT,
+		.procname	= "min_resvport",
+		.data		= &xprt_min_resvport,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &xprt_min_resvport_limit,
+		.extra2		= &xprt_max_resvport_limit
+	},
+	{
+		.ctl_name	= CTL_MAX_RESVPORT,
+		.procname	= "max_resvport",
+		.data		= &xprt_max_resvport,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &xprt_min_resvport_limit,
+		.extra2		= &xprt_max_resvport_limit
+	},
 	{ .ctl_name = 0 }
 };
 
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index fde16f4..32df433 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -6,15 +6,12 @@
  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  */
 
+#include <linux/module.h>
 #include <linux/types.h>
-#include <linux/socket.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <net/sock.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/msg_prot.h>
 
@@ -176,178 +173,6 @@
 	xdr->buflen += len;
 }
 
-ssize_t
-xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
-			  skb_reader_t *desc,
-			  skb_read_actor_t copy_actor)
-{
-	struct page	**ppage = xdr->pages;
-	unsigned int	len, pglen = xdr->page_len;
-	ssize_t		copied = 0;
-	int		ret;
-
-	len = xdr->head[0].iov_len;
-	if (base < len) {
-		len -= base;
-		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
-		copied += ret;
-		if (ret != len || !desc->count)
-			goto out;
-		base = 0;
-	} else
-		base -= len;
-
-	if (pglen == 0)
-		goto copy_tail;
-	if (base >= pglen) {
-		base -= pglen;
-		goto copy_tail;
-	}
-	if (base || xdr->page_base) {
-		pglen -= base;
-		base  += xdr->page_base;
-		ppage += base >> PAGE_CACHE_SHIFT;
-		base &= ~PAGE_CACHE_MASK;
-	}
-	do {
-		char *kaddr;
-
-		/* ACL likes to be lazy in allocating pages - ACLs
-		 * are small by default but can get huge. */
-		if (unlikely(*ppage == NULL)) {
-			*ppage = alloc_page(GFP_ATOMIC);
-			if (unlikely(*ppage == NULL)) {
-				if (copied == 0)
-					copied = -ENOMEM;
-				goto out;
-			}
-		}
-
-		len = PAGE_CACHE_SIZE;
-		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
-		if (base) {
-			len -= base;
-			if (pglen < len)
-				len = pglen;
-			ret = copy_actor(desc, kaddr + base, len);
-			base = 0;
-		} else {
-			if (pglen < len)
-				len = pglen;
-			ret = copy_actor(desc, kaddr, len);
-		}
-		flush_dcache_page(*ppage);
-		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
-		copied += ret;
-		if (ret != len || !desc->count)
-			goto out;
-		ppage++;
-	} while ((pglen -= len) != 0);
-copy_tail:
-	len = xdr->tail[0].iov_len;
-	if (base < len)
-		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
-out:
-	return copied;
-}
-
-
-int
-xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
-		struct xdr_buf *xdr, unsigned int base, int msgflags)
-{
-	struct page **ppage = xdr->pages;
-	unsigned int len, pglen = xdr->page_len;
-	int err, ret = 0;
-	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
-
-	len = xdr->head[0].iov_len;
-	if (base < len || (addr != NULL && base == 0)) {
-		struct kvec iov = {
-			.iov_base = xdr->head[0].iov_base + base,
-			.iov_len  = len - base,
-		};
-		struct msghdr msg = {
-			.msg_name    = addr,
-			.msg_namelen = addrlen,
-			.msg_flags   = msgflags,
-		};
-		if (xdr->len > len)
-			msg.msg_flags |= MSG_MORE;
-
-		if (iov.iov_len != 0)
-			err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
-		else
-			err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
-		if (ret == 0)
-			ret = err;
-		else if (err > 0)
-			ret += err;
-		if (err != iov.iov_len)
-			goto out;
-		base = 0;
-	} else
-		base -= len;
-
-	if (pglen == 0)
-		goto copy_tail;
-	if (base >= pglen) {
-		base -= pglen;
-		goto copy_tail;
-	}
-	if (base || xdr->page_base) {
-		pglen -= base;
-		base  += xdr->page_base;
-		ppage += base >> PAGE_CACHE_SHIFT;
-		base &= ~PAGE_CACHE_MASK;
-	}
-
-	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
-	do {
-		int flags = msgflags;
-
-		len = PAGE_CACHE_SIZE;
-		if (base)
-			len -= base;
-		if (pglen < len)
-			len = pglen;
-
-		if (pglen != len || xdr->tail[0].iov_len != 0)
-			flags |= MSG_MORE;
-
-		/* Hmm... We might be dealing with highmem pages */
-		if (PageHighMem(*ppage))
-			sendpage = sock_no_sendpage;
-		err = sendpage(sock, *ppage, base, len, flags);
-		if (ret == 0)
-			ret = err;
-		else if (err > 0)
-			ret += err;
-		if (err != len)
-			goto out;
-		base = 0;
-		ppage++;
-	} while ((pglen -= len) != 0);
-copy_tail:
-	len = xdr->tail[0].iov_len;
-	if (base < len) {
-		struct kvec iov = {
-			.iov_base = xdr->tail[0].iov_base + base,
-			.iov_len  = len - base,
-		};
-		struct msghdr msg = {
-			.msg_flags   = msgflags,
-		};
-		err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
-		if (ret == 0)
-			ret = err;
-		else if (err > 0)
-			ret += err;
-	}
-out:
-	return ret;
-}
-
 
 /*
  * Helper routines for doing 'memmove' like operations on a struct xdr_buf
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c654e0..6dda386 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -10,12 +10,12 @@
  *	one is available. Otherwise, it sleeps on the backlog queue
  *	(xprt_reserve).
  *  -	Next, the caller puts together the RPC message, stuffs it into
- *	the request struct, and calls xprt_call().
- *  -	xprt_call transmits the message and installs the caller on the
- *	socket's wait list. At the same time, it installs a timer that
+ *	the request struct, and calls xprt_transmit().
+ *  -	xprt_transmit sends the message and installs the caller on the
+ *	transport's wait list. At the same time, it installs a timer that
  *	is run after the packet's timeout has expired.
  *  -	When a packet arrives, the data_ready handler walks the list of
- *	pending requests for that socket. If a matching XID is found, the
+ *	pending requests for that transport. If a matching XID is found, the
  *	caller is woken up, and the timer removed.
  *  -	When no reply arrives within the timeout interval, the timer is
  *	fired by the kernel and runs xprt_timer(). It either adjusts the
@@ -33,36 +33,17 @@
  *
  *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
  *
- *  TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
- *  TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
- *  TCP NFS related read + write fixes
- *   (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
- *
- *  Rewrite of larges part of the code in order to stabilize TCP stuff.
- *  Fix behaviour when socket buffer is full.
- *   (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
  */
 
+#include <linux/module.h>
+
 #include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <linux/mm.h>
-#include <linux/udp.h>
-#include <linux/tcp.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/file.h>
+#include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/random.h>
 
-#include <net/sock.h>
-#include <net/checksum.h>
-#include <net/udp.h>
-#include <net/tcp.h>
+#include <linux/sunrpc/clnt.h>
 
 /*
  * Local variables
@@ -73,93 +54,62 @@
 # define RPCDBG_FACILITY	RPCDBG_XPRT
 #endif
 
-#define XPRT_MAX_BACKOFF	(8)
-#define XPRT_IDLE_TIMEOUT	(5*60*HZ)
-#define XPRT_MAX_RESVPORT	(800)
-
 /*
  * Local functions
  */
 static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
 static inline void	do_xprt_reserve(struct rpc_task *);
-static void	xprt_disconnect(struct rpc_xprt *);
 static void	xprt_connect_status(struct rpc_task *task);
-static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap,
-						struct rpc_timeout *to);
-static struct socket *xprt_create_socket(struct rpc_xprt *, int, int);
-static void	xprt_bind_socket(struct rpc_xprt *, struct socket *);
 static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
 
-static int	xprt_clear_backlog(struct rpc_xprt *xprt);
-
-#ifdef RPC_DEBUG_DATA
 /*
- * Print the buffer contents (first 128 bytes only--just enough for
- * diropres return).
+ * The transport code maintains an estimate of the maximum number of
+ * outstanding RPC requests, using a smoothed version of the congestion
+ * avoidance implemented in 44BSD. This is basically the Van Jacobson
+ * congestion algorithm: If a retransmit occurs, the congestion window is
+ * halved; otherwise, it is incremented by 1/cwnd when
+ *
+ *	-	a reply is received and
+ *	-	a full number of requests are outstanding and
+ *	-	the congestion window hasn't been updated recently.
  */
-static void
-xprt_pktdump(char *msg, u32 *packet, unsigned int count)
-{
-	u8	*buf = (u8 *) packet;
-	int	j;
+#define RPC_CWNDSHIFT		(8U)
+#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
+#define RPC_INITCWND		RPC_CWNDSCALE
+#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
 
-	dprintk("RPC:      %s\n", msg);
-	for (j = 0; j < count && j < 128; j += 4) {
-		if (!(j & 31)) {
-			if (j)
-				dprintk("\n");
-			dprintk("0x%04x ", j);
-		}
-		dprintk("%02x%02x%02x%02x ",
-			buf[j], buf[j+1], buf[j+2], buf[j+3]);
-	}
-	dprintk("\n");
-}
-#else
-static inline void
-xprt_pktdump(char *msg, u32 *packet, unsigned int count)
-{
-	/* NOP */
-}
-#endif
+#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
 
-/*
- * Look up RPC transport given an INET socket
+/**
+ * xprt_reserve_xprt - serialize write access to transports
+ * @task: task that is requesting access to the transport
+ *
+ * This prevents mixing the payload of separate requests, and prevents
+ * transport connects from colliding with writes.  No congestion control
+ * is provided.
  */
-static inline struct rpc_xprt *
-xprt_from_sock(struct sock *sk)
+int xprt_reserve_xprt(struct rpc_task *task)
 {
-	return (struct rpc_xprt *) sk->sk_user_data;
-}
-
-/*
- * Serialize write access to sockets, in order to prevent different
- * requests from interfering with each other.
- * Also prevents TCP socket connects from colliding with writes.
- */
-static int
-__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
-{
+	struct rpc_xprt	*xprt = task->tk_xprt;
 	struct rpc_rqst *req = task->tk_rqstp;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
 			return 1;
+		if (task == NULL)
+			return 0;
 		goto out_sleep;
 	}
-	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
-		xprt->snd_task = task;
-		if (req) {
-			req->rq_bytes_sent = 0;
-			req->rq_ntrans++;
-		}
-		return 1;
+	xprt->snd_task = task;
+	if (req) {
+		req->rq_bytes_sent = 0;
+		req->rq_ntrans++;
 	}
-	smp_mb__before_clear_bit();
-	clear_bit(XPRT_LOCKED, &xprt->sockstate);
-	smp_mb__after_clear_bit();
+	return 1;
+
 out_sleep:
-	dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt);
+	dprintk("RPC: %4d failed to lock transport %p\n",
+			task->tk_pid, xprt);
 	task->tk_timeout = 0;
 	task->tk_status = -EAGAIN;
 	if (req && req->rq_ntrans)
@@ -169,26 +119,92 @@
 	return 0;
 }
 
-static inline int
-xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
+/**
+ * xprt_reserve_xprt_cong - serialize write access to transports
+ * @task: task that is requesting access to the transport
+ *
+ * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
+ * integrated into the decision of whether a request is allowed to be
+ * woken up and given access to the transport.
+ */
+int xprt_reserve_xprt_cong(struct rpc_task *task)
+{
+	struct rpc_xprt	*xprt = task->tk_xprt;
+	struct rpc_rqst *req = task->tk_rqstp;
+
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
+		if (task == xprt->snd_task)
+			return 1;
+		goto out_sleep;
+	}
+	if (__xprt_get_cong(xprt, task)) {
+		xprt->snd_task = task;
+		if (req) {
+			req->rq_bytes_sent = 0;
+			req->rq_ntrans++;
+		}
+		return 1;
+	}
+	smp_mb__before_clear_bit();
+	clear_bit(XPRT_LOCKED, &xprt->state);
+	smp_mb__after_clear_bit();
+out_sleep:
+	dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
+	task->tk_timeout = 0;
+	task->tk_status = -EAGAIN;
+	if (req && req->rq_ntrans)
+		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
+	else
+		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+	return 0;
+}
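A transport opts into one locking discipline or the other through its ops vector, which xprt_lock_write() just below indirects through. A hedged sketch, assuming the ops struct behind xprt->ops is named rpc_xprt_ops; the example_* names are illustrative:

	static struct rpc_xprt_ops example_tcp_ops = {
		.reserve_xprt	= xprt_reserve_xprt,	/* stream: no cong. control */
		.release_xprt	= xprt_release_xprt,
	};

	static struct rpc_xprt_ops example_udp_ops = {
		.reserve_xprt	= xprt_reserve_xprt_cong,	/* datagram: VJ window */
		.release_xprt	= xprt_release_xprt_cong,
	};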
+
+static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	int retval;
 
-	spin_lock_bh(&xprt->sock_lock);
-	retval = __xprt_lock_write(xprt, task);
-	spin_unlock_bh(&xprt->sock_lock);
+	spin_lock_bh(&xprt->transport_lock);
+	retval = xprt->ops->reserve_xprt(task);
+	spin_unlock_bh(&xprt->transport_lock);
 	return retval;
 }
 
+static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+{
+	struct rpc_task *task;
+	struct rpc_rqst *req;
 
-static void
-__xprt_lock_write_next(struct rpc_xprt *xprt)
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+		return;
+
+	task = rpc_wake_up_next(&xprt->resend);
+	if (!task) {
+		task = rpc_wake_up_next(&xprt->sending);
+		if (!task)
+			goto out_unlock;
+	}
+
+	req = task->tk_rqstp;
+	xprt->snd_task = task;
+	if (req) {
+		req->rq_bytes_sent = 0;
+		req->rq_ntrans++;
+	}
+	return;
+
+out_unlock:
+	smp_mb__before_clear_bit();
+	clear_bit(XPRT_LOCKED, &xprt->state);
+	smp_mb__after_clear_bit();
+}
+
+static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
 {
 	struct rpc_task *task;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 		return;
-	if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
+	if (RPCXPRT_CONGESTED(xprt))
 		goto out_unlock;
 	task = rpc_wake_up_next(&xprt->resend);
 	if (!task) {
@@ -196,7 +212,7 @@
 		if (!task)
 			goto out_unlock;
 	}
-	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+	if (__xprt_get_cong(xprt, task)) {
 		struct rpc_rqst *req = task->tk_rqstp;
 		xprt->snd_task = task;
 		if (req) {
@@ -207,87 +223,52 @@
 	}
 out_unlock:
 	smp_mb__before_clear_bit();
-	clear_bit(XPRT_LOCKED, &xprt->sockstate);
+	clear_bit(XPRT_LOCKED, &xprt->state);
 	smp_mb__after_clear_bit();
 }
 
-/*
- * Releases the socket for use by other requests.
+/**
+ * xprt_release_xprt - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL.  No congestion control is provided.
  */
-static void
-__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	if (xprt->snd_task == task) {
 		xprt->snd_task = NULL;
 		smp_mb__before_clear_bit();
-		clear_bit(XPRT_LOCKED, &xprt->sockstate);
+		clear_bit(XPRT_LOCKED, &xprt->state);
 		smp_mb__after_clear_bit();
 		__xprt_lock_write_next(xprt);
 	}
 }
 
-static inline void
-xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+/**
+ * xprt_release_xprt_cong - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL.  Another task is awoken to use the
+ * transport if the transport's congestion window allows it.
+ */
+void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-	spin_lock_bh(&xprt->sock_lock);
-	__xprt_release_write(xprt, task);
-	spin_unlock_bh(&xprt->sock_lock);
+	if (xprt->snd_task == task) {
+		xprt->snd_task = NULL;
+		smp_mb__before_clear_bit();
+		clear_bit(XPRT_LOCKED, &xprt->state);
+		smp_mb__after_clear_bit();
+		__xprt_lock_write_next_cong(xprt);
+	}
 }
 
-/*
- * Write data to socket.
- */
-static inline int
-xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req)
+static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-	struct socket	*sock = xprt->sock;
-	struct xdr_buf	*xdr = &req->rq_snd_buf;
-	struct sockaddr *addr = NULL;
-	int addrlen = 0;
-	unsigned int	skip;
-	int		result;
-
-	if (!sock)
-		return -ENOTCONN;
-
-	xprt_pktdump("packet data:",
-				req->rq_svec->iov_base,
-				req->rq_svec->iov_len);
-
-	/* For UDP, we need to provide an address */
-	if (!xprt->stream) {
-		addr = (struct sockaddr *) &xprt->addr;
-		addrlen = sizeof(xprt->addr);
-	}
-	/* Dont repeat bytes */
-	skip = req->rq_bytes_sent;
-
-	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
-	result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT);
-
-	dprintk("RPC:      xprt_sendmsg(%d) = %d\n", xdr->len - skip, result);
-
-	if (result >= 0)
-		return result;
-
-	switch (result) {
-	case -ECONNREFUSED:
-		/* When the server has died, an ICMP port unreachable message
-		 * prompts ECONNREFUSED.
-		 */
-	case -EAGAIN:
-		break;
-	case -ECONNRESET:
-	case -ENOTCONN:
-	case -EPIPE:
-		/* connection broken */
-		if (xprt->stream)
-			result = -ENOTCONN;
-		break;
-	default:
-		printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result);
-	}
-	return result;
+	spin_lock_bh(&xprt->transport_lock);
+	xprt->ops->release_xprt(xprt, task);
+	spin_unlock_bh(&xprt->transport_lock);
 }
 
 /*
@@ -321,26 +302,40 @@
 		return;
 	req->rq_cong = 0;
 	xprt->cong -= RPC_CWNDSCALE;
-	__xprt_lock_write_next(xprt);
+	__xprt_lock_write_next_cong(xprt);
 }
 
-/*
- * Adjust RPC congestion window
+/**
+ * xprt_release_rqst_cong - housekeeping when request is complete
+ * @task: RPC task whose request has recently completed
+ *
+ * Useful for transports that require congestion control.
+ */
+void xprt_release_rqst_cong(struct rpc_task *task)
+{
+	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
+}
+
+/**
+ * xprt_adjust_cwnd - adjust transport congestion window
+ * @task: recently completed RPC request used to adjust window
+ * @result: result code of completed RPC request
+ *
  * We use a time-smoothed congestion estimator to avoid heavy oscillation.
  */
-static void
-xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
+void xprt_adjust_cwnd(struct rpc_task *task, int result)
 {
-	unsigned long	cwnd;
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_xprt *xprt = task->tk_xprt;
+	unsigned long cwnd = xprt->cwnd;
 
-	cwnd = xprt->cwnd;
 	if (result >= 0 && cwnd <= xprt->cong) {
 		/* The (cwnd >> 1) term makes sure
 		 * the result gets rounded properly. */
 		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
 		if (cwnd > RPC_MAXCWND(xprt))
 			cwnd = RPC_MAXCWND(xprt);
-		__xprt_lock_write_next(xprt);
+		__xprt_lock_write_next_cong(xprt);
 	} else if (result == -ETIMEDOUT) {
 		cwnd >>= 1;
 		if (cwnd < RPC_CWNDSCALE)
@@ -349,11 +344,89 @@
 	dprintk("RPC:      cong %ld, cwnd was %ld, now %ld\n",
 			xprt->cong, xprt->cwnd, cwnd);
 	xprt->cwnd = cwnd;
+	__xprt_put_cong(xprt, req);
+}
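Worked numbers for the update above, with RPC_CWNDSCALE = 256: take cwnd = 512 (two requests in flight) and a reply arriving while the window is full:

	cwnd += (256 * 256 + 256) / 512;	/* += 128: half a request */

so the window grows by roughly one request per window's worth of replies, while -ETIMEDOUT halves it (512 >> 1 = 256) and the clamp keeps it at or above RPC_CWNDSCALE.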
+
+/**
+ * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
+ * @xprt: transport with waiting tasks
+ * @status: result code to plant in each task before waking it
+ *
+ */
+void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
+{
+	if (status < 0)
+		rpc_wake_up_status(&xprt->pending, status);
+	else
+		rpc_wake_up(&xprt->pending);
+}
+
+/**
+ * xprt_wait_for_buffer_space - wait for transport output buffer to clear
+ * @task: task to be put to sleep
+ *
+ */
+void xprt_wait_for_buffer_space(struct rpc_task *task)
+{
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_xprt *xprt = req->rq_xprt;
+
+	task->tk_timeout = req->rq_timeout;
+	rpc_sleep_on(&xprt->pending, task, NULL, NULL);
+}
+
+/**
+ * xprt_write_space - wake the task waiting for transport output buffer space
+ * @xprt: transport with waiting tasks
+ *
+ * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
+ */
+void xprt_write_space(struct rpc_xprt *xprt)
+{
+	if (unlikely(xprt->shutdown))
+		return;
+
+	spin_lock_bh(&xprt->transport_lock);
+	if (xprt->snd_task) {
+		dprintk("RPC:      write space: waking waiting task on xprt %p\n",
+				xprt);
+		rpc_wake_up_task(xprt->snd_task);
+	}
+	spin_unlock_bh(&xprt->transport_lock);
+}
+
+/**
+ * xprt_set_retrans_timeout_def - set a request's retransmit timeout
+ * @task: task whose timeout is to be set
+ *
+ * Set a request's retransmit timeout based on the transport's
+ * default timeout parameters.  Used by transports that don't adjust
+ * the retransmit timeout based on round-trip time estimation.
+ */
+void xprt_set_retrans_timeout_def(struct rpc_task *task)
+{
+	task->tk_timeout = task->tk_rqstp->rq_timeout;
 }
 
 /*
- * Reset the major timeout value
+ * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
+ * @task: task whose timeout is to be set
+ * 
+ * Set a request's retransmit timeout using the RTT estimator.
  */
+void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
+{
+	int timer = task->tk_msg.rpc_proc->p_timer;
+	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
+	struct rpc_rqst *req = task->tk_rqstp;
+	unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;
+
+	task->tk_timeout = rpc_calc_rto(rtt, timer);
+	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
+	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
+		task->tk_timeout = max_timeout;
+}
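
The shift above doubles the RTT-derived estimate once for every timeout already recorded against this procedure (rpc_ntimeo) and once more per retransmission of this request, then clamps the result to the transport's maximum. A small sketch with assumed numbers (none of these values come from the code):

	static unsigned long demo_rtt_timeout(unsigned long rto, unsigned int ntimeo,
					      unsigned int retries, unsigned long maxval)
	{
		unsigned long timeout = rto << (ntimeo + retries);

		/* the == 0 test also catches the shift overflowing to zero */
		if (timeout > maxval || timeout == 0)
			timeout = maxval;
		return timeout;
	}

	/* demo_rtt_timeout(100, 1, 2, 6000) == 100 << 3 == 800 jiffies;
	 * demo_rtt_timeout(100, 4, 3, 6000) == 12800 -> clamped to 6000. */
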
+
 static void xprt_reset_majortimeo(struct rpc_rqst *req)
 {
 	struct rpc_timeout *to = &req->rq_xprt->timeout;
@@ -368,8 +441,10 @@
 	req->rq_majortimeo += jiffies;
 }
 
-/*
- * Adjust timeout values etc for next retransmit
+/**
+ * xprt_adjust_timeout - adjust timeout values for next retransmit
+ * @req: RPC request containing parameters to use for the adjustment
+ *
  */
 int xprt_adjust_timeout(struct rpc_rqst *req)
 {
@@ -391,9 +466,9 @@
 		req->rq_retries = 0;
 		xprt_reset_majortimeo(req);
 		/* Reset the RTT counters == "slow start" */
-		spin_lock_bh(&xprt->sock_lock);
+		spin_lock_bh(&xprt->transport_lock);
 		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
-		spin_unlock_bh(&xprt->sock_lock);
+		spin_unlock_bh(&xprt->transport_lock);
 		pprintk("RPC: %lu timeout\n", jiffies);
 		status = -ETIMEDOUT;
 	}
@@ -405,133 +480,52 @@
 	return status;
 }
 
-/*
- * Close down a transport socket
- */
-static void
-xprt_close(struct rpc_xprt *xprt)
-{
-	struct socket	*sock = xprt->sock;
-	struct sock	*sk = xprt->inet;
-
-	if (!sk)
-		return;
-
-	write_lock_bh(&sk->sk_callback_lock);
-	xprt->inet = NULL;
-	xprt->sock = NULL;
-
-	sk->sk_user_data    = NULL;
-	sk->sk_data_ready   = xprt->old_data_ready;
-	sk->sk_state_change = xprt->old_state_change;
-	sk->sk_write_space  = xprt->old_write_space;
-	write_unlock_bh(&sk->sk_callback_lock);
-
-	sk->sk_no_check	 = 0;
-
-	sock_release(sock);
-}
-
-static void
-xprt_socket_autoclose(void *args)
+static void xprt_autoclose(void *args)
 {
 	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
 
 	xprt_disconnect(xprt);
-	xprt_close(xprt);
+	xprt->ops->close(xprt);
 	xprt_release_write(xprt, NULL);
 }
 
-/*
- * Mark a transport as disconnected
+/**
+ * xprt_disconnect - mark a transport as disconnected
+ * @xprt: transport to flag for disconnect
+ *
  */
-static void
-xprt_disconnect(struct rpc_xprt *xprt)
+void xprt_disconnect(struct rpc_xprt *xprt)
 {
 	dprintk("RPC:      disconnected transport %p\n", xprt);
-	spin_lock_bh(&xprt->sock_lock);
+	spin_lock_bh(&xprt->transport_lock);
 	xprt_clear_connected(xprt);
-	rpc_wake_up_status(&xprt->pending, -ENOTCONN);
-	spin_unlock_bh(&xprt->sock_lock);
+	xprt_wake_pending_tasks(xprt, -ENOTCONN);
+	spin_unlock_bh(&xprt->transport_lock);
 }
 
-/*
- * Used to allow disconnection when we've been idle
- */
 static void
 xprt_init_autodisconnect(unsigned long data)
 {
 	struct rpc_xprt *xprt = (struct rpc_xprt *)data;
 
-	spin_lock(&xprt->sock_lock);
+	spin_lock(&xprt->transport_lock);
 	if (!list_empty(&xprt->recv) || xprt->shutdown)
 		goto out_abort;
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 		goto out_abort;
-	spin_unlock(&xprt->sock_lock);
-	/* Let keventd close the socket */
-	if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0)
+	spin_unlock(&xprt->transport_lock);
+	if (xprt_connecting(xprt))
 		xprt_release_write(xprt, NULL);
 	else
 		schedule_work(&xprt->task_cleanup);
 	return;
 out_abort:
-	spin_unlock(&xprt->sock_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 
-static void xprt_socket_connect(void *args)
-{
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
-	struct socket *sock = xprt->sock;
-	int status = -EIO;
-
-	if (xprt->shutdown || xprt->addr.sin_port == 0)
-		goto out;
-
-	/*
-	 * Start by resetting any existing state
-	 */
-	xprt_close(xprt);
-	sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport);
-	if (sock == NULL) {
-		/* couldn't create socket or bind to reserved port;
-		 * this is likely a permanent error, so cause an abort */
-		goto out;
-	}
-	xprt_bind_socket(xprt, sock);
-	xprt_sock_setbufsize(xprt);
-
-	status = 0;
-	if (!xprt->stream)
-		goto out;
-
-	/*
-	 * Tell the socket layer to start connecting...
-	 */
-	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
-			sizeof(xprt->addr), O_NONBLOCK);
-	dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
-			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
-	if (status < 0) {
-		switch (status) {
-			case -EINPROGRESS:
-			case -EALREADY:
-				goto out_clear;
-		}
-	}
-out:
-	if (status < 0)
-		rpc_wake_up_status(&xprt->pending, status);
-	else
-		rpc_wake_up(&xprt->pending);
-out_clear:
-	smp_mb__before_clear_bit();
-	clear_bit(XPRT_CONNECTING, &xprt->sockstate);
-	smp_mb__after_clear_bit();
-}
-
-/*
- * Attempt to connect a TCP socket.
+/**
+ * xprt_connect - schedule a transport connect operation
+ * @task: RPC task that is requesting the connect
  *
  */
 void xprt_connect(struct rpc_task *task)
@@ -552,37 +546,19 @@
 	if (!xprt_lock_write(xprt, task))
 		return;
 	if (xprt_connected(xprt))
-		goto out_write;
+		xprt_release_write(xprt, task);
+	else {
+		if (task->tk_rqstp)
+			task->tk_rqstp->rq_bytes_sent = 0;
 
-	if (task->tk_rqstp)
-		task->tk_rqstp->rq_bytes_sent = 0;
-
-	task->tk_timeout = RPC_CONNECT_TIMEOUT;
-	rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
-	if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) {
-		/* Note: if we are here due to a dropped connection
-		 * 	 we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ
-		 * 	 seconds
-		 */
-		if (xprt->sock != NULL)
-			schedule_delayed_work(&xprt->sock_connect,
-					RPC_REESTABLISH_TIMEOUT);
-		else {
-			schedule_work(&xprt->sock_connect);
-			if (!RPC_IS_ASYNC(task))
-				flush_scheduled_work();
-		}
+		task->tk_timeout = xprt->connect_timeout;
+		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
+		xprt->ops->connect(task);
 	}
 	return;
- out_write:
-	xprt_release_write(xprt, task);
 }
 
-/*
- * We arrive here when awoken from waiting on connection establishment.
- */
-static void
-xprt_connect_status(struct rpc_task *task)
+static void xprt_connect_status(struct rpc_task *task)
 {
 	struct rpc_xprt	*xprt = task->tk_xprt;
 
@@ -592,31 +568,42 @@
 		return;
 	}
 
-	/* if soft mounted, just cause this RPC to fail */
-	if (RPC_IS_SOFT(task))
-		task->tk_status = -EIO;
-
 	switch (task->tk_status) {
 	case -ECONNREFUSED:
 	case -ECONNRESET:
+		dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
+				task->tk_pid, task->tk_client->cl_server);
+		break;
 	case -ENOTCONN:
-		return;
+		dprintk("RPC: %4d xprt_connect_status: connection broken\n",
+				task->tk_pid);
+		break;
 	case -ETIMEDOUT:
-		dprintk("RPC: %4d xprt_connect_status: timed out\n",
+		dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
 				task->tk_pid);
 		break;
 	default:
-		printk(KERN_ERR "RPC: error %d connecting to server %s\n",
-				-task->tk_status, task->tk_client->cl_server);
+		dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
+				task->tk_pid, -task->tk_status, task->tk_client->cl_server);
+		xprt_release_write(xprt, task);
+		task->tk_status = -EIO;
+		return;
 	}
-	xprt_release_write(xprt, task);
+
+	/* if soft mounted, just cause this RPC to fail */
+	if (RPC_IS_SOFT(task)) {
+		xprt_release_write(xprt, task);
+		task->tk_status = -EIO;
+	}
 }
 
-/*
- * Look up the RPC request corresponding to a reply, and then lock it.
+/**
+ * xprt_lookup_rqst - find an RPC request corresponding to an XID
+ * @xprt: transport on which the original request was transmitted
+ * @xid: RPC XID of incoming reply
+ *
  */
-static inline struct rpc_rqst *
-xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
+struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
 {
 	struct list_head *pos;
 	struct rpc_rqst	*req = NULL;
@@ -631,556 +618,68 @@
 	return req;
 }
 
-/*
- * Complete reply received.
- * The TCP code relies on us to remove the request from xprt->pending.
+/**
+ * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
+ * @task: RPC task whose request recently completed
+ *
  */
-static void
-xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
+void xprt_update_rtt(struct rpc_task *task)
 {
-	struct rpc_task	*task = req->rq_task;
-	struct rpc_clnt *clnt = task->tk_client;
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
+	unsigned timer = task->tk_msg.rpc_proc->p_timer;
 
-	/* Adjust congestion window */
-	if (!xprt->nocong) {
-		unsigned timer = task->tk_msg.rpc_proc->p_timer;
-		xprt_adjust_cwnd(xprt, copied);
-		__xprt_put_cong(xprt, req);
-		if (timer) {
-			if (req->rq_ntrans == 1)
-				rpc_update_rtt(clnt->cl_rtt, timer,
-						(long)jiffies - req->rq_xtime);
-			rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1);
-		}
+	if (timer) {
+		if (req->rq_ntrans == 1)
+			rpc_update_rtt(rtt, timer,
+					(long)jiffies - req->rq_xtime);
+		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
 	}
+}
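
Two details are worth calling out: the rq_ntrans == 1 test is Karn's algorithm (a reply to a request that has ever been retransmitted cannot be matched to one particular transmission, so it contributes no RTT sample), and the estimator behind rpc_update_rtt lives in net/sunrpc/timer.c. A generic sketch of that style of fixed-point smoothed estimator, with illustrative shift constants rather than the ones the real code uses:

	/* srtt scaled by 8, sdrtt by 4, in the classic Van Jacobson manner */
	static void demo_update_rtt(long *srtt, long *sdrtt, long sample)
	{
		long err = sample - (*srtt >> 3);

		*srtt += err;			/* srtt <- 7/8 srtt + 1/8 sample */
		if (err < 0)
			err = -err;
		*sdrtt += err - (*sdrtt >> 2);	/* smoothed mean deviation */
	}
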
 
-#ifdef RPC_PROFILE
-	/* Profile only reads for now */
-	if (copied > 1024) {
-		static unsigned long	nextstat;
-		static unsigned long	pkt_rtt, pkt_len, pkt_cnt;
+/**
+ * xprt_complete_rqst - called when reply processing is complete
+ * @task: RPC task whose request recently completed
+ * @copied: actual number of bytes received from the transport
+ *
+ * Caller holds transport lock.
+ */
+void xprt_complete_rqst(struct rpc_task *task, int copied)
+{
+	struct rpc_rqst *req = task->tk_rqstp;
 
-		pkt_cnt++;
-		pkt_len += req->rq_slen + copied;
-		pkt_rtt += jiffies - req->rq_xtime;
-		if (time_before(nextstat, jiffies)) {
-			printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd);
-			printk("RPC: %ld %ld %ld %ld stat\n",
-					jiffies, pkt_cnt, pkt_len, pkt_rtt);
-			pkt_rtt = pkt_len = pkt_cnt = 0;
-			nextstat = jiffies + 5 * HZ;
-		}
-	}
-#endif
+	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
+			task->tk_pid, ntohl(req->rq_xid), copied);
 
-	dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied);
 	list_del_init(&req->rq_list);
 	req->rq_received = req->rq_private_buf.len = copied;
-
-	/* ... and wake up the process. */
 	rpc_wake_up_task(task);
-	return;
 }
 
-static size_t
-skb_read_bits(skb_reader_t *desc, void *to, size_t len)
+static void xprt_timer(struct rpc_task *task)
 {
-	if (len > desc->count)
-		len = desc->count;
-	if (skb_copy_bits(desc->skb, desc->offset, to, len))
-		return 0;
-	desc->count -= len;
-	desc->offset += len;
-	return len;
-}
-
-static size_t
-skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
-{
-	unsigned int csum2, pos;
-
-	if (len > desc->count)
-		len = desc->count;
-	pos = desc->offset;
-	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
-	desc->csum = csum_block_add(desc->csum, csum2, pos);
-	desc->count -= len;
-	desc->offset += len;
-	return len;
-}
-
-/*
- * We have set things up such that we perform the checksum of the UDP
- * packet in parallel with the copies into the RPC client iovec.  -DaveM
- */
-int
-csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
-{
-	skb_reader_t desc;
-
-	desc.skb = skb;
-	desc.offset = sizeof(struct udphdr);
-	desc.count = skb->len - desc.offset;
-
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
-		goto no_checksum;
-
-	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
-		return -1;
-	if (desc.offset != skb->len) {
-		unsigned int csum2;
-		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
-		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
-	}
-	if (desc.count)
-		return -1;
-	if ((unsigned short)csum_fold(desc.csum))
-		return -1;
-	return 0;
-no_checksum:
-	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
-		return -1;
-	if (desc.count)
-		return -1;
-	return 0;
-}
-
-/*
- * Input handler for RPC replies. Called from a bottom half and hence
- * atomic.
- */
-static void
-udp_data_ready(struct sock *sk, int len)
-{
-	struct rpc_task	*task;
-	struct rpc_xprt	*xprt;
-	struct rpc_rqst *rovr;
-	struct sk_buff	*skb;
-	int err, repsize, copied;
-	u32 _xid, *xp;
-
-	read_lock(&sk->sk_callback_lock);
-	dprintk("RPC:      udp_data_ready...\n");
-	if (!(xprt = xprt_from_sock(sk))) {
-		printk("RPC:      udp_data_ready request not found!\n");
-		goto out;
-	}
-
-	dprintk("RPC:      udp_data_ready client %p\n", xprt);
-
-	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
-		goto out;
-
-	if (xprt->shutdown)
-		goto dropit;
-
-	repsize = skb->len - sizeof(struct udphdr);
-	if (repsize < 4) {
-		printk("RPC: impossible RPC reply size %d!\n", repsize);
-		goto dropit;
-	}
-
-	/* Copy the XID from the skb... */
-	xp = skb_header_pointer(skb, sizeof(struct udphdr),
-				sizeof(_xid), &_xid);
-	if (xp == NULL)
-		goto dropit;
-
-	/* Look up and lock the request corresponding to the given XID */
-	spin_lock(&xprt->sock_lock);
-	rovr = xprt_lookup_rqst(xprt, *xp);
-	if (!rovr)
-		goto out_unlock;
-	task = rovr->rq_task;
-
-	dprintk("RPC: %4d received reply\n", task->tk_pid);
-
-	if ((copied = rovr->rq_private_buf.buflen) > repsize)
-		copied = repsize;
-
-	/* Suck it into the iovec, verify checksum if not done by hw. */
-	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
-		goto out_unlock;
-
-	/* Something worked... */
-	dst_confirm(skb->dst);
-
-	xprt_complete_rqst(xprt, rovr, copied);
-
- out_unlock:
-	spin_unlock(&xprt->sock_lock);
- dropit:
-	skb_free_datagram(sk, skb);
- out:
-	read_unlock(&sk->sk_callback_lock);
-}
-
-/*
- * Copy from an skb into memory and shrink the skb.
- */
-static inline size_t
-tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
-{
-	if (len > desc->count)
-		len = desc->count;
-	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
-		dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
-				len, desc->count);
-		return 0;
-	}
-	desc->offset += len;
-	desc->count -= len;
-	dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
-			len, desc->count);
-	return len;
-}
-
-/*
- * TCP read fragment marker
- */
-static inline void
-tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-	size_t len, used;
-	char *p;
-
-	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
-	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
-	used = tcp_copy_data(desc, p, len);
-	xprt->tcp_offset += used;
-	if (used != len)
-		return;
-	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
-	if (xprt->tcp_reclen & 0x80000000)
-		xprt->tcp_flags |= XPRT_LAST_FRAG;
-	else
-		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
-	xprt->tcp_reclen &= 0x7fffffff;
-	xprt->tcp_flags &= ~XPRT_COPY_RECM;
-	xprt->tcp_offset = 0;
-	/* Sanity check of the record length */
-	if (xprt->tcp_reclen < 4) {
-		printk(KERN_ERR "RPC: Invalid TCP record fragment length\n");
-		xprt_disconnect(xprt);
-	}
-	dprintk("RPC:      reading TCP record fragment of length %d\n",
-			xprt->tcp_reclen);
-}
-
-static void
-tcp_check_recm(struct rpc_xprt *xprt)
-{
-	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
-			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
-	if (xprt->tcp_offset == xprt->tcp_reclen) {
-		xprt->tcp_flags |= XPRT_COPY_RECM;
-		xprt->tcp_offset = 0;
-		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
-			xprt->tcp_flags &= ~XPRT_COPY_DATA;
-			xprt->tcp_flags |= XPRT_COPY_XID;
-			xprt->tcp_copied = 0;
-		}
-	}
-}
-
-/*
- * TCP read xid
- */
-static inline void
-tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-	size_t len, used;
-	char *p;
-
-	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
-	dprintk("RPC:      reading XID (%Zu bytes)\n", len);
-	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
-	used = tcp_copy_data(desc, p, len);
-	xprt->tcp_offset += used;
-	if (used != len)
-		return;
-	xprt->tcp_flags &= ~XPRT_COPY_XID;
-	xprt->tcp_flags |= XPRT_COPY_DATA;
-	xprt->tcp_copied = 4;
-	dprintk("RPC:      reading reply for XID %08x\n",
-						ntohl(xprt->tcp_xid));
-	tcp_check_recm(xprt);
-}
-
-/*
- * TCP read and complete request
- */
-static inline void
-tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-	struct rpc_rqst *req;
-	struct xdr_buf *rcvbuf;
-	size_t len;
-	ssize_t r;
-
-	/* Find and lock the request corresponding to this xid */
-	spin_lock(&xprt->sock_lock);
-	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
-	if (!req) {
-		xprt->tcp_flags &= ~XPRT_COPY_DATA;
-		dprintk("RPC:      XID %08x request not found!\n",
-				ntohl(xprt->tcp_xid));
-		spin_unlock(&xprt->sock_lock);
-		return;
-	}
-
-	rcvbuf = &req->rq_private_buf;
-	len = desc->count;
-	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
-		skb_reader_t my_desc;
-
-		len = xprt->tcp_reclen - xprt->tcp_offset;
-		memcpy(&my_desc, desc, sizeof(my_desc));
-		my_desc.count = len;
-		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-					  &my_desc, tcp_copy_data);
-		desc->count -= r;
-		desc->offset += r;
-	} else
-		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-					  desc, tcp_copy_data);
-
-	if (r > 0) {
-		xprt->tcp_copied += r;
-		xprt->tcp_offset += r;
-	}
-	if (r != len) {
-		/* Error when copying to the receive buffer,
-		 * usually because we weren't able to allocate
-		 * additional buffer pages. All we can do now
-		 * is turn off XPRT_COPY_DATA, so the request
-		 * will not receive any additional updates,
-		 * and time out.
-		 * Any remaining data from this record will
-		 * be discarded.
-		 */
-		xprt->tcp_flags &= ~XPRT_COPY_DATA;
-		dprintk("RPC:      XID %08x truncated request\n",
-				ntohl(xprt->tcp_xid));
-		dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
-		goto out;
-	}
-
-	dprintk("RPC:      XID %08x read %Zd bytes\n",
-			ntohl(xprt->tcp_xid), r);
-	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
-
-	if (xprt->tcp_copied == req->rq_private_buf.buflen)
-		xprt->tcp_flags &= ~XPRT_COPY_DATA;
-	else if (xprt->tcp_offset == xprt->tcp_reclen) {
-		if (xprt->tcp_flags & XPRT_LAST_FRAG)
-			xprt->tcp_flags &= ~XPRT_COPY_DATA;
-	}
-
-out:
-	if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
-		dprintk("RPC: %4d received reply complete\n",
-				req->rq_task->tk_pid);
-		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
-	}
-	spin_unlock(&xprt->sock_lock);
-	tcp_check_recm(xprt);
-}
-
-/*
- * TCP discard extra bytes from a short read
- */
-static inline void
-tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
-	size_t len;
-
-	len = xprt->tcp_reclen - xprt->tcp_offset;
-	if (len > desc->count)
-		len = desc->count;
-	desc->count -= len;
-	desc->offset += len;
-	xprt->tcp_offset += len;
-	dprintk("RPC:      discarded %Zu bytes\n", len);
-	tcp_check_recm(xprt);
-}
-
-/*
- * TCP record receive routine
- * We first have to grab the record marker, then the XID, then the data.
- */
-static int
-tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
-		unsigned int offset, size_t len)
-{
-	struct rpc_xprt *xprt = rd_desc->arg.data;
-	skb_reader_t desc = {
-		.skb	= skb,
-		.offset	= offset,
-		.count	= len,
-		.csum	= 0
-       	};
-
-	dprintk("RPC:      tcp_data_recv\n");
-	do {
-		/* Read in a new fragment marker if necessary */
-		/* Can we ever really expect to get completely empty fragments? */
-		if (xprt->tcp_flags & XPRT_COPY_RECM) {
-			tcp_read_fraghdr(xprt, &desc);
-			continue;
-		}
-		/* Read in the xid if necessary */
-		if (xprt->tcp_flags & XPRT_COPY_XID) {
-			tcp_read_xid(xprt, &desc);
-			continue;
-		}
-		/* Read in the request data */
-		if (xprt->tcp_flags & XPRT_COPY_DATA) {
-			tcp_read_request(xprt, &desc);
-			continue;
-		}
-		/* Skip over any trailing bytes on short reads */
-		tcp_read_discard(xprt, &desc);
-	} while (desc.count);
-	dprintk("RPC:      tcp_data_recv done\n");
-	return len - desc.count;
-}
-
-static void tcp_data_ready(struct sock *sk, int bytes)
-{
-	struct rpc_xprt *xprt;
-	read_descriptor_t rd_desc;
-
-	read_lock(&sk->sk_callback_lock);
-	dprintk("RPC:      tcp_data_ready...\n");
-	if (!(xprt = xprt_from_sock(sk))) {
-		printk("RPC:      tcp_data_ready socket info not found!\n");
-		goto out;
-	}
-	if (xprt->shutdown)
-		goto out;
-
-	/* We use rd_desc to pass struct xprt to tcp_data_recv */
-	rd_desc.arg.data = xprt;
-	rd_desc.count = 65536;
-	tcp_read_sock(sk, &rd_desc, tcp_data_recv);
-out:
-	read_unlock(&sk->sk_callback_lock);
-}
-
-static void
-tcp_state_change(struct sock *sk)
-{
-	struct rpc_xprt	*xprt;
-
-	read_lock(&sk->sk_callback_lock);
-	if (!(xprt = xprt_from_sock(sk)))
-		goto out;
-	dprintk("RPC:      tcp_state_change client %p...\n", xprt);
-	dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
-				sk->sk_state, xprt_connected(xprt),
-				sock_flag(sk, SOCK_DEAD),
-				sock_flag(sk, SOCK_ZAPPED));
-
-	switch (sk->sk_state) {
-	case TCP_ESTABLISHED:
-		spin_lock_bh(&xprt->sock_lock);
-		if (!xprt_test_and_set_connected(xprt)) {
-			/* Reset TCP record info */
-			xprt->tcp_offset = 0;
-			xprt->tcp_reclen = 0;
-			xprt->tcp_copied = 0;
-			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
-			rpc_wake_up(&xprt->pending);
-		}
-		spin_unlock_bh(&xprt->sock_lock);
-		break;
-	case TCP_SYN_SENT:
-	case TCP_SYN_RECV:
-		break;
-	default:
-		xprt_disconnect(xprt);
-		break;
-	}
- out:
-	read_unlock(&sk->sk_callback_lock);
-}
-
-/*
- * Called when more output buffer space is available for this socket.
- * We try not to wake our writers until they can make "significant"
- * progress, otherwise we'll waste resources thrashing sock_sendmsg
- * with a bunch of small requests.
- */
-static void
-xprt_write_space(struct sock *sk)
-{
-	struct rpc_xprt	*xprt;
-	struct socket	*sock;
-
-	read_lock(&sk->sk_callback_lock);
-	if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
-		goto out;
-	if (xprt->shutdown)
-		goto out;
-
-	/* Wait until we have enough socket memory */
-	if (xprt->stream) {
-		/* from net/core/stream.c:sk_stream_write_space */
-		if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))
-			goto out;
-	} else {
-		/* from net/core/sock.c:sock_def_write_space */
-		if (!sock_writeable(sk))
-			goto out;
-	}
-
-	if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
-		goto out;
-
-	spin_lock_bh(&xprt->sock_lock);
-	if (xprt->snd_task)
-		rpc_wake_up_task(xprt->snd_task);
-	spin_unlock_bh(&xprt->sock_lock);
-out:
-	read_unlock(&sk->sk_callback_lock);
-}
-
-/*
- * RPC receive timeout handler.
- */
-static void
-xprt_timer(struct rpc_task *task)
-{
-	struct rpc_rqst	*req = task->tk_rqstp;
+	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
 
-	spin_lock(&xprt->sock_lock);
-	if (req->rq_received)
-		goto out;
+	dprintk("RPC: %4d xprt_timer\n", task->tk_pid);
 
-	xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
-	__xprt_put_cong(xprt, req);
-
-	dprintk("RPC: %4d xprt_timer (%s request)\n",
-		task->tk_pid, req ? "pending" : "backlogged");
-
-	task->tk_status  = -ETIMEDOUT;
-out:
+	spin_lock(&xprt->transport_lock);
+	if (!req->rq_received) {
+		if (xprt->ops->timer)
+			xprt->ops->timer(task);
+		task->tk_status = -ETIMEDOUT;
+	}
 	task->tk_timeout = 0;
 	rpc_wake_up_task(task);
-	spin_unlock(&xprt->sock_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 
-/*
- * Place the actual RPC call.
- * We have to copy the iovec because sendmsg fiddles with its contents.
+/**
+ * xprt_prepare_transmit - reserve the transport before sending a request
+ * @task: RPC task about to send a request
+ *
  */
-int
-xprt_prepare_transmit(struct rpc_task *task)
+int xprt_prepare_transmit(struct rpc_task *task)
 {
 	struct rpc_rqst	*req = task->tk_rqstp;
 	struct rpc_xprt	*xprt = req->rq_xprt;
@@ -1191,12 +690,12 @@
 	if (xprt->shutdown)
 		return -EIO;
 
-	spin_lock_bh(&xprt->sock_lock);
+	spin_lock_bh(&xprt->transport_lock);
 	if (req->rq_received && !req->rq_bytes_sent) {
 		err = req->rq_received;
 		goto out_unlock;
 	}
-	if (!__xprt_lock_write(xprt, task)) {
+	if (!xprt->ops->reserve_xprt(task)) {
 		err = -EAGAIN;
 		goto out_unlock;
 	}
@@ -1206,39 +705,42 @@
 		goto out_unlock;
 	}
 out_unlock:
-	spin_unlock_bh(&xprt->sock_lock);
+	spin_unlock_bh(&xprt->transport_lock);
 	return err;
 }
 
 void
-xprt_transmit(struct rpc_task *task)
+xprt_abort_transmit(struct rpc_task *task)
 {
-	struct rpc_clnt *clnt = task->tk_client;
+	struct rpc_xprt	*xprt = task->tk_xprt;
+
+	xprt_release_write(xprt, task);
+}
+
+/**
+ * xprt_transmit - send an RPC request on a transport
+ * @task: controlling RPC task
+ *
+ * We have to copy the iovec because sendmsg fiddles with its contents.
+ */
+void xprt_transmit(struct rpc_task *task)
+{
 	struct rpc_rqst	*req = task->tk_rqstp;
 	struct rpc_xprt	*xprt = req->rq_xprt;
-	int status, retry = 0;
-
+	int status;
 
 	dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
 
-	/* set up everything as needed. */
-	/* Write the record marker */
-	if (xprt->stream) {
-		u32	*marker = req->rq_svec[0].iov_base;
-
-		*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
-	}
-
 	smp_rmb();
 	if (!req->rq_received) {
 		if (list_empty(&req->rq_list)) {
-			spin_lock_bh(&xprt->sock_lock);
+			spin_lock_bh(&xprt->transport_lock);
 			/* Update the softirq receive buffer */
 			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
 					sizeof(req->rq_private_buf));
 			/* Add request to the receive list */
 			list_add_tail(&req->rq_list, &xprt->recv);
-			spin_unlock_bh(&xprt->sock_lock);
+			spin_unlock_bh(&xprt->transport_lock);
 			xprt_reset_majortimeo(req);
 			/* Turn off autodisconnect */
 			del_singleshot_timer_sync(&xprt->timer);
@@ -1246,40 +748,19 @@
 	} else if (!req->rq_bytes_sent)
 		return;
 
-	/* Continue transmitting the packet/record. We must be careful
-	 * to cope with writespace callbacks arriving _after_ we have
-	 * called xprt_sendmsg().
-	 */
-	while (1) {
-		req->rq_xtime = jiffies;
-		status = xprt_sendmsg(xprt, req);
-
-		if (status < 0)
-			break;
-
-		if (xprt->stream) {
-			req->rq_bytes_sent += status;
-
-			/* If we've sent the entire packet, immediately
-			 * reset the count of bytes sent. */
-			if (req->rq_bytes_sent >= req->rq_slen) {
-				req->rq_bytes_sent = 0;
-				goto out_receive;
-			}
-		} else {
-			if (status >= req->rq_slen)
-				goto out_receive;
-			status = -EAGAIN;
-			break;
-		}
-
-		dprintk("RPC: %4d xmit incomplete (%d left of %d)\n",
-				task->tk_pid, req->rq_slen - req->rq_bytes_sent,
-				req->rq_slen);
-
-		status = -EAGAIN;
-		if (retry++ > 50)
-			break;
+	status = xprt->ops->send_request(task);
+	if (status == 0) {
+		dprintk("RPC: %4d xmit complete\n", task->tk_pid);
+		spin_lock_bh(&xprt->transport_lock);
+		xprt->ops->set_retrans_timeout(task);
+		/* Don't race with disconnect */
+		if (!xprt_connected(xprt))
+			task->tk_status = -ENOTCONN;
+		else if (!req->rq_received)
+			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
+		xprt->ops->release_xprt(xprt, task);
+		spin_unlock_bh(&xprt->transport_lock);
+		return;
 	}
 
 	/* Note: at this point, task->tk_sleeping has not yet been set,
@@ -1289,60 +770,19 @@
 	task->tk_status = status;
 
 	switch (status) {
-	case -EAGAIN:
-		if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
-			/* Protect against races with xprt_write_space */
-			spin_lock_bh(&xprt->sock_lock);
-			/* Don't race with disconnect */
-			if (!xprt_connected(xprt))
-				task->tk_status = -ENOTCONN;
-			else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) {
-				task->tk_timeout = req->rq_timeout;
-				rpc_sleep_on(&xprt->pending, task, NULL, NULL);
-			}
-			spin_unlock_bh(&xprt->sock_lock);
-			return;
-		}
-		/* Keep holding the socket if it is blocked */
-		rpc_delay(task, HZ>>4);
-		return;
 	case -ECONNREFUSED:
-		task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
 		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+	case -EAGAIN:
 	case -ENOTCONN:
 		return;
 	default:
-		if (xprt->stream)
-			xprt_disconnect(xprt);
+		break;
 	}
 	xprt_release_write(xprt, task);
 	return;
- out_receive:
-	dprintk("RPC: %4d xmit complete\n", task->tk_pid);
-	/* Set the task's receive timeout value */
-	spin_lock_bh(&xprt->sock_lock);
-	if (!xprt->nocong) {
-		int timer = task->tk_msg.rpc_proc->p_timer;
-		task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
-		task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries;
-		if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0)
-			task->tk_timeout = xprt->timeout.to_maxval;
-	} else
-		task->tk_timeout = req->rq_timeout;
-	/* Don't race with disconnect */
-	if (!xprt_connected(xprt))
-		task->tk_status = -ENOTCONN;
-	else if (!req->rq_received)
-		rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
-	__xprt_release_write(xprt, task);
-	spin_unlock_bh(&xprt->sock_lock);
 }
 
-/*
- * Reserve an RPC call slot.
- */
-static inline void
-do_xprt_reserve(struct rpc_task *task)
+static inline void do_xprt_reserve(struct rpc_task *task)
 {
 	struct rpc_xprt	*xprt = task->tk_xprt;
 
@@ -1362,22 +802,25 @@
 	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
 }
 
-void
-xprt_reserve(struct rpc_task *task)
+/**
+ * xprt_reserve - allocate an RPC request slot
+ * @task: RPC task requesting a slot allocation
+ *
+ * If no more slots are available, place the task on the transport's
+ * backlog queue.
+ */
+void xprt_reserve(struct rpc_task *task)
 {
 	struct rpc_xprt	*xprt = task->tk_xprt;
 
 	task->tk_status = -EIO;
 	if (!xprt->shutdown) {
-		spin_lock(&xprt->xprt_lock);
+		spin_lock(&xprt->reserve_lock);
 		do_xprt_reserve(task);
-		spin_unlock(&xprt->xprt_lock);
+		spin_unlock(&xprt->reserve_lock);
 	}
 }
 
-/*
- * Allocate a 'unique' XID
- */
 static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
 {
 	return xprt->xid++;
@@ -1388,11 +831,7 @@
 	get_random_bytes(&xprt->xid, sizeof(xprt->xid));
 }
 
-/*
- * Initialize RPC request
- */
-static void
-xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
 {
 	struct rpc_rqst	*req = task->tk_rqstp;
 
@@ -1400,128 +839,104 @@
 	req->rq_task	= task;
 	req->rq_xprt    = xprt;
 	req->rq_xid     = xprt_alloc_xid(xprt);
+	req->rq_release_snd_buf = NULL;
 	dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
 			req, ntohl(req->rq_xid));
 }
 
-/*
- * Release an RPC call slot
+/**
+ * xprt_release - release an RPC request slot
+ * @task: task which is finished with the slot
+ *
  */
-void
-xprt_release(struct rpc_task *task)
+void xprt_release(struct rpc_task *task)
 {
 	struct rpc_xprt	*xprt = task->tk_xprt;
 	struct rpc_rqst	*req;
 
 	if (!(req = task->tk_rqstp))
 		return;
-	spin_lock_bh(&xprt->sock_lock);
-	__xprt_release_write(xprt, task);
-	__xprt_put_cong(xprt, req);
+	spin_lock_bh(&xprt->transport_lock);
+	xprt->ops->release_xprt(xprt, task);
+	if (xprt->ops->release_request)
+		xprt->ops->release_request(task);
 	if (!list_empty(&req->rq_list))
 		list_del(&req->rq_list);
 	xprt->last_used = jiffies;
 	if (list_empty(&xprt->recv) && !xprt->shutdown)
-		mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT);
-	spin_unlock_bh(&xprt->sock_lock);
+		mod_timer(&xprt->timer,
+				xprt->last_used + xprt->idle_timeout);
+	spin_unlock_bh(&xprt->transport_lock);
 	task->tk_rqstp = NULL;
+	if (req->rq_release_snd_buf)
+		req->rq_release_snd_buf(req);
 	memset(req, 0, sizeof(*req));	/* mark unused */
 
 	dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
 
-	spin_lock(&xprt->xprt_lock);
+	spin_lock(&xprt->reserve_lock);
 	list_add(&req->rq_list, &xprt->free);
-	xprt_clear_backlog(xprt);
-	spin_unlock(&xprt->xprt_lock);
+	rpc_wake_up_next(&xprt->backlog);
+	spin_unlock(&xprt->reserve_lock);
 }
 
-/*
- * Set default timeout parameters
+/**
+ * xprt_set_timeout - set constant RPC timeout
+ * @to: RPC timeout parameters to set up
+ * @retr: number of retries
+ * @incr: amount of increase after each retry
+ *
  */
-static void
-xprt_default_timeout(struct rpc_timeout *to, int proto)
-{
-	if (proto == IPPROTO_UDP)
-		xprt_set_timeout(to, 5,  5 * HZ);
-	else
-		xprt_set_timeout(to, 5, 60 * HZ);
-}
-
-/*
- * Set constant timeout
- */
-void
-xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
+void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
 {
 	to->to_initval   = 
 	to->to_increment = incr;
-	to->to_maxval    = incr * retr;
+	to->to_maxval    = to->to_initval + (incr * retr);
 	to->to_retries   = retr;
 	to->to_exponential = 0;
 }
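
The new to_maxval formula folds the initial interval into the cap, which the old incr * retr expression left out. With the UDP defaults that this patch removes from xprt_default_timeout (retr = 5, incr = 5 * HZ):

	/* xprt_set_timeout(to, 5, 5 * HZ) now yields:
	 *
	 *	to_initval = to_increment = 5 * HZ
	 *	to_maxval  = 5 * HZ + (5 * HZ * 5) = 30 * HZ
	 *	to_retries = 5
	 *
	 * i.e. the major timeout covers the initial interval plus all
	 * five increments, not the increments alone. */
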
 
-unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
-unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
-
-/*
- * Initialize an RPC client
- */
-static struct rpc_xprt *
-xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
+static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
 {
+	int result;
 	struct rpc_xprt	*xprt;
-	unsigned int entries;
-	size_t slot_table_size;
 	struct rpc_rqst	*req;
 
-	dprintk("RPC:      setting up %s transport...\n",
-				proto == IPPROTO_UDP? "UDP" : "TCP");
-
-	entries = (proto == IPPROTO_TCP)?
-		xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries;
-
 	if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
 		return ERR_PTR(-ENOMEM);
 	memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
-	xprt->max_reqs = entries;
-	slot_table_size = entries * sizeof(xprt->slot[0]);
-	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
-	if (xprt->slot == NULL) {
-		kfree(xprt);
-		return ERR_PTR(-ENOMEM);
-	}
-	memset(xprt->slot, 0, slot_table_size);
 
 	xprt->addr = *ap;
-	xprt->prot = proto;
-	xprt->stream = (proto == IPPROTO_TCP)? 1 : 0;
-	if (xprt->stream) {
-		xprt->cwnd = RPC_MAXCWND(xprt);
-		xprt->nocong = 1;
-		xprt->max_payload = (1U << 31) - 1;
-	} else {
-		xprt->cwnd = RPC_INITCWND;
-		xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
+
+	switch (proto) {
+	case IPPROTO_UDP:
+		result = xs_setup_udp(xprt, to);
+		break;
+	case IPPROTO_TCP:
+		result = xs_setup_tcp(xprt, to);
+		break;
+	default:
+		printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
+				proto);
+		result = -EIO;
+		break;
 	}
-	spin_lock_init(&xprt->sock_lock);
-	spin_lock_init(&xprt->xprt_lock);
-	init_waitqueue_head(&xprt->cong_wait);
+	if (result) {
+		kfree(xprt);
+		return ERR_PTR(result);
+	}
+
+	spin_lock_init(&xprt->transport_lock);
+	spin_lock_init(&xprt->reserve_lock);
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
-	INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt);
-	INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt);
+	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
 	init_timer(&xprt->timer);
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
 	xprt->last_used = jiffies;
-	xprt->port = XPRT_MAX_RESVPORT;
-
-	/* Set timeout parameters */
-	if (to) {
-		xprt->timeout = *to;
-	} else
-		xprt_default_timeout(&xprt->timeout, xprt->prot);
+	xprt->cwnd = RPC_INITCWND;
 
 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
 	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
@@ -1529,139 +944,25 @@
 	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
 
 	/* initialize free list */
-	for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--)
+	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
 		list_add(&req->rq_list, &xprt->free);
 
 	xprt_init_xid(xprt);
 
-	/* Check whether we want to use a reserved port */
-	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
-
 	dprintk("RPC:      created transport %p with %u slots\n", xprt,
 			xprt->max_reqs);
 	
 	return xprt;
 }
 
-/*
- * Bind to a reserved port
+/**
+ * xprt_create_proto - create an RPC client transport
+ * @proto: requested transport protocol
+ * @sap: remote peer's address
+ * @to: timeout parameters for new transport
+ *
  */
-static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
-{
-	struct sockaddr_in myaddr = {
-		.sin_family = AF_INET,
-	};
-	int		err, port;
-
-	/* Were we already bound to a given port? Try to reuse it */
-	port = xprt->port;
-	do {
-		myaddr.sin_port = htons(port);
-		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
-						sizeof(myaddr));
-		if (err == 0) {
-			xprt->port = port;
-			return 0;
-		}
-		if (--port == 0)
-			port = XPRT_MAX_RESVPORT;
-	} while (err == -EADDRINUSE && port != xprt->port);
-
-	printk("RPC: Can't bind to reserved port (%d).\n", -err);
-	return err;
-}
-
-static void
-xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
-{
-	struct sock	*sk = sock->sk;
-
-	if (xprt->inet)
-		return;
-
-	write_lock_bh(&sk->sk_callback_lock);
-	sk->sk_user_data = xprt;
-	xprt->old_data_ready = sk->sk_data_ready;
-	xprt->old_state_change = sk->sk_state_change;
-	xprt->old_write_space = sk->sk_write_space;
-	if (xprt->prot == IPPROTO_UDP) {
-		sk->sk_data_ready = udp_data_ready;
-		sk->sk_no_check = UDP_CSUM_NORCV;
-		xprt_set_connected(xprt);
-	} else {
-		tcp_sk(sk)->nonagle = 1;	/* disable Nagle's algorithm */
-		sk->sk_data_ready = tcp_data_ready;
-		sk->sk_state_change = tcp_state_change;
-		xprt_clear_connected(xprt);
-	}
-	sk->sk_write_space = xprt_write_space;
-
-	/* Reset to new socket */
-	xprt->sock = sock;
-	xprt->inet = sk;
-	write_unlock_bh(&sk->sk_callback_lock);
-
-	return;
-}
-
-/*
- * Set socket buffer length
- */
-void
-xprt_sock_setbufsize(struct rpc_xprt *xprt)
-{
-	struct sock *sk = xprt->inet;
-
-	if (xprt->stream)
-		return;
-	if (xprt->rcvsize) {
-		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs *  2;
-	}
-	if (xprt->sndsize) {
-		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
-		sk->sk_write_space(sk);
-	}
-}
-
-/*
- * Datastream sockets are created here, but xprt_connect will create
- * and connect stream sockets.
- */
-static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport)
-{
-	struct socket	*sock;
-	int		type, err;
-
-	dprintk("RPC:      xprt_create_socket(%s %d)\n",
-			   (proto == IPPROTO_UDP)? "udp" : "tcp", proto);
-
-	type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
-
-	if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) {
-		printk("RPC: can't create socket (%d).\n", -err);
-		return NULL;
-	}
-
-	/* If the caller has the capability, bind to a reserved port */
-	if (resvport && xprt_bindresvport(xprt, sock) < 0) {
-		printk("RPC: can't bind to reserved port.\n");
-		goto failed;
-	}
-
-	return sock;
-
-failed:
-	sock_release(sock);
-	return NULL;
-}
-
-/*
- * Create an RPC client transport given the protocol and peer address.
- */
-struct rpc_xprt *
-xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
+struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
 {
 	struct rpc_xprt	*xprt;
 
@@ -1673,46 +974,26 @@
 	return xprt;
 }
 
-/*
- * Prepare for transport shutdown.
- */
-static void
-xprt_shutdown(struct rpc_xprt *xprt)
+static void xprt_shutdown(struct rpc_xprt *xprt)
 {
 	xprt->shutdown = 1;
 	rpc_wake_up(&xprt->sending);
 	rpc_wake_up(&xprt->resend);
-	rpc_wake_up(&xprt->pending);
+	xprt_wake_pending_tasks(xprt, -EIO);
 	rpc_wake_up(&xprt->backlog);
-	wake_up(&xprt->cong_wait);
 	del_timer_sync(&xprt->timer);
-
-	/* synchronously wait for connect worker to finish */
-	cancel_delayed_work(&xprt->sock_connect);
-	flush_scheduled_work();
 }
 
-/*
- * Clear the xprt backlog queue
+/**
+ * xprt_destroy - destroy an RPC transport, killing off all requests.
+ * @xprt: transport to destroy
+ *
  */
-static int
-xprt_clear_backlog(struct rpc_xprt *xprt) {
-	rpc_wake_up_next(&xprt->backlog);
-	wake_up(&xprt->cong_wait);
-	return 1;
-}
-
-/*
- * Destroy an RPC transport, killing off all requests.
- */
-int
-xprt_destroy(struct rpc_xprt *xprt)
+int xprt_destroy(struct rpc_xprt *xprt)
 {
 	dprintk("RPC:      destroying transport %p\n", xprt);
 	xprt_shutdown(xprt);
-	xprt_disconnect(xprt);
-	xprt_close(xprt);
-	kfree(xprt->slot);
+	xprt->ops->destroy(xprt);
 	kfree(xprt);
 
 	return 0;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
new file mode 100644
index 0000000..2e15292
--- /dev/null
+++ b/net/sunrpc/xprtsock.c
@@ -0,0 +1,1252 @@
+/*
+ * linux/net/sunrpc/xprtsock.c
+ *
+ * Client-side transport implementation for sockets.
+ *
+ * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
+ * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
+ * TCP NFS related read + write fixes
+ *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
+ *
+ * Rewrite of large parts of the code in order to stabilize the TCP code.
+ * Fix behaviour when socket buffer is full.
+ *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ *
+ * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/mm.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/file.h>
+
+#include <net/sock.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+
+/*
+ * How many times to try sending a request on a socket before waiting
+ * for the socket buffer to clear.
+ */
+#define XS_SENDMSG_RETRY	(10U)
+
+/*
+ * Time out for an RPC UDP socket connect.  UDP socket connects are
+ * synchronous, but we set a timeout anyway in case of resource
+ * exhaustion on the local host.
+ */
+#define XS_UDP_CONN_TO		(5U * HZ)
+
+/*
+ * Wait duration for an RPC TCP connection to be established.  Solaris
+ * NFS over TCP uses 60 seconds, for example, which is in line with how
+ * long a server takes to reboot.
+ */
+#define XS_TCP_CONN_TO		(60U * HZ)
+
+/*
+ * Wait duration for a reply from the RPC portmapper.
+ */
+#define XS_BIND_TO		(60U * HZ)
+
+/*
+ * Delay if a UDP socket connect error occurs.  This is most likely some
+ * kind of resource problem on the local host.
+ */
+#define XS_UDP_REEST_TO		(2U * HZ)
+
+/*
+ * The reestablish timeout allows clients to delay for a bit before attempting
+ * to reconnect to a server that just dropped the connection.
+ *
+ * We implement an exponential backoff when trying to reestablish a TCP
+ * transport connection with the server.  Some servers like to drop a TCP
+ * connection when they are overworked, so we start with a short timeout and
+ * increase over time if the server is down or not responding.
+ */
+#define XS_TCP_INIT_REEST_TO	(3U * HZ)
+#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
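
A minimal sketch of the doubling schedule these two constants imply; the transport connect logic that applies it appears later in this file, and the helper name here is illustrative:

	static unsigned long demo_next_reestablish_to(unsigned long timeout)
	{
		/* 3s, 6s, 12s, ... capped at 5 minutes */
		timeout <<= 1;
		if (timeout > XS_TCP_MAX_REEST_TO)
			timeout = XS_TCP_MAX_REEST_TO;
		return timeout;
	}
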
+
+/*
+ * TCP idle timeout; client drops the transport socket if it is idle
+ * for this long.  Note that we also timeout UDP sockets to prevent
+ * holding port numbers when there is no RPC traffic.
+ */
+#define XS_IDLE_DISC_TO		(5U * 60 * HZ)
+
+#ifdef RPC_DEBUG
+# undef  RPC_DEBUG_DATA
+# define RPCDBG_FACILITY	RPCDBG_TRANS
+#endif
+
+#ifdef RPC_DEBUG_DATA
+static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
+{
+	u8 *buf = (u8 *) packet;
+	int j;
+
+	dprintk("RPC:      %s\n", msg);
+	for (j = 0; j < count && j < 128; j += 4) {
+		if (!(j & 31)) {
+			if (j)
+				dprintk("\n");
+			dprintk("0x%04x ", j);
+		}
+		dprintk("%02x%02x%02x%02x ",
+			buf[j], buf[j+1], buf[j+2], buf[j+3]);
+	}
+	dprintk("\n");
+}
+#else
+static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
+{
+	/* NOP */
+}
+#endif
+
+#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
+
+static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+{
+	struct kvec iov = {
+		.iov_base	= xdr->head[0].iov_base + base,
+		.iov_len	= len - base,
+	};
+	struct msghdr msg = {
+		.msg_name	= addr,
+		.msg_namelen	= addrlen,
+		.msg_flags	= XS_SENDMSG_FLAGS,
+	};
+
+	if (xdr->len > len)
+		msg.msg_flags |= MSG_MORE;
+
+	if (likely(iov.iov_len))
+		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
+}
+
+static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+{
+	struct kvec iov = {
+		.iov_base	= xdr->tail[0].iov_base + base,
+		.iov_len	= len - base,
+	};
+	struct msghdr msg = {
+		.msg_flags	= XS_SENDMSG_FLAGS,
+	};
+
+	return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+}
+
+/**
+ * xs_sendpages - write pages directly to a socket
+ * @sock: socket to send on
+ * @addr: UDP only -- address of destination
+ * @addrlen: UDP only -- length of destination address
+ * @xdr: buffer containing this request
+ * @base: starting position in the buffer
+ *
+ */
+static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
+{
+	struct page **ppage = xdr->pages;
+	unsigned int len, pglen = xdr->page_len;
+	int err, ret = 0;
+	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+
+	if (unlikely(!sock))
+		return -ENOTCONN;
+
+	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+
+	len = xdr->head[0].iov_len;
+	if (base < len || (addr != NULL && base == 0)) {
+		err = xs_send_head(sock, addr, addrlen, xdr, base, len);
+		if (ret == 0)
+			ret = err;
+		else if (err > 0)
+			ret += err;
+		if (err != (len - base))
+			goto out;
+		base = 0;
+	} else
+		base -= len;
+
+	if (unlikely(pglen == 0))
+		goto copy_tail;
+	if (unlikely(base >= pglen)) {
+		base -= pglen;
+		goto copy_tail;
+	}
+	if (base || xdr->page_base) {
+		pglen -= base;
+		base += xdr->page_base;
+		ppage += base >> PAGE_CACHE_SHIFT;
+		base &= ~PAGE_CACHE_MASK;
+	}
+
+	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
+	do {
+		int flags = XS_SENDMSG_FLAGS;
+
+		len = PAGE_CACHE_SIZE;
+		if (base)
+			len -= base;
+		if (pglen < len)
+			len = pglen;
+
+		if (pglen != len || xdr->tail[0].iov_len != 0)
+			flags |= MSG_MORE;
+
+		/* Hmm... We might be dealing with highmem pages */
+		if (PageHighMem(*ppage))
+			sendpage = sock_no_sendpage;
+		err = sendpage(sock, *ppage, base, len, flags);
+		if (ret == 0)
+			ret = err;
+		else if (err > 0)
+			ret += err;
+		if (err != len)
+			goto out;
+		base = 0;
+		ppage++;
+	} while ((pglen -= len) != 0);
+copy_tail:
+	len = xdr->tail[0].iov_len;
+	if (base < len) {
+		err = xs_send_tail(sock, xdr, base, len);
+		if (ret == 0)
+			ret = err;
+		else if (err > 0)
+			ret += err;
+	}
+out:
+	return ret;
+}
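
An xdr_buf is a logical byte stream in three parts (a head kvec, a page list, and a tail kvec), and @base is an offset into that stream used to resume a partial send. A small sketch, with assumed lengths, of how that offset selects where transmission resumes:

	struct demo_xdr {
		unsigned int head_len, page_len, tail_len;
	};

	/* Mirrors the skip logic above: head first, then page data, then tail. */
	static const char *demo_resume_region(const struct demo_xdr *x,
					      unsigned int base)
	{
		if (base < x->head_len)
			return "head";
		base -= x->head_len;
		if (base < x->page_len)
			return "pages";
		base -= x->page_len;
		return base < x->tail_len ? "tail" : "done";
	}

	/* With head_len = 100, page_len = 5000 and tail_len = 50, a resumed
	 * send with base = 3000 skips the head and restarts 2900 bytes into
	 * the page data. */
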
+
+/**
+ * xs_nospace - place task on wait queue if transmit was incomplete
+ * @task: task to put to sleep
+ *
+ */
+static void xs_nospace(struct rpc_task *task)
+{
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_xprt *xprt = req->rq_xprt;
+
+	dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
+			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
+			req->rq_slen);
+
+	if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
+		/* Protect against races with write_space */
+		spin_lock_bh(&xprt->transport_lock);
+
+		/* Don't race with disconnect */
+		if (!xprt_connected(xprt))
+			task->tk_status = -ENOTCONN;
+		else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
+			xprt_wait_for_buffer_space(task);
+
+		spin_unlock_bh(&xprt->transport_lock);
+	} else
+		/* Keep holding the socket if it is blocked */
+		rpc_delay(task, HZ>>4);
+}
+
+/**
+ * xs_udp_send_request - write an RPC request to a UDP socket
+ * @task: address of RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ *        0:	The request has been sent
+ *  -EAGAIN:	The socket was blocked; please call again later to
+ *		complete the request
+ * -ENOTCONN:	Caller needs to invoke connect logic, then call again
+ *     other:	Some other error occurred; the request was not sent
+ */
+static int xs_udp_send_request(struct rpc_task *task)
+{
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_xprt *xprt = req->rq_xprt;
+	struct xdr_buf *xdr = &req->rq_snd_buf;
+	int status;
+
+	xs_pktdump("packet data:",
+				req->rq_svec->iov_base,
+				req->rq_svec->iov_len);
+
+	req->rq_xtime = jiffies;
+	status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
+				sizeof(xprt->addr), xdr, req->rq_bytes_sent);
+
+	dprintk("RPC:      xs_udp_send_request(%u) = %d\n",
+			xdr->len - req->rq_bytes_sent, status);
+
+	if (likely(status >= (int) req->rq_slen))
+		return 0;
+
+	/* Still some bytes left; set up for a retry later. */
+	if (status > 0)
+		status = -EAGAIN;
+
+	switch (status) {
+	case -ENETUNREACH:
+	case -EPIPE:
+	case -ECONNREFUSED:
+		/* When the server has died, an ICMP port unreachable message
+		 * prompts ECONNREFUSED. */
+		break;
+	case -EAGAIN:
+		xs_nospace(task);
+		break;
+	default:
+		dprintk("RPC:      sendmsg returned unrecognized error %d\n",
+			-status);
+		break;
+	}
+
+	return status;
+}
+
+static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
+{
+	u32 reclen = buf->len - sizeof(rpc_fraghdr);
+	rpc_fraghdr *base = buf->head[0].iov_base;
+	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
+}
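
A worked example of the framing, assuming RPC_LAST_STREAM_FRAGMENT is the top bit (1U << 31), which is what the decode path below also relies on. For a marshalled request of buf->len = 256 bytes, which already includes the four-byte marker slot at the front of the head iovec:

	/*	reclen = 256 - 4 = 252
	 *	*base  = htonl(0x80000000 | 252) = htonl(0x800000fc)
	 *
	 * on the wire: 80 00 00 fc, i.e. the "last fragment" bit followed
	 * by a fragment length of 252 bytes. */
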
+
+/**
+ * xs_tcp_send_request - write an RPC request to a TCP socket
+ * @task: address of RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ *        0:	The request has been sent
+ *  -EAGAIN:	The socket was blocked; please call again later to
+ *		complete the request
+ * -ENOTCONN:	Caller needs to invoke connect logic, then call again
+ *     other:	Some other error occurred; the request was not sent
+ *
+ * XXX: In the case of soft timeouts, should we eventually give up
+ *	if sendmsg is not able to make progress?
+ */
+static int xs_tcp_send_request(struct rpc_task *task)
+{
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_xprt *xprt = req->rq_xprt;
+	struct xdr_buf *xdr = &req->rq_snd_buf;
+	int status, retry = 0;
+
+	xs_encode_tcp_record_marker(&req->rq_snd_buf);
+
+	xs_pktdump("packet data:",
+				req->rq_svec->iov_base,
+				req->rq_svec->iov_len);
+
+	/* Continue transmitting the packet/record. We must be careful
+	 * to cope with writespace callbacks arriving _after_ we have
+	 * called sendmsg(). */
+	while (1) {
+		req->rq_xtime = jiffies;
+		status = xs_sendpages(xprt->sock, NULL, 0, xdr,
+						req->rq_bytes_sent);
+
+		dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
+				xdr->len - req->rq_bytes_sent, status);
+
+		if (unlikely(status < 0))
+			break;
+
+		/* If we've sent the entire packet, immediately
+		 * reset the count of bytes sent. */
+		req->rq_bytes_sent += status;
+		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
+			req->rq_bytes_sent = 0;
+			return 0;
+		}
+
+		status = -EAGAIN;
+		if (retry++ > XS_SENDMSG_RETRY)
+			break;
+	}
+
+	switch (status) {
+	case -EAGAIN:
+		xs_nospace(task);
+		break;
+	case -ECONNREFUSED:
+	case -ECONNRESET:
+	case -ENOTCONN:
+	case -EPIPE:
+		status = -ENOTCONN;
+		break;
+	default:
+		dprintk("RPC:      sendmsg returned unrecognized error %d\n",
+			-status);
+		xprt_disconnect(xprt);
+		break;
+	}
+
+	return status;
+}
+
+/**
+ * xs_close - close a socket
+ * @xprt: transport
+ *
+ * This is used when all requests are complete; i.e., no DRC state
+ * remains on the server that we want to preserve.
+ */
+static void xs_close(struct rpc_xprt *xprt)
+{
+	struct socket *sock = xprt->sock;
+	struct sock *sk = xprt->inet;
+
+	if (!sk)
+		return;
+
+	dprintk("RPC:      xs_close xprt %p\n", xprt);
+
+	write_lock_bh(&sk->sk_callback_lock);
+	xprt->inet = NULL;
+	xprt->sock = NULL;
+
+	sk->sk_user_data = NULL;
+	sk->sk_data_ready = xprt->old_data_ready;
+	sk->sk_state_change = xprt->old_state_change;
+	sk->sk_write_space = xprt->old_write_space;
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	sk->sk_no_check = 0;
+
+	sock_release(sock);
+}
+
+/**
+ * xs_destroy - prepare to shutdown a transport
+ * @xprt: doomed transport
+ *
+ */
+static void xs_destroy(struct rpc_xprt *xprt)
+{
+	dprintk("RPC:      xs_destroy xprt %p\n", xprt);
+
+	cancel_delayed_work(&xprt->connect_worker);
+	flush_scheduled_work();
+
+	xprt_disconnect(xprt);
+	xs_close(xprt);
+	kfree(xprt->slot);
+}
+
+static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
+{
+	return (struct rpc_xprt *) sk->sk_user_data;
+}
+
+/**
+ * xs_udp_data_ready - "data ready" callback for UDP sockets
+ * @sk: socket with data to read
+ * @len: how much data to read
+ *
+ */
+static void xs_udp_data_ready(struct sock *sk, int len)
+{
+	struct rpc_task *task;
+	struct rpc_xprt *xprt;
+	struct rpc_rqst *rovr;
+	struct sk_buff *skb;
+	int err, repsize, copied;
+	u32 _xid, *xp;
+
+	read_lock(&sk->sk_callback_lock);
+	dprintk("RPC:      xs_udp_data_ready...\n");
+	if (!(xprt = xprt_from_sock(sk)))
+		goto out;
+
+	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
+		goto out;
+
+	if (xprt->shutdown)
+		goto dropit;
+
+	repsize = skb->len - sizeof(struct udphdr);
+	if (repsize < 4) {
+		dprintk("RPC:      impossible RPC reply size %d!\n", repsize);
+		goto dropit;
+	}
+
+	/* Copy the XID from the skb... */
+	xp = skb_header_pointer(skb, sizeof(struct udphdr),
+				sizeof(_xid), &_xid);
+	if (xp == NULL)
+		goto dropit;
+
+	/* Look up and lock the request corresponding to the given XID */
+	spin_lock(&xprt->transport_lock);
+	rovr = xprt_lookup_rqst(xprt, *xp);
+	if (!rovr)
+		goto out_unlock;
+	task = rovr->rq_task;
+
+	if ((copied = rovr->rq_private_buf.buflen) > repsize)
+		copied = repsize;
+
+	/* Suck it into the iovec, verify checksum if not done by hw. */
+	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
+		goto out_unlock;
+
+	/* Something worked... */
+	dst_confirm(skb->dst);
+
+	xprt_adjust_cwnd(task, copied);
+	xprt_update_rtt(task);
+	xprt_complete_rqst(task, copied);
+
+ out_unlock:
+	spin_unlock(&xprt->transport_lock);
+ dropit:
+	skb_free_datagram(sk, skb);
+ out:
+	read_unlock(&sk->sk_callback_lock);
+}
+
+static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
+{
+	if (len > desc->count)
+		len = desc->count;
+	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
+		dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
+				len, desc->count);
+		return 0;
+	}
+	desc->offset += len;
+	desc->count -= len;
+	dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
+			len, desc->count);
+	return len;
+}
+
+static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+	size_t len, used;
+	char *p;
+
+	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
+	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
+	used = xs_tcp_copy_data(desc, p, len);
+	xprt->tcp_offset += used;
+	if (used != len)
+		return;
+
+	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
+	if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
+		xprt->tcp_flags |= XPRT_LAST_FRAG;
+	else
+		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
+	xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
+
+	xprt->tcp_flags &= ~XPRT_COPY_RECM;
+	xprt->tcp_offset = 0;
+
+	/* Sanity check of the record length */
+	if (unlikely(xprt->tcp_reclen < 4)) {
+		dprintk("RPC:      invalid TCP record fragment length\n");
+		xprt_disconnect(xprt);
+		return;
+	}
+	dprintk("RPC:      reading TCP record fragment of length %d\n",
+			xprt->tcp_reclen);
+}
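
Decoding mirrors the encoder; note that the four marker bytes may straddle skbs, which is why tcp_offset accumulates across calls until the marker is complete. Continuing the worked example above:

	/*	ntohl(tcp_recm)                        -> tcp_reclen = 0x800000fc
	 *	tcp_reclen & RPC_LAST_STREAM_FRAGMENT  -> XPRT_LAST_FRAG set
	 *	tcp_reclen &= RPC_FRAGMENT_SIZE_MASK   -> tcp_reclen = 252 */
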
+
+static void xs_tcp_check_recm(struct rpc_xprt *xprt)
+{
+	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
+			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
+	if (xprt->tcp_offset == xprt->tcp_reclen) {
+		xprt->tcp_flags |= XPRT_COPY_RECM;
+		xprt->tcp_offset = 0;
+		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
+			xprt->tcp_flags &= ~XPRT_COPY_DATA;
+			xprt->tcp_flags |= XPRT_COPY_XID;
+			xprt->tcp_copied = 0;
+		}
+	}
+}
+
+static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+	size_t len, used;
+	char *p;
+
+	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
+	dprintk("RPC:      reading XID (%Zu bytes)\n", len);
+	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
+	used = xs_tcp_copy_data(desc, p, len);
+	xprt->tcp_offset += used;
+	if (used != len)
+		return;
+	xprt->tcp_flags &= ~XPRT_COPY_XID;
+	xprt->tcp_flags |= XPRT_COPY_DATA;
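+	/* The 4-byte XID we just read counts as the start of the reply data */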
+	xprt->tcp_copied = 4;
+	dprintk("RPC:      reading reply for XID %08x\n",
+						ntohl(xprt->tcp_xid));
+	xs_tcp_check_recm(xprt);
+}
+
+static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+	struct rpc_rqst *req;
+	struct xdr_buf *rcvbuf;
+	size_t len;
+	ssize_t r;
+
+	/* Find and lock the request corresponding to this xid */
+	spin_lock(&xprt->transport_lock);
+	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
+	if (!req) {
+		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+		dprintk("RPC:      XID %08x request not found!\n",
+				ntohl(xprt->tcp_xid));
+		spin_unlock(&xprt->transport_lock);
+		return;
+	}
+
+	rcvbuf = &req->rq_private_buf;
+	len = desc->count;
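+	/* Read no further than the end of the current record; anything
+	 * beyond it belongs to the next fragment */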
+	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
+		skb_reader_t my_desc;
+
+		len = xprt->tcp_reclen - xprt->tcp_offset;
+		memcpy(&my_desc, desc, sizeof(my_desc));
+		my_desc.count = len;
+		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+					  &my_desc, xs_tcp_copy_data);
+		desc->count -= r;
+		desc->offset += r;
+	} else
+		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+					  desc, xs_tcp_copy_data);
+
+	if (r > 0) {
+		xprt->tcp_copied += r;
+		xprt->tcp_offset += r;
+	}
+	if (r != len) {
+		/* Error when copying to the receive buffer,
+		 * usually because we weren't able to allocate
+		 * additional buffer pages. All we can do now
+		 * is turn off XPRT_COPY_DATA, so the request
+		 * will not receive any additional updates,
+		 * and time out.
+		 * Any remaining data from this record will
+		 * be discarded.
+		 */
+		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+		dprintk("RPC:      XID %08x truncated request\n",
+				ntohl(xprt->tcp_xid));
+		dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+		goto out;
+	}
+
+	dprintk("RPC:      XID %08x read %Zd bytes\n",
+			ntohl(xprt->tcp_xid), r);
+	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+
+	if (xprt->tcp_copied == req->rq_private_buf.buflen)
+		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+	else if (xprt->tcp_offset == xprt->tcp_reclen) {
+		if (xprt->tcp_flags & XPRT_LAST_FRAG)
+			xprt->tcp_flags &= ~XPRT_COPY_DATA;
+	}
+
+out:
+	if (!(xprt->tcp_flags & XPRT_COPY_DATA))
+		xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
+	spin_unlock(&xprt->transport_lock);
+	xs_tcp_check_recm(xprt);
+}
+
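+/* Throw away fragment data that no request is waiting to receive */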
+static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+	size_t len;
+
+	len = xprt->tcp_reclen - xprt->tcp_offset;
+	if (len > desc->count)
+		len = desc->count;
+	desc->count -= len;
+	desc->offset += len;
+	xprt->tcp_offset += len;
+	dprintk("RPC:      discarded %Zu bytes\n", len);
+	xs_tcp_check_recm(xprt);
+}
+
+static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
+{
+	struct rpc_xprt *xprt = rd_desc->arg.data;
+	skb_reader_t desc = {
+		.skb	= skb,
+		.offset	= offset,
+		.count	= len,
+		.csum	= 0
+	};
+
+	dprintk("RPC:      xs_tcp_data_recv started\n");
+	do {
+		/* Read in a new fragment marker if necessary */
+		/* Can we ever really expect to get completely empty fragments? */
+		if (xprt->tcp_flags & XPRT_COPY_RECM) {
+			xs_tcp_read_fraghdr(xprt, &desc);
+			continue;
+		}
+		/* Read in the xid if necessary */
+		if (xprt->tcp_flags & XPRT_COPY_XID) {
+			xs_tcp_read_xid(xprt, &desc);
+			continue;
+		}
+		/* Read in the request data */
+		if (xprt->tcp_flags & XPRT_COPY_DATA) {
+			xs_tcp_read_request(xprt, &desc);
+			continue;
+		}
+		/* Skip over any trailing bytes on short reads */
+		xs_tcp_read_discard(xprt, &desc);
+	} while (desc.count);
+	dprintk("RPC:      xs_tcp_data_recv done\n");
+	return len - desc.count;
+}
+
+/**
+ * xs_tcp_data_ready - "data ready" callback for TCP sockets
+ * @sk: socket with data to read
+ * @bytes: how much data to read
+ *
+ */
+static void xs_tcp_data_ready(struct sock *sk, int bytes)
+{
+	struct rpc_xprt *xprt;
+	read_descriptor_t rd_desc;
+
+	read_lock(&sk->sk_callback_lock);
+	dprintk("RPC:      xs_tcp_data_ready...\n");
+	if (!(xprt = xprt_from_sock(sk)))
+		goto out;
+	if (xprt->shutdown)
+		goto out;
+
+	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
+	rd_desc.arg.data = xprt;
+	rd_desc.count = 65536;
+	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
+out:
+	read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_tcp_state_change - callback to handle TCP socket state changes
+ * @sk: socket whose state has changed
+ *
+ */
+static void xs_tcp_state_change(struct sock *sk)
+{
+	struct rpc_xprt *xprt;
+
+	read_lock(&sk->sk_callback_lock);
+	if (!(xprt = xprt_from_sock(sk)))
+		goto out;
+	dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
+	dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
+				sk->sk_state, xprt_connected(xprt),
+				sock_flag(sk, SOCK_DEAD),
+				sock_flag(sk, SOCK_ZAPPED));
+
+	switch (sk->sk_state) {
+	case TCP_ESTABLISHED:
+		spin_lock_bh(&xprt->transport_lock);
+		if (!xprt_test_and_set_connected(xprt)) {
+			/* Reset TCP record info */
+			xprt->tcp_offset = 0;
+			xprt->tcp_reclen = 0;
+			xprt->tcp_copied = 0;
+			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
+			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+			xprt_wake_pending_tasks(xprt, 0);
+		}
+		spin_unlock_bh(&xprt->transport_lock);
+		break;
+	case TCP_SYN_SENT:
+	case TCP_SYN_RECV:
+		break;
+	default:
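+		/* Everything else indicates a dropped or closing connection */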
+		xprt_disconnect(xprt);
+		break;
+	}
+ out:
+	read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_udp_write_space - callback invoked when socket buffer space becomes available
+ * @sk: socket whose state has changed
+ *
+ * Called when more output buffer space is available for this socket.
+ * We try not to wake our writers until they can make "significant"
+ * progress, otherwise we'll waste resources thrashing kernel_sendmsg
+ * with a bunch of small requests.
+ */
+static void xs_udp_write_space(struct sock *sk)
+{
+	read_lock(&sk->sk_callback_lock);
+
+	/* from net/core/sock.c:sock_def_write_space */
+	if (sock_writeable(sk)) {
+		struct socket *sock;
+		struct rpc_xprt *xprt;
+
+		if (unlikely(!(sock = sk->sk_socket)))
+			goto out;
+		if (unlikely(!(xprt = xprt_from_sock(sk))))
+			goto out;
+		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
+			goto out;
+
+		xprt_write_space(xprt);
+	}
+
+ out:
+	read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_tcp_write_space - callback invoked when socket buffer space becomes available
+ * @sk: socket whose state has changed
+ *
+ * Called when more output buffer space is available for this socket.
+ * We try not to wake our writers until they can make "significant"
+ * progress, otherwise we'll waste resources thrashing kernel_sendmsg
+ * with a bunch of small requests.
+ */
+static void xs_tcp_write_space(struct sock *sk)
+{
+	read_lock(&sk->sk_callback_lock);
+
+	/* from net/core/stream.c:sk_stream_write_space */
+	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+		struct socket *sock;
+		struct rpc_xprt *xprt;
+
+		if (unlikely(!(sock = sk->sk_socket)))
+			goto out;
+		if (unlikely(!(xprt = xprt_from_sock(sk))))
+			goto out;
+		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
+			goto out;
+
+		xprt_write_space(xprt);
+	}
+
+ out:
+	read_unlock(&sk->sk_callback_lock);
+}
+
+static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
+{
+	struct sock *sk = xprt->inet;
+
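+	/* Scale the socket buffers to the number of request slots, with
+	 * a factor of two for headroom */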
+	if (xprt->rcvsize) {
+		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
+	}
+	if (xprt->sndsize) {
+		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
+		sk->sk_write_space(sk);
+	}
+}
+
+/**
+ * xs_udp_set_buffer_size - set send and receive limits
+ * @xprt: generic transport
+ * @sndsize: requested size of send buffer, in bytes
+ * @rcvsize: requested size of receive buffer, in bytes
+ *
+ * Set socket send and receive buffer size limits.
+ */
+static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
+{
+	xprt->sndsize = 0;
+	if (sndsize)
+		xprt->sndsize = sndsize + 1024;
+	xprt->rcvsize = 0;
+	if (rcvsize)
+		xprt->rcvsize = rcvsize + 1024;
+
+	xs_udp_do_set_buffer_size(xprt);
+}
+
+/**
+ * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
+ * @task: task that timed out
+ *
+ * Adjust the congestion window after a retransmit timeout has occurred.
+ */
+static void xs_udp_timer(struct rpc_task *task)
+{
+	xprt_adjust_cwnd(task, -ETIMEDOUT);
+}
+
+static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
+{
+	struct sockaddr_in myaddr = {
+		.sin_family = AF_INET,
+	};
+	int err;
+	unsigned short port = xprt->port;
+
+	do {
+		myaddr.sin_port = htons(port);
+		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
+						sizeof(myaddr));
+		if (err == 0) {
+			xprt->port = port;
+			dprintk("RPC:      xs_bindresvport bound to port %u\n",
+					port);
+			return 0;
+		}
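+		/* Try the next port down, wrapping from the bottom of the
+		 * reserved range back to the top */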
+		if (port <= xprt_min_resvport)
+			port = xprt_max_resvport;
+		else
+			port--;
+	} while (err == -EADDRINUSE && port != xprt->port);
+
+	dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
+	return err;
+}
+
+/**
+ * xs_udp_connect_worker - set up a UDP socket
+ * @args: RPC transport to connect
+ *
+ * Invoked from a work queue.
+ */
+static void xs_udp_connect_worker(void *args)
+{
+	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+	struct socket *sock = xprt->sock;
+	int err, status = -EIO;
+
+	if (xprt->shutdown || xprt->addr.sin_port == 0)
+		goto out;
+
+	dprintk("RPC:      xs_udp_connect_worker for xprt %p\n", xprt);
+
+	/* Start by resetting any existing state */
+	xs_close(xprt);
+
+	if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
+		dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
+		goto out;
+	}
+
+	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+		sock_release(sock);
+		goto out;
+	}
+
+	if (!xprt->inet) {
+		struct sock *sk = sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+
+		sk->sk_user_data = xprt;
+		xprt->old_data_ready = sk->sk_data_ready;
+		xprt->old_state_change = sk->sk_state_change;
+		xprt->old_write_space = sk->sk_write_space;
+		sk->sk_data_ready = xs_udp_data_ready;
+		sk->sk_write_space = xs_udp_write_space;
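+		/* Receive checksums are checked in csum_partial_copy_to_xdr */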
+		sk->sk_no_check = UDP_CSUM_NORCV;
+
+		xprt_set_connected(xprt);
+
+		/* Reset to new socket */
+		xprt->sock = sock;
+		xprt->inet = sk;
+
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	xs_udp_do_set_buffer_size(xprt);
+	status = 0;
+out:
+	xprt_wake_pending_tasks(xprt, status);
+	xprt_clear_connecting(xprt);
+}
+
+/*
+ * We need to preserve the local port number across reconnects so the
+ * server's reply cache can still match our retransmitted requests to
+ * the replies it has already cached.
+ */
+static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
+{
+	int result;
+	struct socket *sock = xprt->sock;
+	struct sockaddr any;
+
+	dprintk("RPC:      disconnecting xprt %p to reuse port\n", xprt);
+
+	/*
+	 * Disconnect the transport socket by doing a connect operation
+	 * with AF_UNSPEC.  This should return immediately...
+	 */
+	memset(&any, 0, sizeof(any));
+	any.sa_family = AF_UNSPEC;
+	result = sock->ops->connect(sock, &any, sizeof(any), 0);
+	if (result)
+		dprintk("RPC:      AF_UNSPEC connect return code %d\n",
+				result);
+}
+
+/**
+ * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
+ * @args: RPC transport to connect
+ *
+ * Invoked from a work queue.
+ */
+static void xs_tcp_connect_worker(void *args)
+{
+	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct socket *sock = xprt->sock;
+	int err, status = -EIO;
+
+	if (xprt->shutdown || xprt->addr.sin_port == 0)
+		goto out;
+
+	dprintk("RPC:      xs_tcp_connect_worker for xprt %p\n", xprt);
+
+	if (!xprt->sock) {
+		/* start from scratch */
+		if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
+			dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
+			goto out;
+		}
+
+		if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+			sock_release(sock);
+			goto out;
+		}
+	} else
+		/* "close" the socket, preserving the local port */
+		xs_tcp_reuse_connection(xprt);
+
+	if (!xprt->inet) {
+		struct sock *sk = sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+
+		sk->sk_user_data = xprt;
+		xprt->old_data_ready = sk->sk_data_ready;
+		xprt->old_state_change = sk->sk_state_change;
+		xprt->old_write_space = sk->sk_write_space;
+		sk->sk_data_ready = xs_tcp_data_ready;
+		sk->sk_state_change = xs_tcp_state_change;
+		sk->sk_write_space = xs_tcp_write_space;
+
+		/* socket options */
+		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
+		sock_reset_flag(sk, SOCK_LINGER);
+		tcp_sk(sk)->linger2 = 0;
+		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
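+		/* Each RPC request goes out as a single message, so Nagle
+		 * would only add latency */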
+
+		xprt_clear_connected(xprt);
+
+		/* Reset to new socket */
+		xprt->sock = sock;
+		xprt->inet = sk;
+
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+
+	/* Tell the socket layer to start connecting... */
+	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
+			sizeof(xprt->addr), O_NONBLOCK);
+	dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
+			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
+	if (status < 0) {
+		switch (status) {
+			case -EINPROGRESS:
+			case -EALREADY:
+				goto out_clear;
+			case -ECONNREFUSED:
+			case -ECONNRESET:
+				/* retry with existing socket, after a delay */
+				break;
+			default:
+				/* get rid of existing socket, and retry */
+				xs_close(xprt);
+				break;
+		}
+	}
+out:
+	xprt_wake_pending_tasks(xprt, status);
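+	/* fall through */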
+out_clear:
+	xprt_clear_connecting(xprt);
+}
+
+/**
+ * xs_connect - connect a socket to a remote endpoint
+ * @task: address of RPC task that manages state of connect request
+ *
+ * TCP: If the remote end dropped the connection, delay reconnecting.
+ *
+ * UDP socket connects are synchronous, but we use a work queue anyway
+ * to guarantee that even unprivileged user processes can set up a
+ * socket on a privileged port.
+ *
+ * If a UDP socket connect fails, the delay behavior here prevents
+ * retry floods (hard mounts).
+ */
+static void xs_connect(struct rpc_task *task)
+{
+	struct rpc_xprt *xprt = task->tk_xprt;
+
+	if (xprt_test_and_set_connecting(xprt))
+		return;
+
+	if (xprt->sock != NULL) {
+		dprintk("RPC:      xs_connect delayed xprt %p for %lu seconds\n",
+				xprt, xprt->reestablish_timeout / HZ);
+		schedule_delayed_work(&xprt->connect_worker,
+					xprt->reestablish_timeout);
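+		/* Double the reconnect delay each time, up to the maximum */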
+		xprt->reestablish_timeout <<= 1;
+		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
+			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
+	} else {
+		dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
+		schedule_work(&xprt->connect_worker);
+
+		/* flush_scheduled_work can sleep... */
+		if (!RPC_IS_ASYNC(task))
+			flush_scheduled_work();
+	}
+}
+
+static struct rpc_xprt_ops xs_udp_ops = {
+	.set_buffer_size	= xs_udp_set_buffer_size,
+	.reserve_xprt		= xprt_reserve_xprt_cong,
+	.release_xprt		= xprt_release_xprt_cong,
+	.connect		= xs_connect,
+	.send_request		= xs_udp_send_request,
+	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
+	.timer			= xs_udp_timer,
+	.release_request	= xprt_release_rqst_cong,
+	.close			= xs_close,
+	.destroy		= xs_destroy,
+};
+
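+/*
+ * The TCP transport provides no timer or release_request methods:
+ * it relies on TCP's own flow and congestion control instead of the
+ * RPC-level congestion window that governs UDP.
+ */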
+static struct rpc_xprt_ops xs_tcp_ops = {
+	.reserve_xprt		= xprt_reserve_xprt,
+	.release_xprt		= xprt_release_xprt,
+	.connect		= xs_connect,
+	.send_request		= xs_tcp_send_request,
+	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
+	.close			= xs_close,
+	.destroy		= xs_destroy,
+};
+
+/**
+ * xs_setup_udp - Set up transport to use a UDP socket
+ * @xprt: transport to set up
+ * @to:   timeout parameters
+ *
+ */
+int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+{
+	size_t slot_table_size;
+
+	dprintk("RPC:      setting up udp-ipv4 transport...\n");
+
+	xprt->max_reqs = xprt_udp_slot_table_entries;
+	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
+	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+	if (xprt->slot == NULL)
+		return -ENOMEM;
+	memset(xprt->slot, 0, slot_table_size);
+
+	xprt->prot = IPPROTO_UDP;
+	xprt->port = xprt_max_resvport;
+	xprt->tsh_size = 0;
+	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
+	/* XXX: header size can vary due to auth type, IPv6, etc. */
+	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
+
+	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	xprt->bind_timeout = XS_BIND_TO;
+	xprt->connect_timeout = XS_UDP_CONN_TO;
+	xprt->reestablish_timeout = XS_UDP_REEST_TO;
+	xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+	xprt->ops = &xs_udp_ops;
+
+	if (to)
+		xprt->timeout = *to;
+	else
+		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);
+
+	return 0;
+}
+
+/**
+ * xs_setup_tcp - Set up transport to use a TCP socket
+ * @xprt: transport to set up
+ * @to: timeout parameters
+ *
+ */
+int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+{
+	size_t slot_table_size;
+
+	dprintk("RPC:      setting up tcp-ipv4 transport...\n");
+
+	xprt->max_reqs = xprt_tcp_slot_table_entries;
+	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
+	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+	if (xprt->slot == NULL)
+		return -ENOMEM;
+	memset(xprt->slot, 0, slot_table_size);
+
+	xprt->prot = IPPROTO_TCP;
+	xprt->port = xprt_max_resvport;
+	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
+	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+
+	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	xprt->bind_timeout = XS_BIND_TO;
+	xprt->connect_timeout = XS_TCP_CONN_TO;
+	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+	xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+	xprt->ops = &xs_tcp_ops;
+
+	if (to)
+		xprt->timeout = *to;
+	else
+		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);
+
+	return 0;
+}