[rtems-libbsd commit] kern/sys: Import NFS and NFS file system client

Chris Johns chrisj at rtems.org
Thu Sep 2 02:43:01 UTC 2021


Module:    rtems-libbsd
Branch:    6-freebsd-12
Commit:    6138f242be4f9d989a4052be71f1b116bd806e44
Changeset: http://git.rtems.org/rtems-libbsd/commit/?id=6138f242be4f9d989a4052be71f1b116bd806e44

Author:    Chris Johns <chrisj at rtems.org>
Date:      Thu Jul 29 13:35:16 2021 +1000

kern/sys: Import NFS and NFS file system client

Update #4475

---

 freebsd/sys/fs/nfs/nfs.h                 |  805 ++++
 freebsd/sys/fs/nfs/nfs_commonacl.c       |  485 ++
 freebsd/sys/fs/nfs/nfs_commonkrpc.c      | 1436 ++++++
 freebsd/sys/fs/nfs/nfs_commonport.c      |  819 ++++
 freebsd/sys/fs/nfs/nfs_commonsubs.c      | 4769 +++++++++++++++++++
 freebsd/sys/fs/nfs/nfs_var.h             |  742 +++
 freebsd/sys/fs/nfs/nfscl.h               |   84 +
 freebsd/sys/fs/nfs/nfsclstate.h          |  446 ++
 freebsd/sys/fs/nfs/nfsdport.h            |  125 +
 freebsd/sys/fs/nfs/nfskpiport.h          |   75 +
 freebsd/sys/fs/nfs/nfsm_subs.h           |  146 +
 freebsd/sys/fs/nfs/nfsport.h             | 1086 +++++
 freebsd/sys/fs/nfs/nfsproto.h            | 1426 ++++++
 freebsd/sys/fs/nfs/nfsrvcache.h          |  124 +
 freebsd/sys/fs/nfs/nfsrvstate.h          |  410 ++
 freebsd/sys/fs/nfs/nfsv4_errstr.h        |  103 +
 freebsd/sys/fs/nfs/rpcv2.h               |  209 +
 freebsd/sys/fs/nfs/xdr_subs.h            |  101 +
 freebsd/sys/fs/nfsclient/nfs.h           |  128 +
 freebsd/sys/fs/nfsclient/nfs_clbio.c     | 1874 ++++++++
 freebsd/sys/fs/nfsclient/nfs_clcomsubs.c |  439 ++
 freebsd/sys/fs/nfsclient/nfs_clkdtrace.c |  587 +++
 freebsd/sys/fs/nfsclient/nfs_clkrpc.c    |  299 ++
 freebsd/sys/fs/nfsclient/nfs_clnfsiod.c  |  343 ++
 freebsd/sys/fs/nfsclient/nfs_clnode.c    |  365 ++
 freebsd/sys/fs/nfsclient/nfs_clport.c    | 1414 ++++++
 freebsd/sys/fs/nfsclient/nfs_clrpcops.c  | 7666 ++++++++++++++++++++++++++++++
 freebsd/sys/fs/nfsclient/nfs_clstate.c   | 5458 +++++++++++++++++++++
 freebsd/sys/fs/nfsclient/nfs_clsubs.c    |  391 ++
 freebsd/sys/fs/nfsclient/nfs_clvfsops.c  | 2051 ++++++++
 freebsd/sys/fs/nfsclient/nfs_clvnops.c   | 3604 ++++++++++++++
 freebsd/sys/fs/nfsclient/nfs_kdtrace.h   |  122 +
 freebsd/sys/fs/nfsclient/nfsmount.h      |  129 +
 freebsd/sys/fs/nfsclient/nfsnode.h       |  199 +
 freebsd/sys/fs/nfsclient/nlminfo.h       |   43 +
 freebsd/sys/nfs/bootp_subr.c             | 1904 ++++++++
 freebsd/sys/nfs/krpc.h                   |   31 +
 freebsd/sys/nfs/krpc_subr.c              |  470 ++
 freebsd/sys/nfs/nfs_common.h             |  137 +
 freebsd/sys/nfs/nfs_fha.c                |  527 ++
 freebsd/sys/nfs/nfs_fha.h                |  122 +
 freebsd/sys/nfs/nfs_kdtrace.h            |  122 +
 freebsd/sys/nfs/nfs_lock.c               |  403 ++
 freebsd/sys/nfs/nfs_lock.h               |   94 +
 freebsd/sys/nfs/nfs_mountcommon.h        |   56 +
 freebsd/sys/nfs/nfs_nfssvc.c             |  154 +
 freebsd/sys/nfs/nfsdiskless.h            |  116 +
 freebsd/sys/nfs/nfsproto.h               |  701 +++
 freebsd/sys/nfs/nfssvc.h                 |   85 +
 freebsd/sys/nfs/xdr_subs.h               |   93 +
 freebsd/sys/nfsclient/nfs.h              |  295 ++
 freebsd/sys/nfsclient/nfsargs.h          |  106 +
 freebsd/sys/nfsclient/nfsm_subs.h        |  180 +
 freebsd/sys/nfsclient/nfsmount.h         |  132 +
 freebsd/sys/nfsclient/nfsnode.h          |  215 +
 freebsd/sys/nfsclient/nfsstats.h         |   71 +
 freebsd/sys/nfsclient/nlminfo.h          |   44 +
 57 files changed, 44561 insertions(+)

diff --git a/freebsd/sys/fs/nfs/nfs.h b/freebsd/sys/fs/nfs/nfs.h
new file mode 100644
index 0000000..c6fb59f
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfs.h
@@ -0,0 +1,805 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFS_H_
+#define	_NFS_NFS_H_
+/*
+ * Tunable constants for nfs
+ */
+
+#define	NFS_MAXIOVEC	34
+#define	NFS_TICKINTVL	500		/* Desired time for a tick (msec) */
+#define	NFS_HZ		(hz / nfscl_ticks) /* Ticks/sec */
+#define	NFS_TIMEO	(1 * NFS_HZ)	/* Default timeout = 1 second */
+#define	NFS_MINTIMEO	(1 * NFS_HZ)	/* Min timeout to use */
+#define	NFS_MAXTIMEO	(60 * NFS_HZ)	/* Max timeout to backoff to */
+#define	NFS_TCPTIMEO	300		/* TCP timeout */
+#define	NFS_MAXRCVTIMEO	60		/* 1 minute in seconds */
+#define	NFS_MINIDEMTIMEO (5 * NFS_HZ)	/* Min timeout for non-idempotent ops*/
+#define	NFS_MAXREXMIT	100		/* Stop counting after this many */
+#define	NFSV4_CALLBACKTIMEO (2 * NFS_HZ) /* Timeout in ticks */
+#define	NFSV4_CALLBACKRETRY 5		/* Number of retries before failure */
+#define	NFSV4_SLOTS	64		/* Number of slots, fore channel */
+#define	NFSV4_CBSLOTS	8		/* Number of slots, back channel */
+#define	NFSV4_CBRETRYCNT 4		/* # of CBRecall retries upon err */
+#define	NFSV4_UPCALLTIMEO (15 * NFS_HZ)	/* Timeout in ticks for upcalls */
+					/* to gssd or nfsuserd */
+#define	NFSV4_UPCALLRETRY 4		/* Number of retries before failure */
+#define	NFS_MAXWINDOW	1024		/* Max number of outstanding requests */
+#define	NFS_RETRANS	10		/* Num of retrans for soft mounts */
+#define	NFS_RETRANS_TCP	2		/* Num of retrans for TCP soft mounts */
+#define	NFS_MAXGRPS	16		/* Max. size of groups list */
+#define	NFS_TRYLATERDEL	15		/* Maximum delay timeout (sec) */
+#ifndef NFS_REMOVETIMEO
+#define	NFS_REMOVETIMEO 15  /* # sec to wait for delegret in local syscall */
+#endif
+#ifndef NFS_MINATTRTIMO
+#define	NFS_MINATTRTIMO 5		/* Attribute cache timeout in sec */
+#endif
+#ifndef NFS_MAXATTRTIMO
+#define	NFS_MAXATTRTIMO 60
+#endif
+#define	NFS_WSIZE	8192		/* Def. write data size <= 8192 */
+#define	NFS_RSIZE	8192		/* Def. read data size <= 8192 */
+#define	NFS_READDIRSIZE	8192		/* Def. readdir size */
+#define	NFS_DEFRAHEAD	1		/* Def. read ahead # blocks */
+#define	NFS_MAXRAHEAD	16		/* Max. read ahead # blocks */
+#define	NFS_MAXASYNCDAEMON 	64	/* Max. number async_daemons runnable */
+#define	NFS_MAXUIDHASH	64		/* Max. # of hashed uid entries/mp */
+#ifndef	NFSRV_LEASE
+#define	NFSRV_LEASE		120	/* Lease time in seconds for V4 */
+#endif					/* assigned to nfsrv_lease */
+#ifndef NFSRV_STALELEASE
+#define	NFSRV_STALELEASE	(5 * nfsrv_lease)
+#endif
+#ifndef NFSRV_MOULDYLEASE
+#define	NFSRV_MOULDYLEASE	604800	/* One week (in sec) */
+#endif
+#ifndef NFSCLIENTHASHSIZE
+#define	NFSCLIENTHASHSIZE	20	/* Size of server client hash table */
+#endif
+#ifndef NFSLOCKHASHSIZE
+#define	NFSLOCKHASHSIZE		20	/* Size of server nfslock hash table */
+#endif
+#ifndef NFSSESSIONHASHSIZE
+#define	NFSSESSIONHASHSIZE	20	/* Size of server session hash table */
+#endif
+#define	NFSSTATEHASHSIZE	10	/* Size of server stateid hash table */
+#define	NFSLAYOUTHIGHWATER	1000000	/* Upper limit for # of layouts */
+#ifndef	NFSCLDELEGHIGHWATER
+#define	NFSCLDELEGHIGHWATER	10000	/* limit for client delegations */
+#endif
+#ifndef	NFSCLLAYOUTHIGHWATER
+#define	NFSCLLAYOUTHIGHWATER	10000	/* limit for client pNFS layouts */
+#endif
+#ifndef NFSNOOPEN			/* Inactive open owner (sec) */
+#define	NFSNOOPEN		120
+#endif
+#define	NFSRV_LEASEDELTA	15	/* # of seconds to delay beyond lease */
+#define	NFS_IDMAXSIZE		4	/* max sizeof (in_addr_t) */
+#ifndef NFSRVCACHE_UDPTIMEOUT
+#define	NFSRVCACHE_UDPTIMEOUT	30	/* # of sec to hold cached rpcs(udp) */
+#endif
+#ifndef NFSRVCACHE_UDPHIGHWATER
+#define	NFSRVCACHE_UDPHIGHWATER	500	/* Max # of udp cache entries */
+#endif
+#ifndef NFSRVCACHE_TCPTIMEOUT
+#define	NFSRVCACHE_TCPTIMEOUT	(3600*12) /*#of sec to hold cached rpcs(tcp) */
+#endif
+#ifndef	NFSRVCACHE_FLOODLEVEL
+#define	NFSRVCACHE_FLOODLEVEL	16384	/* Very high water mark for cache */
+#endif
+#ifndef	NFSRV_CLIENTHIGHWATER
+#define	NFSRV_CLIENTHIGHWATER	1000
+#endif
+#ifndef	NFSRV_MAXDUMPLIST
+#define	NFSRV_MAXDUMPLIST	10000
+#endif
+#ifndef NFS_ACCESSCACHESIZE
+#define	NFS_ACCESSCACHESIZE	8
+#endif
+#define	NFSV4_CBPORT	7745		/* Callback port for testing */
+
+/*
+ * This macro defines the high water mark for issuing V4 delegations.
+ * (It is currently set at a conservative 20% of nfsrv_v4statelimit. It
+ *  may need to be increased once clients can make more effective use of
+ *  delegations.)
+ */
+#define	NFSRV_V4DELEGLIMIT(c) (((c) * 5) > nfsrv_v4statelimit)
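
A quick standalone check of the 20% figure mentioned in the comment above,
using a hypothetical value for nfsrv_v4statelimit (in the kernel it is a
tunable, not a constant):

    #include <assert.h>

    /* Same test as NFSRV_V4DELEGLIMIT(c): true once (c) * 5 > the limit. */
    #define V4DELEGLIMIT(c, limit)  (((c) * 5) > (limit))

    int
    main(void)
    {
            int nfsrv_v4statelimit = 500000;        /* hypothetical value */

            /* The test trips just past 20% of the state limit. */
            assert(!V4DELEGLIMIT(100000, nfsrv_v4statelimit));
            assert(V4DELEGLIMIT(100001, nfsrv_v4statelimit));
            return (0);
    }
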
+
+#define	NFS_READDIRBLKSIZ	DIRBLKSIZ	/* Minimal nm_readdirsize */
+
+/*
+ * Oddballs
+ */
+#define	NFS_CMPFH(n, f, s) 						\
+    ((n)->n_fhp->nfh_len == (s) && !NFSBCMP((n)->n_fhp->nfh_fh, (caddr_t)(f), (s)))
+#define	NFSRV_CMPFH(nf, ns, f, s) 					\
+	((ns) == (s) && !NFSBCMP((caddr_t)(nf), (caddr_t)(f), (s)))
+#define	NFS_CMPTIME(t1, t2) 						\
+	((t1).tv_sec == (t2).tv_sec && (t1).tv_nsec == (t2).tv_nsec)
+#define	NFS_SETTIME(t) do { 						\
+	(t).tv_sec = time.tv_sec; (t).tv_nsec = 1000 * time.tv_usec; } while (0)
+#define	NFS_SRVMAXDATA(n) 						\
+		(((n)->nd_flag & (ND_NFSV3 | ND_NFSV4)) ? 		\
+		 NFS_SRVMAXIO : NFS_V2MAXDATA)
+#define	NFS64BITSSET	0xffffffffffffffffull
+#define	NFS64BITSMINUS1	0xfffffffffffffffeull
+
+/*
+ * Structures for the nfssvc(2) syscall. Note that only nfsd, mount_nfs
+ * and nfsloaduser should ever try to use it.
+ */
+struct nfsd_addsock_args {
+	int	sock;		/* Socket to serve */
+	caddr_t	name;		/* Client addr for connection based sockets */
+	int	namelen;	/* Length of name */
+};
+
+/*
+ * nfsd argument for new krpc.
+ * (New version supports pNFS, indicated by NFSSVC_NEWSTRUCT flag.)
+ */
+struct nfsd_nfsd_args {
+	const char *principal;	/* GSS-API service principal name */
+	int	minthreads;	/* minimum service thread count */
+	int	maxthreads;	/* maximum service thread count */
+	int	version;	/* Allow multiple variants */
+	char	*addr;		/* pNFS DS addresses */
+	int	addrlen;	/* Length of addrs */
+	char	*dnshost;	/* DNS names for DS addresses */
+	int	dnshostlen;	/* Length of DNS names */
+	char	*dspath;	/* DS Mount path on MDS */
+	int	dspathlen;	/* Length of DS Mount path on MDS */
+	char	*mdspath;	/* MDS mount for DS path on MDS */
+	int	mdspathlen;	/* Length of MDS mount for DS path on MDS */
+	int	mirrorcnt;	/* Number of mirrors to create on DSs */
+};
+
+/*
+ * NFSDEV_MAXMIRRORS - Maximum level of mirroring for a DS.
+ * (Most will only put files on two DSs, but this setting allows up to 4.)
+ * NFSDEV_MAXVERS - maximum number of NFS versions supported by Flex File.
+ */
+#define	NFSDEV_MAXMIRRORS	4
+#define	NFSDEV_MAXVERS		4
+
+struct nfsd_pnfsd_args {
+	int	op;		/* Which pNFSd op to perform. */
+	char	*mdspath;	/* Path of MDS file. */
+	char	*dspath;	/* Path of recovered DS mounted on dir. */
+	char	*curdspath;	/* Path of current DS mounted on dir. */
+};
+
+#define	PNFSDOP_DELDSSERVER	1
+#define	PNFSDOP_COPYMR		2
+#define	PNFSDOP_FORCEDELDS	3
+
+/* Old version. */
+struct nfsd_nfsd_oargs {
+	const char *principal;	/* GSS-API service principal name */
+	int	minthreads;	/* minimum service thread count */
+	int	maxthreads;	/* maximum service thread count */
+};
+
+/*
+ * Arguments for use by the callback daemon.
+ */
+struct nfsd_nfscbd_args {
+	const char *principal;	/* GSS-API service principal name */
+};
+
+struct nfscbd_args {
+	int	sock;		/* Socket to serve */
+	caddr_t	name;		/* Client addr for connection based sockets */
+	int	namelen;	/* Length of name */
+	u_short	port;		/* Port# for callbacks */
+};
+
+struct nfsd_idargs {
+	int		nid_flag;	/* Flags (see below) */
+	uid_t		nid_uid;	/* user/group id */
+	gid_t		nid_gid;
+	int		nid_usermax;	/* Upper bound on user name cache */
+	int		nid_usertimeout;/* User name timeout (minutes) */
+	u_char		*nid_name;	/* Name */
+	int		nid_namelen;	/* and its length */
+	gid_t		*nid_grps;	/* and the list */
+	int		nid_ngroup;	/* Size of groups list */
+};
+
+struct nfsd_oidargs {
+	int		nid_flag;	/* Flags (see below) */
+	uid_t		nid_uid;	/* user/group id */
+	gid_t		nid_gid;
+	int		nid_usermax;	/* Upper bound on user name cache */
+	int		nid_usertimeout;/* User name timeout (minutes) */
+	u_char		*nid_name;	/* Name */
+	int		nid_namelen;	/* and its length */
+};
+
+struct nfsuserd_args {
+	sa_family_t	nuserd_family;	/* Address family to use */
+	u_short		nuserd_port;	/* Port# */
+};
+
+struct nfsd_clid {
+	int		nclid_idlen;	/* Length of client id */
+	u_char		nclid_id[NFSV4_OPAQUELIMIT]; /* and name */
+};
+
+struct nfsd_dumplist {
+	int		ndl_size;	/* Number of elements */
+	void		*ndl_list;	/* and the list of elements */
+};
+
+struct nfsd_dumpclients {
+	u_int32_t	ndcl_flags;		/* LCL_xxx flags */
+	u_int32_t	ndcl_nopenowners;	/* Number of openowners */
+	u_int32_t	ndcl_nopens;		/* and opens */
+	u_int32_t	ndcl_nlockowners;	/* and of lockowners */
+	u_int32_t	ndcl_nlocks;		/* and of locks */
+	u_int32_t	ndcl_ndelegs;		/* and of delegations */
+	u_int32_t	ndcl_nolddelegs;	/* and old delegations */
+	sa_family_t	ndcl_addrfam;		/* Callback address */
+	union {
+		struct in_addr sin_addr;
+		struct in6_addr sin6_addr;
+	} ndcl_cbaddr;
+	struct nfsd_clid ndcl_clid;	/* and client id */
+};
+
+struct nfsd_dumplocklist {
+	char		*ndllck_fname;	/* File Name */
+	int		ndllck_size;	/* Number of elements */
+	void		*ndllck_list;	/* and the list of elements */
+};
+
+struct nfsd_dumplocks {
+	u_int32_t	ndlck_flags;		/* state flags NFSLCK_xxx */
+	nfsv4stateid_t	ndlck_stateid;		/* stateid */
+	u_int64_t	ndlck_first;		/* lock byte range */
+	u_int64_t	ndlck_end;
+	struct nfsd_clid ndlck_owner;		/* Owner of open/lock */
+	sa_family_t	ndlck_addrfam;		/* Callback address */
+	union {
+		struct in_addr sin_addr;
+		struct in6_addr sin6_addr;
+	} ndlck_cbaddr;
+	struct nfsd_clid ndlck_clid;	/* and client id */
+};
+
+/*
+ * Structure for referral information.
+ */
+struct nfsreferral {
+	u_char		*nfr_srvlist;	/* List of servers */
+	int		nfr_srvcnt;	/* number of servers */
+	vnode_t		nfr_vp;	/* vnode for referral */
+	uint64_t	nfr_dfileno;	/* assigned dir inode# */
+};
+
+/*
+ * Flags for lc_flags and opsflags for nfsrv_getclient().
+ */
+#define	LCL_NEEDSCONFIRM	0x00000001
+#define	LCL_DONTCLEAN		0x00000002
+#define	LCL_WAKEUPWANTED	0x00000004
+#define	LCL_TCPCALLBACK		0x00000008
+#define	LCL_CALLBACKSON		0x00000010
+#define	LCL_INDEXNOTOK		0x00000020
+#define	LCL_STAMPEDSTABLE	0x00000040
+#define	LCL_EXPIREIT		0x00000080
+#define	LCL_CBDOWN		0x00000100
+#define	LCL_KERBV		0x00000400
+#define	LCL_NAME		0x00000800
+#define	LCL_NEEDSCBNULL		0x00001000
+#define	LCL_GSSINTEGRITY	0x00002000
+#define	LCL_GSSPRIVACY		0x00004000
+#define	LCL_ADMINREVOKED	0x00008000
+#define	LCL_RECLAIMCOMPLETE	0x00010000
+#define	LCL_NFSV41		0x00020000
+#define	LCL_DONEBINDCONN	0x00040000
+#define	LCL_RECLAIMONEFS	0x00080000
+
+#define	LCL_GSS		LCL_KERBV	/* Or of all mechs */
+
+/*
+ * Bits for flags in nfslock and nfsstate.
+ * The access, deny, NFSLCK_READ and NFSLCK_WRITE bits must be defined as
+ * below, in the correct order, so the shifts work for tests.
+ */
+#define	NFSLCK_READACCESS	0x00000001
+#define	NFSLCK_WRITEACCESS	0x00000002
+#define	NFSLCK_ACCESSBITS	(NFSLCK_READACCESS | NFSLCK_WRITEACCESS)
+#define	NFSLCK_SHIFT		2
+#define	NFSLCK_READDENY		0x00000004
+#define	NFSLCK_WRITEDENY	0x00000008
+#define	NFSLCK_DENYBITS		(NFSLCK_READDENY | NFSLCK_WRITEDENY)
+#define	NFSLCK_SHAREBITS 						\
+    (NFSLCK_READACCESS|NFSLCK_WRITEACCESS|NFSLCK_READDENY|NFSLCK_WRITEDENY)
+#define	NFSLCK_LOCKSHIFT	4
+#define	NFSLCK_READ		0x00000010
+#define	NFSLCK_WRITE		0x00000020
+#define	NFSLCK_BLOCKING		0x00000040
+#define	NFSLCK_RECLAIM		0x00000080
+#define	NFSLCK_OPENTOLOCK	0x00000100
+#define	NFSLCK_TEST		0x00000200
+#define	NFSLCK_LOCK		0x00000400
+#define	NFSLCK_UNLOCK		0x00000800
+#define	NFSLCK_OPEN		0x00001000
+#define	NFSLCK_CLOSE		0x00002000
+#define	NFSLCK_CHECK		0x00004000
+#define	NFSLCK_RELEASE		0x00008000
+#define	NFSLCK_NEEDSCONFIRM	0x00010000
+#define	NFSLCK_CONFIRM		0x00020000
+#define	NFSLCK_DOWNGRADE	0x00040000
+#define	NFSLCK_DELEGREAD	0x00080000
+#define	NFSLCK_DELEGWRITE	0x00100000
+#define	NFSLCK_DELEGCUR		0x00200000
+#define	NFSLCK_DELEGPREV	0x00400000
+#define	NFSLCK_OLDDELEG		0x00800000
+#define	NFSLCK_DELEGRECALL	0x01000000
+#define	NFSLCK_SETATTR		0x02000000
+#define	NFSLCK_DELEGPURGE	0x04000000
+#define	NFSLCK_DELEGRETURN	0x08000000
+#define	NFSLCK_WANTWDELEG	0x10000000
+#define	NFSLCK_WANTRDELEG	0x20000000
+#define	NFSLCK_WANTNODELEG	0x40000000
+#define	NFSLCK_WANTBITS							\
+    (NFSLCK_WANTWDELEG | NFSLCK_WANTRDELEG | NFSLCK_WANTNODELEG)
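
The ordering constraint noted at the top of this block (access, deny and lock
bits related by fixed shifts) can be checked mechanically; a minimal
user-space sketch with the values copied from the defines above:

    #include <assert.h>

    int
    main(void)
    {
            /* Values copied from the NFSLCK_* defines above. */
            unsigned readaccess = 0x00000001, writeaccess = 0x00000002;
            unsigned readdeny = 0x00000004, writedeny = 0x00000008;
            unsigned readlock = 0x00000010, writelock = 0x00000020;

            /* Shifting an access bit yields the matching deny or lock bit. */
            assert((readaccess << 2) == readdeny);    /* NFSLCK_SHIFT */
            assert((writeaccess << 2) == writedeny);
            assert((readaccess << 4) == readlock);    /* NFSLCK_LOCKSHIFT */
            assert((writeaccess << 4) == writelock);
            return (0);
    }
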
+
+/* And bits for nid_flag */
+#define	NFSID_INITIALIZE	0x0001
+#define	NFSID_ADDUID		0x0002
+#define	NFSID_DELUID		0x0004
+#define	NFSID_ADDUSERNAME	0x0008
+#define	NFSID_DELUSERNAME	0x0010
+#define	NFSID_ADDGID		0x0020
+#define	NFSID_DELGID		0x0040
+#define	NFSID_ADDGROUPNAME	0x0080
+#define	NFSID_DELGROUPNAME	0x0100
+
+/*
+ * fs.nfs sysctl(3) identifiers
+ */
+#define	NFS_NFSSTATS	1		/* struct: struct nfsstats */
+
+/*
+ * Here is the definition of the attribute bits array and macros that
+ * manipulate it.
+ * THE MACROS MUST BE MANUALLY MODIFIED IF NFSATTRBIT_MAXWORDS CHANGES!!
+ * It is (NFSATTRBIT_MAX + 31) / 32.
+ */
+#define	NFSATTRBIT_MAXWORDS	3
+
+typedef struct {
+	u_int32_t bits[NFSATTRBIT_MAXWORDS];
+} nfsattrbit_t;
+
+#define	NFSZERO_ATTRBIT(b) do {						\
+	(b)->bits[0] = 0;						\
+	(b)->bits[1] = 0;						\
+	(b)->bits[2] = 0;						\
+} while (0)
+
+#define	NFSSET_ATTRBIT(t, f) do {					\
+	(t)->bits[0] = (f)->bits[0];			 		\
+	(t)->bits[1] = (f)->bits[1];					\
+	(t)->bits[2] = (f)->bits[2];					\
+} while (0)
+
+#define	NFSSETSUPP_ATTRBIT(b, n) do { 					\
+	(b)->bits[0] = NFSATTRBIT_SUPP0; 				\
+	(b)->bits[1] = (NFSATTRBIT_SUPP1 | NFSATTRBIT_SUPPSETONLY1);	\
+	(b)->bits[2] = (NFSATTRBIT_SUPP2 | NFSATTRBIT_SUPPSETONLY2);	\
+	if (((n)->nd_flag & ND_NFSV41) == 0) {				\
+		(b)->bits[1] &= ~NFSATTRBIT_NFSV41_1;			\
+		(b)->bits[2] &= ~NFSATTRBIT_NFSV41_2;			\
+	}								\
+} while (0)
+
+#define	NFSISSET_ATTRBIT(b, p)	((b)->bits[(p) / 32] & (1 << ((p) % 32)))
+#define	NFSSETBIT_ATTRBIT(b, p)	((b)->bits[(p) / 32] |= (1 << ((p) % 32)))
+#define	NFSCLRBIT_ATTRBIT(b, p)	((b)->bits[(p) / 32] &= ~(1 << ((p) % 32)))
+
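
As a standalone illustration of the word/bit indexing used by the three
single-bit macros above (attribute numbers are small integers defined in
nfsproto.h; 40 below is just an arbitrary example):

    #include <assert.h>
    #include <stdint.h>

    #define MAXWORDS        3
    typedef struct { uint32_t bits[MAXWORDS]; } attrbits;

    /* Same indexing as above: word = p / 32, bit = p % 32. */
    #define SETBIT(b, p)    ((b)->bits[(p) / 32] |= (1 << ((p) % 32)))
    #define ISSET(b, p)     ((b)->bits[(p) / 32] & (1 << ((p) % 32)))
    #define CLRBIT(b, p)    ((b)->bits[(p) / 32] &= ~(1 << ((p) % 32)))

    int
    main(void)
    {
            attrbits b = { { 0, 0, 0 } };

            SETBIT(&b, 40);         /* attribute 40 lands in word 1, bit 8 */
            assert(ISSET(&b, 40) && b.bits[1] == (1u << 8));
            CLRBIT(&b, 40);
            assert(!ISSET(&b, 40));
            return (0);
    }
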
+#define	NFSCLRALL_ATTRBIT(b, a)	do { 					\
+	(b)->bits[0] &= ~((a)->bits[0]);	 			\
+	(b)->bits[1] &= ~((a)->bits[1]);	 			\
+	(b)->bits[2] &= ~((a)->bits[2]);				\
+} while (0)
+
+#define	NFSCLRNOT_ATTRBIT(b, a)	do { 					\
+	(b)->bits[0] &= ((a)->bits[0]);		 			\
+	(b)->bits[1] &= ((a)->bits[1]);		 			\
+	(b)->bits[2] &= ((a)->bits[2]);		 			\
+} while (0)
+
+#define	NFSCLRNOTFILLABLE_ATTRBIT(b, n) do { 				\
+	(b)->bits[0] &= NFSATTRBIT_SUPP0;	 			\
+	(b)->bits[1] &= NFSATTRBIT_SUPP1;				\
+	(b)->bits[2] &= NFSATTRBIT_SUPP2;				\
+	if (((n)->nd_flag & ND_NFSV41) == 0) {				\
+		(b)->bits[1] &= ~NFSATTRBIT_NFSV41_1;			\
+		(b)->bits[2] &= ~NFSATTRBIT_NFSV41_2;			\
+	}								\
+} while (0)
+
+#define	NFSCLRNOTSETABLE_ATTRBIT(b, n) do { 				\
+	(b)->bits[0] &= NFSATTRBIT_SETABLE0;	 			\
+	(b)->bits[1] &= NFSATTRBIT_SETABLE1;				\
+	(b)->bits[2] &= NFSATTRBIT_SETABLE2;				\
+	if (((n)->nd_flag & ND_NFSV41) == 0)				\
+		(b)->bits[2] &= ~NFSATTRBIT_NFSV41_2;			\
+} while (0)
+
+#define	NFSNONZERO_ATTRBIT(b)	((b)->bits[0] || (b)->bits[1] || (b)->bits[2])
+#define	NFSEQUAL_ATTRBIT(b, p)	((b)->bits[0] == (p)->bits[0] &&	\
+	(b)->bits[1] == (p)->bits[1] && (b)->bits[2] == (p)->bits[2])
+
+#define	NFSGETATTR_ATTRBIT(b) do { 					\
+	(b)->bits[0] = NFSATTRBIT_GETATTR0;	 			\
+	(b)->bits[1] = NFSATTRBIT_GETATTR1;				\
+	(b)->bits[2] = NFSATTRBIT_GETATTR2;				\
+} while (0)
+
+#define	NFSWCCATTR_ATTRBIT(b) do { 					\
+	(b)->bits[0] = NFSATTRBIT_WCCATTR0;	 			\
+	(b)->bits[1] = NFSATTRBIT_WCCATTR1;				\
+	(b)->bits[2] = NFSATTRBIT_WCCATTR2;				\
+} while (0)
+
+#define	NFSWRITEGETATTR_ATTRBIT(b) do { 				\
+	(b)->bits[0] = NFSATTRBIT_WRITEGETATTR0;			\
+	(b)->bits[1] = NFSATTRBIT_WRITEGETATTR1;			\
+	(b)->bits[2] = NFSATTRBIT_WRITEGETATTR2;			\
+} while (0)
+
+#define	NFSCBGETATTR_ATTRBIT(b, c) do { 				\
+	(c)->bits[0] = ((b)->bits[0] & NFSATTRBIT_CBGETATTR0);		\
+	(c)->bits[1] = ((b)->bits[1] & NFSATTRBIT_CBGETATTR1);		\
+	(c)->bits[2] = ((b)->bits[2] & NFSATTRBIT_CBGETATTR2);		\
+} while (0)
+
+#define	NFSPATHCONF_GETATTRBIT(b) do { 					\
+	(b)->bits[0] = NFSGETATTRBIT_PATHCONF0;		 		\
+	(b)->bits[1] = NFSGETATTRBIT_PATHCONF1;				\
+	(b)->bits[2] = NFSGETATTRBIT_PATHCONF2;				\
+} while (0)
+
+#define	NFSSTATFS_GETATTRBIT(b)	do { 					\
+	(b)->bits[0] = NFSGETATTRBIT_STATFS0;	 			\
+	(b)->bits[1] = NFSGETATTRBIT_STATFS1;				\
+	(b)->bits[2] = NFSGETATTRBIT_STATFS2;				\
+} while (0)
+
+#define	NFSISSETSTATFS_ATTRBIT(b) 					\
+		(((b)->bits[0] & NFSATTRBIT_STATFS0) || 		\
+		 ((b)->bits[1] & NFSATTRBIT_STATFS1) ||			\
+		 ((b)->bits[2] & NFSATTRBIT_STATFS2))
+
+#define	NFSCLRSTATFS_ATTRBIT(b)	do { 					\
+	(b)->bits[0] &= ~NFSATTRBIT_STATFS0;	 			\
+	(b)->bits[1] &= ~NFSATTRBIT_STATFS1;				\
+	(b)->bits[2] &= ~NFSATTRBIT_STATFS2;				\
+} while (0)
+
+#define	NFSREADDIRPLUS_ATTRBIT(b) do { 					\
+	(b)->bits[0] = NFSATTRBIT_READDIRPLUS0;		 		\
+	(b)->bits[1] = NFSATTRBIT_READDIRPLUS1;				\
+	(b)->bits[2] = NFSATTRBIT_READDIRPLUS2;				\
+} while (0)
+
+#define	NFSREFERRAL_ATTRBIT(b) do { 					\
+	(b)->bits[0] = NFSATTRBIT_REFERRAL0;		 		\
+	(b)->bits[1] = NFSATTRBIT_REFERRAL1;				\
+	(b)->bits[2] = NFSATTRBIT_REFERRAL2;				\
+} while (0)
+
+/*
+ * Store uid, gid creds that were used when the stateid was acquired.
+ * The RPC layer allows NFS_MAXGRPS + 1 groups to go out on the wire,
+ * so that's how many get stored here.
+ */
+struct nfscred {
+	uid_t 		nfsc_uid;
+	gid_t		nfsc_groups[NFS_MAXGRPS + 1];
+	int		nfsc_ngroups;
+};
+
+/*
+ * Constants that define the file handle for the V4 root directory.
+ * (The FSID must never be used by other file systems that are exported.)
+ */
+#define	NFSV4ROOT_FSID0		((int32_t) -1)
+#define	NFSV4ROOT_FSID1		((int32_t) -1)
+#define	NFSV4ROOT_REFERRAL	((int32_t) -2)
+#define	NFSV4ROOT_INO		2	/* It's traditional */
+#define	NFSV4ROOT_GEN		1
+
+/*
+ * The set of signals that interrupt an I/O in progress for NFSMNT_INT mounts.
+ * What should be in this set is open to debate, but I believe that since
+ * I/O system calls on ufs are never interrupted by signals the set should
+ * be minimal. My reasoning is that many current programs that use signals
+ * such as SIGALRM will not expect file I/O system calls to be interrupted
+ * by them and break.
+ */
+#if defined(_KERNEL) || defined(KERNEL)
+
+struct uio; struct buf; struct vattr; struct nameidata;	/* XXX */
+
+/*
+ * Socket errors ignored for connectionless sockets?
+ * For now, ignore them all
+ */
+#define	NFSIGNORE_SOERROR(s, e) 					\
+		((e) != EINTR && (e) != ERESTART && (e) != EWOULDBLOCK && \
+		((s) & PR_CONNREQUIRED) == 0)
+
+
+/*
+ * This structure holds socket information for a connection. Used by the
+ * client and the server for callbacks.
+ */
+struct nfssockreq {
+	NFSSOCKADDR_T	nr_nam;
+	int		nr_sotype;
+	int		nr_soproto;
+	int		nr_soflags;
+	struct ucred	*nr_cred;
+	int		nr_lock;
+	NFSMUTEX_T	nr_mtx;
+	u_int32_t	nr_prog;
+	u_int32_t	nr_vers;
+	struct __rpc_client *nr_client;
+	AUTH		*nr_auth;
+};
+
+/*
+ * And associated nr_lock bits.
+ */
+#define	NFSR_SNDLOCK		0x01
+#define	NFSR_WANTSND		0x02
+#define	NFSR_RCVLOCK		0x04
+#define	NFSR_WANTRCV		0x08
+#define	NFSR_RESERVEDPORT	0x10
+#define	NFSR_LOCALHOST		0x20
+
+/*
+ * Queue head for nfsreq's
+ */
+TAILQ_HEAD(nfsreqhead, nfsreq);
+
+/* This is the only nfsreq R_xxx flag still used. */
+#define	R_DONTRECOVER	0x00000100	/* don't initiate recovery when this
+					   rpc gets a stale state reply */
+
+/*
+ * Network address hash list element
+ */
+union nethostaddr {
+	struct in_addr	had_inet;
+	struct in6_addr had_inet6;
+};
+
+/*
+ * Structure of list of mechanisms.
+ */
+struct nfsgss_mechlist {
+	int	len;
+	const u_char	*str;
+	int	totlen;
+};
+#define	KERBV_MECH	0	/* position in list */
+
+/*
+ * This structure is used by the server for describing each request.
+ */
+struct nfsrv_descript {
+	mbuf_t			nd_mrep;	/* Request mbuf list */
+	mbuf_t			nd_md;		/* Current dissect mbuf */
+	mbuf_t			nd_mreq;	/* Reply mbuf list */
+	mbuf_t			nd_mb;		/* Current build mbuf */
+	NFSSOCKADDR_T		nd_nam;		/* and socket addr */
+	NFSSOCKADDR_T		nd_nam2;	/* return socket addr */
+	caddr_t			nd_dpos;	/* Current dissect pos */
+	caddr_t			nd_bpos;	/* Current build pos */
+	u_int64_t		nd_flag;	/* nd_flag */
+	u_int16_t		nd_procnum;	/* RPC # */
+	u_int32_t		nd_repstat;	/* Reply status */
+	int			*nd_errp;	/* Pointer to ret status */
+	u_int32_t		nd_retxid;	/* Reply xid */
+	struct nfsrvcache	*nd_rp;		/* Assoc. cache entry */
+	fhandle_t		nd_fh;		/* File handle */
+	struct ucred		*nd_cred;	/* Credentials */
+	uid_t			nd_saveduid;	/* Saved uid */
+	u_int64_t		nd_sockref;	/* Rcv socket ref# */
+	u_int64_t		nd_compref;	/* Compound RPC ref# */
+	time_t			nd_tcpconntime;	/* Time TCP connection est. */
+	nfsquad_t		nd_clientid;	/* Implied clientid */
+	int			nd_gssnamelen;	/* principal name length */
+	char			*nd_gssname;	/* principal name */
+	uint32_t		*nd_slotseq;	/* ptr to slot seq# in req */
+	uint8_t			nd_sessionid[NFSX_V4SESSIONID];	/* Session id */
+	uint32_t		nd_slotid;	/* Slotid for this RPC */
+	SVCXPRT			*nd_xprt;	/* Server RPC handle */
+	uint32_t		*nd_sequence;	/* Sequence Op. ptr */
+	nfsv4stateid_t		nd_curstateid;	/* Current StateID */
+	nfsv4stateid_t		nd_savedcurstateid; /* Saved Current StateID */
+};
+
+#define	nd_princlen	nd_gssnamelen
+#define	nd_principal	nd_gssname
+
+/* Bits for "nd_flag" */
+#define	ND_DONTSAVEREPLY 	0x00000001
+#define	ND_SAVEREPLY		0x00000002
+#define	ND_NFSV2		0x00000004
+#define	ND_NFSV3		0x00000008
+#define	ND_NFSV4		0x00000010
+#define	ND_KERBV		0x00000020
+#define	ND_GSSINTEGRITY		0x00000040
+#define	ND_GSSPRIVACY		0x00000080
+#define	ND_WINDOWVERF		0x00000100
+#define	ND_GSSINITREPLY		0x00000200
+#define	ND_STREAMSOCK		0x00000400
+#define	ND_PUBLOOKUP		0x00000800
+#define	ND_USEGSSNAME		0x00001000
+#define	ND_SAMETCPCONN		0x00002000
+#define	ND_IMPLIEDCLID		0x00004000
+#define	ND_NOMOREDATA		0x00008000
+#define	ND_V4WCCATTR		0x00010000
+#define	ND_NFSCB		0x00020000
+#define	ND_AUTHNONE		0x00040000
+#define	ND_EXAUTHSYS		0x00080000
+#define	ND_EXGSS		0x00100000
+#define	ND_EXGSSINTEGRITY	0x00200000
+#define	ND_EXGSSPRIVACY		0x00400000
+#define	ND_INCRSEQID		0x00800000
+#define	ND_NFSCL		0x01000000
+#define	ND_NFSV41		0x02000000
+#define	ND_HASSEQUENCE		0x04000000
+#define	ND_CACHETHIS		0x08000000
+#define	ND_LASTOP		0x10000000
+#define	ND_LOOPBADSESS		0x20000000
+#define	ND_DSSERVER		0x40000000
+#define	ND_CURSTATEID		0x80000000
+#define	ND_SAVEDCURSTATEID	0x100000000
+#define	ND_HASSLOTID		0x200000000
+
+/*
+ * ND_GSS should be the "or" of all GSS type authentications.
+ */
+#define	ND_GSS		(ND_KERBV)
+
+struct nfsv4_opflag {
+	int	retfh;
+	int	needscfh;
+	int	savereply;
+	int	modifyfs;
+	int	lktype;
+	int	needsseq;
+	int	loopbadsess;
+};
+
+/*
+ * Flags used to indicate what to do w.r.t. seqid checking.
+ */
+#define	NFSRVSEQID_FIRST	0x01
+#define	NFSRVSEQID_LAST		0x02
+#define	NFSRVSEQID_OPEN		0x04
+
+/*
+ * assign a doubly linked list to a new head
+ * and prepend one list into another.
+ */
+#define	LIST_NEWHEAD(nhead, ohead, field) do { 				\
+	if (((nhead)->lh_first = (ohead)->lh_first) != NULL) 		\
+		(ohead)->lh_first->field.le_prev = &(nhead)->lh_first; 	\
+	(ohead)->lh_first = NULL; 					\
+    } while (0)
+
+#define	LIST_PREPEND(head, phead, lelm, field) do {			\
+	if ((head)->lh_first != NULL) {					\
+		(lelm)->field.le_next = (head)->lh_first;		\
+		(lelm)->field.le_next->field.le_prev =			\
+		    &(lelm)->field.le_next;				\
+	}								\
+	(head)->lh_first = (phead)->lh_first;				\
+	(head)->lh_first->field.le_prev = &(head)->lh_first;		\
+    } while (0)
+
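
A minimal user-space sketch of what LIST_NEWHEAD does, assuming a BSD-style
<sys/queue.h> is available (the elements move wholesale to the new head and
the old head is left empty):

    #include <assert.h>
    #include <sys/queue.h>

    #define LIST_NEWHEAD(nhead, ohead, field) do {                      \
            if (((nhead)->lh_first = (ohead)->lh_first) != NULL)        \
                    (ohead)->lh_first->field.le_prev =                  \
                        &(nhead)->lh_first;                             \
            (ohead)->lh_first = NULL;                                   \
        } while (0)

    struct item { LIST_ENTRY(item) link; };
    LIST_HEAD(itemhead, item);

    int
    main(void)
    {
            struct itemhead oldh = LIST_HEAD_INITIALIZER(oldh);
            struct itemhead newh = LIST_HEAD_INITIALIZER(newh);
            struct item it;

            LIST_INSERT_HEAD(&oldh, &it, link);
            LIST_NEWHEAD(&newh, &oldh, link);       /* move the whole list */
            assert(LIST_EMPTY(&oldh) && LIST_FIRST(&newh) == &it);
            return (0);
    }
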
+/*
+ * File handle structure for client. Malloc'd to the correct length with
+ * malloc type M_NFSFH.
+ */
+struct nfsfh {
+	u_int16_t	nfh_len;	/* Length of file handle */
+	u_int8_t	nfh_fh[1];	/* and the file handle */
+};
+
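
The nfh_fh[1] member above is the usual variable-length trailer; a minimal
user-space sketch of how such a handle might be sized and filled (the
kernel's malloc type M_NFSFH has no user-space equivalent):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct nfsfh {
            uint16_t nfh_len;       /* Length of file handle */
            uint8_t  nfh_fh[1];     /* and the file handle */
    };

    /* Allocate a handle large enough for 'len' bytes of opaque data. */
    static struct nfsfh *
    makefh(const void *data, uint16_t len)
    {
            struct nfsfh *fhp;

            fhp = malloc(offsetof(struct nfsfh, nfh_fh) + len);
            if (fhp != NULL) {
                    fhp->nfh_len = len;
                    memcpy(fhp->nfh_fh, data, len);
            }
            return (fhp);
    }
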
+/*
+ * File handle structure for server. The NFSRV_MAXFH constant is
+ * set in nfsdport.h. I use a 32-bit length, so that alignment is
+ * preserved.
+ */
+struct nfsrvfh {
+	u_int32_t	nfsrvfh_len;
+	u_int8_t	nfsrvfh_data[NFSRV_MAXFH];
+};
+
+/*
+ * This structure is used for sleep locks on the NFSv4 nfsd threads and
+ * NFSv4 client data structures.
+ */
+struct nfsv4lock {
+	u_int32_t	nfslock_usecnt;
+	u_int8_t	nfslock_lock;
+};
+#define	NFSV4LOCK_LOCK		0x01
+#define	NFSV4LOCK_LOCKWANTED	0x02
+#define	NFSV4LOCK_WANTED	0x04
+
+/*
+ * Values for the override argument for nfsvno_accchk().
+ */
+#define	NFSACCCHK_NOOVERRIDE		0
+#define	NFSACCCHK_ALLOWROOT		1
+#define	NFSACCCHK_ALLOWOWNER		2
+
+/*
+ * and values for the vpislocked argument for nfsvno_accchk().
+ */
+#define	NFSACCCHK_VPNOTLOCKED		0
+#define	NFSACCCHK_VPISLOCKED		1
+
+/*
+ * Slot for the NFSv4.1 Sequence Op.
+ */
+struct nfsslot {
+	int		nfssl_inprog;
+	uint32_t	nfssl_seq;
+	struct mbuf	*nfssl_reply;
+};
+
+/* Enumerated type for nfsuserd state. */
+typedef enum { NOTRUNNING=0, STARTSTOP=1, RUNNING=2 } nfsuserd_state;
+
+#endif	/* _KERNEL */
+
+#endif	/* _NFS_NFS_H_ */
diff --git a/freebsd/sys/fs/nfs/nfs_commonacl.c b/freebsd/sys/fs/nfs/nfs_commonacl.c
new file mode 100644
index 0000000..3e8cfe2
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfs_commonacl.c
@@ -0,0 +1,485 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef APPLEKEXT
+#include <fs/nfs/nfsport.h>
+
+extern int nfsrv_useacl;
+#endif
+
+static int nfsrv_acemasktoperm(u_int32_t acetype, u_int32_t mask, int owner,
+    enum vtype type, acl_perm_t *permp);
+
+/*
+ * Handle xdr for an ace.
+ */
+APPLESTATIC int
+nfsrv_dissectace(struct nfsrv_descript *nd, struct acl_entry *acep,
+    int *aceerrp, int *acesizep, NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	int len, gotid = 0, owner = 0, error = 0, aceerr = 0;
+	u_char *name, namestr[NFSV4_SMALLSTR + 1];
+	u_int32_t flag, mask, acetype;
+	gid_t gid;
+	uid_t uid;
+
+	*aceerrp = 0;
+	acep->ae_flags = 0;
+	NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+	acetype = fxdr_unsigned(u_int32_t, *tl++);
+	flag = fxdr_unsigned(u_int32_t, *tl++);
+	mask = fxdr_unsigned(u_int32_t, *tl++);
+	len = fxdr_unsigned(int, *tl);
+	if (len < 0) {
+		error = NFSERR_BADXDR;
+		goto nfsmout;
+	} else if (len == 0) {
+		/* Netapp filers return a 0 length who for nil users */
+		acep->ae_tag = ACL_UNDEFINED_TAG;
+		acep->ae_id = ACL_UNDEFINED_ID;
+		acep->ae_perm = (acl_perm_t)0;
+		acep->ae_entry_type = ACL_ENTRY_TYPE_DENY;
+		if (acesizep)
+			*acesizep = 4 * NFSX_UNSIGNED;
+		error = 0;
+		goto nfsmout;
+	}
+	if (len > NFSV4_SMALLSTR)
+		name = malloc(len + 1, M_NFSSTRING, M_WAITOK);
+	else
+		name = namestr;
+	error = nfsrv_mtostr(nd, name, len);
+	if (error) {
+		if (len > NFSV4_SMALLSTR)
+			free(name, M_NFSSTRING);
+		goto nfsmout;
+	}
+	if (len == 6) {
+		if (!NFSBCMP(name, "OWNER@", 6)) {
+			acep->ae_tag = ACL_USER_OBJ;
+			acep->ae_id = ACL_UNDEFINED_ID;
+			owner = 1;
+			gotid = 1;
+		} else if (!NFSBCMP(name, "GROUP@", 6)) {
+			acep->ae_tag = ACL_GROUP_OBJ;
+			acep->ae_id = ACL_UNDEFINED_ID;
+			gotid = 1;
+		}
+	} else if (len == 9 && !NFSBCMP(name, "EVERYONE@", 9)) {
+		acep->ae_tag = ACL_EVERYONE;
+		acep->ae_id = ACL_UNDEFINED_ID;
+		gotid = 1;
+	}
+	if (gotid == 0) {
+		if (flag & NFSV4ACE_IDENTIFIERGROUP) {
+			acep->ae_tag = ACL_GROUP;
+			aceerr = nfsv4_strtogid(nd, name, len, &gid, p);
+			if (aceerr == 0)
+				acep->ae_id = (uid_t)gid;
+		} else {
+			acep->ae_tag = ACL_USER;
+			aceerr = nfsv4_strtouid(nd, name, len, &uid, p);
+			if (aceerr == 0)
+				acep->ae_id = uid;
+		}
+	}
+	if (len > NFSV4_SMALLSTR)
+		free(name, M_NFSSTRING);
+
+	if (aceerr == 0) {
+		/*
+		 * Handle the flags.
+		 */
+		flag &= ~NFSV4ACE_IDENTIFIERGROUP;
+		if (flag & NFSV4ACE_FILEINHERIT) {
+			flag &= ~NFSV4ACE_FILEINHERIT;
+			acep->ae_flags |= ACL_ENTRY_FILE_INHERIT;
+		}
+		if (flag & NFSV4ACE_DIRECTORYINHERIT) {
+			flag &= ~NFSV4ACE_DIRECTORYINHERIT;
+			acep->ae_flags |= ACL_ENTRY_DIRECTORY_INHERIT;
+		}
+		if (flag & NFSV4ACE_NOPROPAGATEINHERIT) {
+			flag &= ~NFSV4ACE_NOPROPAGATEINHERIT;
+			acep->ae_flags |= ACL_ENTRY_NO_PROPAGATE_INHERIT;
+		}
+		if (flag & NFSV4ACE_INHERITONLY) {
+			flag &= ~NFSV4ACE_INHERITONLY;
+			acep->ae_flags |= ACL_ENTRY_INHERIT_ONLY;
+		}
+		if (flag & NFSV4ACE_SUCCESSFULACCESS) {
+			flag &= ~NFSV4ACE_SUCCESSFULACCESS;
+			acep->ae_flags |= ACL_ENTRY_SUCCESSFUL_ACCESS;
+		}
+		if (flag & NFSV4ACE_FAILEDACCESS) {
+			flag &= ~NFSV4ACE_FAILEDACCESS;
+			acep->ae_flags |= ACL_ENTRY_FAILED_ACCESS;
+		}
+		/*
+		 * Set ae_entry_type.
+		 */
+		if (acetype == NFSV4ACE_ALLOWEDTYPE)
+			acep->ae_entry_type = ACL_ENTRY_TYPE_ALLOW;
+		else if (acetype == NFSV4ACE_DENIEDTYPE)
+			acep->ae_entry_type = ACL_ENTRY_TYPE_DENY;
+		else if (acetype == NFSV4ACE_AUDITTYPE)
+			acep->ae_entry_type = ACL_ENTRY_TYPE_AUDIT;
+		else if (acetype == NFSV4ACE_ALARMTYPE)
+			acep->ae_entry_type = ACL_ENTRY_TYPE_ALARM;
+		else
+			aceerr = NFSERR_ATTRNOTSUPP;
+	}
+
+	/*
+	 * Now, check for unsupported flag bits.
+	 */
+	if (aceerr == 0 && flag != 0)
+		aceerr = NFSERR_ATTRNOTSUPP;
+
+	/*
+	 * And turn the mask into perm bits.
+	 */
+	if (aceerr == 0)
+		aceerr = nfsrv_acemasktoperm(acetype, mask, owner, VREG,
+		    &acep->ae_perm);
+	*aceerrp = aceerr;
+	if (acesizep)
+		*acesizep = NFSM_RNDUP(len) + (4 * NFSX_UNSIGNED);
+	error = 0;
+nfsmout:
+	NFSEXITCODE(error);
+	return (error);
+}
+
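
The *acesizep value computed above is plain XDR sizing: four 4-byte words for
type, flag, mask and name length, plus the name rounded up to a 4-byte
boundary. A standalone check, assuming NFSX_UNSIGNED is the 4-byte XDR unit
and NFSM_RNDUP rounds up to a multiple of it (both standard XDR conventions):

    #include <assert.h>

    #define XDR_UNIT        4
    #define RNDUP(n)        (((n) + XDR_UNIT - 1) & ~(XDR_UNIT - 1))

    int
    main(void)
    {
            /* "OWNER@" is 6 bytes: padded to 8, plus 4 header words = 24. */
            assert(RNDUP(6) + 4 * XDR_UNIT == 24);
            /* "EVERYONE@" is 9 bytes: padded to 12, plus 16 = 28. */
            assert(RNDUP(9) + 4 * XDR_UNIT == 28);
            return (0);
    }
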
+/*
+ * Turn an NFSv4 ace mask into R/W/X flag bits.
+ */
+static int
+nfsrv_acemasktoperm(u_int32_t acetype, u_int32_t mask, int owner,
+    enum vtype type, acl_perm_t *permp)
+{
+	acl_perm_t perm = 0x0;
+	int error = 0;
+
+	if (mask & NFSV4ACE_READDATA) {
+		mask &= ~NFSV4ACE_READDATA;
+		perm |= ACL_READ_DATA;
+	}
+	if (mask & NFSV4ACE_LISTDIRECTORY) {
+		mask &= ~NFSV4ACE_LISTDIRECTORY;
+		perm |= ACL_LIST_DIRECTORY;
+	}
+	if (mask & NFSV4ACE_WRITEDATA) {
+		mask &= ~NFSV4ACE_WRITEDATA;
+		perm |= ACL_WRITE_DATA;
+	}
+	if (mask & NFSV4ACE_ADDFILE) {
+		mask &= ~NFSV4ACE_ADDFILE;
+		perm |= ACL_ADD_FILE;
+	}
+	if (mask & NFSV4ACE_APPENDDATA) {
+		mask &= ~NFSV4ACE_APPENDDATA;
+		perm |= ACL_APPEND_DATA;
+	}
+	if (mask & NFSV4ACE_ADDSUBDIRECTORY) {
+		mask &= ~NFSV4ACE_ADDSUBDIRECTORY;
+		perm |= ACL_ADD_SUBDIRECTORY;
+	}
+	if (mask & NFSV4ACE_READNAMEDATTR) {
+		mask &= ~NFSV4ACE_READNAMEDATTR;
+		perm |= ACL_READ_NAMED_ATTRS;
+	}
+	if (mask & NFSV4ACE_WRITENAMEDATTR) {
+		mask &= ~NFSV4ACE_WRITENAMEDATTR;
+		perm |= ACL_WRITE_NAMED_ATTRS;
+	}
+	if (mask & NFSV4ACE_EXECUTE) {
+		mask &= ~NFSV4ACE_EXECUTE;
+		perm |= ACL_EXECUTE;
+	}
+	if (mask & NFSV4ACE_SEARCH) {
+		mask &= ~NFSV4ACE_SEARCH;
+		perm |= ACL_EXECUTE;
+	}
+	if (mask & NFSV4ACE_DELETECHILD) {
+		mask &= ~NFSV4ACE_DELETECHILD;
+		perm |= ACL_DELETE_CHILD;
+	}
+	if (mask & NFSV4ACE_READATTRIBUTES) {
+		mask &= ~NFSV4ACE_READATTRIBUTES;
+		perm |= ACL_READ_ATTRIBUTES;
+	}
+	if (mask & NFSV4ACE_WRITEATTRIBUTES) {
+		mask &= ~NFSV4ACE_WRITEATTRIBUTES;
+		perm |= ACL_WRITE_ATTRIBUTES;
+	}
+	if (mask & NFSV4ACE_DELETE) {
+		mask &= ~NFSV4ACE_DELETE;
+		perm |= ACL_DELETE;
+	}
+	if (mask & NFSV4ACE_READACL) {
+		mask &= ~NFSV4ACE_READACL;
+		perm |= ACL_READ_ACL;
+	}
+	if (mask & NFSV4ACE_WRITEACL) {
+		mask &= ~NFSV4ACE_WRITEACL;
+		perm |= ACL_WRITE_ACL;
+	}
+	if (mask & NFSV4ACE_WRITEOWNER) {
+		mask &= ~NFSV4ACE_WRITEOWNER;
+		perm |= ACL_WRITE_OWNER;
+	}
+	if (mask & NFSV4ACE_SYNCHRONIZE) {
+		mask &= ~NFSV4ACE_SYNCHRONIZE;
+		perm |= ACL_SYNCHRONIZE;
+	}
+	if (mask != 0) {
+		error = NFSERR_ATTRNOTSUPP;
+		goto out;
+	}
+	*permp = perm;
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/* local functions */
+static int nfsrv_buildace(struct nfsrv_descript *, u_char *, int,
+    enum vtype, int, int, struct acl_entry *);
+
+/*
+ * This function builds an NFS ace.
+ */
+static int
+nfsrv_buildace(struct nfsrv_descript *nd, u_char *name, int namelen,
+    enum vtype type, int group, int owner, struct acl_entry *ace)
+{
+	u_int32_t *tl, aceflag = 0x0, acemask = 0x0, acetype;
+	int full_len;
+
+	full_len = NFSM_RNDUP(namelen);
+	NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED + full_len);
+
+	/*
+	 * Fill in the ace type.
+	 */
+	if (ace->ae_entry_type & ACL_ENTRY_TYPE_ALLOW)
+		acetype = NFSV4ACE_ALLOWEDTYPE;
+	else if (ace->ae_entry_type & ACL_ENTRY_TYPE_DENY)
+		acetype = NFSV4ACE_DENIEDTYPE;
+	else if (ace->ae_entry_type & ACL_ENTRY_TYPE_AUDIT)
+		acetype = NFSV4ACE_AUDITTYPE;
+	else
+		acetype = NFSV4ACE_ALARMTYPE;
+	*tl++ = txdr_unsigned(acetype);
+
+	/*
+	 * Set the flag bits from the ACL.
+	 */
+	if (ace->ae_flags & ACL_ENTRY_FILE_INHERIT)
+		aceflag |= NFSV4ACE_FILEINHERIT;
+	if (ace->ae_flags & ACL_ENTRY_DIRECTORY_INHERIT)
+		aceflag |= NFSV4ACE_DIRECTORYINHERIT;
+	if (ace->ae_flags & ACL_ENTRY_NO_PROPAGATE_INHERIT)
+		aceflag |= NFSV4ACE_NOPROPAGATEINHERIT;
+	if (ace->ae_flags & ACL_ENTRY_INHERIT_ONLY)
+		aceflag |= NFSV4ACE_INHERITONLY;
+	if (ace->ae_flags & ACL_ENTRY_SUCCESSFUL_ACCESS)
+		aceflag |= NFSV4ACE_SUCCESSFULACCESS;
+	if (ace->ae_flags & ACL_ENTRY_FAILED_ACCESS)
+		aceflag |= NFSV4ACE_FAILEDACCESS;
+	if (group)
+		aceflag |= NFSV4ACE_IDENTIFIERGROUP;
+	*tl++ = txdr_unsigned(aceflag);
+	if (type == VDIR) {
+		if (ace->ae_perm & ACL_LIST_DIRECTORY)
+			acemask |= NFSV4ACE_LISTDIRECTORY;
+		if (ace->ae_perm & ACL_ADD_FILE)
+			acemask |= NFSV4ACE_ADDFILE;
+		if (ace->ae_perm & ACL_ADD_SUBDIRECTORY)
+			acemask |= NFSV4ACE_ADDSUBDIRECTORY;
+		if (ace->ae_perm & ACL_READ_NAMED_ATTRS)
+			acemask |= NFSV4ACE_READNAMEDATTR;
+		if (ace->ae_perm & ACL_WRITE_NAMED_ATTRS)
+			acemask |= NFSV4ACE_WRITENAMEDATTR;
+		if (ace->ae_perm & ACL_EXECUTE)
+			acemask |= NFSV4ACE_SEARCH;
+		if (ace->ae_perm & ACL_DELETE_CHILD)
+			acemask |= NFSV4ACE_DELETECHILD;
+		if (ace->ae_perm & ACL_READ_ATTRIBUTES)
+			acemask |= NFSV4ACE_READATTRIBUTES;
+		if (ace->ae_perm & ACL_WRITE_ATTRIBUTES)
+			acemask |= NFSV4ACE_WRITEATTRIBUTES;
+		if (ace->ae_perm & ACL_DELETE)
+			acemask |= NFSV4ACE_DELETE;
+		if (ace->ae_perm & ACL_READ_ACL)
+			acemask |= NFSV4ACE_READACL;
+		if (ace->ae_perm & ACL_WRITE_ACL)
+			acemask |= NFSV4ACE_WRITEACL;
+		if (ace->ae_perm & ACL_WRITE_OWNER)
+			acemask |= NFSV4ACE_WRITEOWNER;
+		if (ace->ae_perm & ACL_SYNCHRONIZE)
+			acemask |= NFSV4ACE_SYNCHRONIZE;
+	} else {
+		if (ace->ae_perm & ACL_READ_DATA)
+			acemask |= NFSV4ACE_READDATA;
+		if (ace->ae_perm & ACL_WRITE_DATA)
+			acemask |= NFSV4ACE_WRITEDATA;
+		if (ace->ae_perm & ACL_APPEND_DATA)
+			acemask |= NFSV4ACE_APPENDDATA;
+		if (ace->ae_perm & ACL_READ_NAMED_ATTRS)
+			acemask |= NFSV4ACE_READNAMEDATTR;
+		if (ace->ae_perm & ACL_WRITE_NAMED_ATTRS)
+			acemask |= NFSV4ACE_WRITENAMEDATTR;
+		if (ace->ae_perm & ACL_EXECUTE)
+			acemask |= NFSV4ACE_EXECUTE;
+		if (ace->ae_perm & ACL_READ_ATTRIBUTES)
+			acemask |= NFSV4ACE_READATTRIBUTES;
+		if (ace->ae_perm & ACL_WRITE_ATTRIBUTES)
+			acemask |= NFSV4ACE_WRITEATTRIBUTES;
+		if (ace->ae_perm & ACL_DELETE)
+			acemask |= NFSV4ACE_DELETE;
+		if (ace->ae_perm & ACL_READ_ACL)
+			acemask |= NFSV4ACE_READACL;
+		if (ace->ae_perm & ACL_WRITE_ACL)
+			acemask |= NFSV4ACE_WRITEACL;
+		if (ace->ae_perm & ACL_WRITE_OWNER)
+			acemask |= NFSV4ACE_WRITEOWNER;
+		if (ace->ae_perm & ACL_SYNCHRONIZE)
+			acemask |= NFSV4ACE_SYNCHRONIZE;
+	}
+	*tl++ = txdr_unsigned(acemask);
+	*tl++ = txdr_unsigned(namelen);
+	if (full_len - namelen)
+		*(tl + (namelen / NFSX_UNSIGNED)) = 0x0;
+	NFSBCOPY(name, (caddr_t)tl, namelen);
+	return (full_len + 4 * NFSX_UNSIGNED);
+}
+
+/*
+ * Build an NFSv4 ACL.
+ */
+APPLESTATIC int
+nfsrv_buildacl(struct nfsrv_descript *nd, NFSACL_T *aclp, enum vtype type,
+    NFSPROC_T *p)
+{
+	int i, entrycnt = 0, retlen;
+	u_int32_t *entrycntp;
+	int isowner, isgroup, namelen, malloced;
+	u_char *name, namestr[NFSV4_SMALLSTR];
+
+	NFSM_BUILD(entrycntp, u_int32_t *, NFSX_UNSIGNED);
+	retlen = NFSX_UNSIGNED;
+	/*
+	 * Loop through the acl entries, building each one.
+	 */
+	for (i = 0; i < aclp->acl_cnt; i++) {
+		isowner = isgroup = malloced = 0;
+		switch (aclp->acl_entry[i].ae_tag) {
+		case ACL_USER_OBJ:
+			isowner = 1;
+			name = "OWNER@";
+			namelen = 6;
+			break;
+		case ACL_GROUP_OBJ:
+			isgroup = 1;
+			name = "GROUP@";
+			namelen = 6;
+			break;
+		case ACL_EVERYONE:
+			name = "EVERYONE@";
+			namelen = 9;
+			break;
+		case ACL_USER:
+			name = namestr;
+			nfsv4_uidtostr(aclp->acl_entry[i].ae_id, &name,
+			    &namelen, p);
+			if (name != namestr)
+				malloced = 1;
+			break;
+		case ACL_GROUP:
+			isgroup = 1;
+			name = namestr;
+			nfsv4_gidtostr((gid_t)aclp->acl_entry[i].ae_id, &name,
+			    &namelen, p);
+			if (name != namestr)
+				malloced = 1;
+			break;
+		default:
+			continue;
+		}
+		retlen += nfsrv_buildace(nd, name, namelen, type, isgroup,
+		    isowner, &aclp->acl_entry[i]);
+		entrycnt++;
+		if (malloced)
+			free(name, M_NFSSTRING);
+	}
+	*entrycntp = txdr_unsigned(entrycnt);
+	return (retlen);
+}
+
+/*
+ * Compare two NFSv4 acls.
+ * Return 0 if they are the same, 1 if not the same.
+ */
+APPLESTATIC int
+nfsrv_compareacl(NFSACL_T *aclp1, NFSACL_T *aclp2)
+{
+	int i;
+	struct acl_entry *acep1, *acep2;
+
+	if (aclp1->acl_cnt != aclp2->acl_cnt)
+		return (1);
+	acep1 = aclp1->acl_entry;
+	acep2 = aclp2->acl_entry;
+	for (i = 0; i < aclp1->acl_cnt; i++) {
+		if (acep1->ae_tag != acep2->ae_tag)
+			return (1);
+		switch (acep1->ae_tag) {
+		case ACL_GROUP:
+		case ACL_USER:
+			if (acep1->ae_id != acep2->ae_id)
+				return (1);
+			/* fall through */
+		case ACL_USER_OBJ:
+		case ACL_GROUP_OBJ:
+		case ACL_OTHER:
+			if (acep1->ae_perm != acep2->ae_perm)
+				return (1);
+		}
+		acep1++;
+		acep2++;
+	}
+	return (0);
+}
diff --git a/freebsd/sys/fs/nfs/nfs_commonkrpc.c b/freebsd/sys/fs/nfs/nfs_commonkrpc.c
new file mode 100644
index 0000000..f85121f
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfs_commonkrpc.c
@@ -0,0 +1,1436 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1991, 1993, 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Socket operations for use by nfs
+ */
+
+#include "opt_kgssapi.h"
+#include "opt_nfs.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mount.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysctl.h>
+#include <sys/syslog.h>
+#include <sys/vnode.h>
+
+#include <rpc/rpc.h>
+#include <rpc/krpc.h>
+
+#include <kgssapi/krb5/kcrypto.h>
+
+#include <fs/nfs/nfsport.h>
+
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+
+dtrace_nfsclient_nfs23_start_probe_func_t
+		dtrace_nfscl_nfs234_start_probe;
+
+dtrace_nfsclient_nfs23_done_probe_func_t
+		dtrace_nfscl_nfs234_done_probe;
+
+/*
+ * Registered probes by RPC type.
+ */
+uint32_t	nfscl_nfs2_start_probes[NFSV41_NPROCS + 1];
+uint32_t	nfscl_nfs2_done_probes[NFSV41_NPROCS + 1];
+
+uint32_t	nfscl_nfs3_start_probes[NFSV41_NPROCS + 1];
+uint32_t	nfscl_nfs3_done_probes[NFSV41_NPROCS + 1];
+
+uint32_t	nfscl_nfs4_start_probes[NFSV41_NPROCS + 1];
+uint32_t	nfscl_nfs4_done_probes[NFSV41_NPROCS + 1];
+#endif
+
+NFSSTATESPINLOCK;
+NFSREQSPINLOCK;
+NFSDLOCKMUTEX;
+NFSCLSTATEMUTEX;
+extern struct nfsstatsv1 nfsstatsv1;
+extern struct nfsreqhead nfsd_reqq;
+extern int nfscl_ticks;
+extern void (*ncl_call_invalcaches)(struct vnode *);
+extern int nfs_numnfscbd;
+extern int nfscl_debuglevel;
+extern int nfsrv_lease;
+
+SVCPOOL		*nfscbd_pool;
+static int	nfsrv_gsscallbackson = 0;
+static int	nfs_bufpackets = 4;
+static int	nfs_reconnects;
+static int	nfs3_jukebox_delay = 10;
+static int	nfs_skip_wcc_data_onerr = 1;
+static int	nfs_dsretries = 2;
+
+SYSCTL_DECL(_vfs_nfs);
+
+SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
+    "Buffer reservation size 2 < x < 64");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
+    "Number of times the nfs client has had to reconnect");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
+    "Number of seconds to delay a retry after receiving EJUKEBOX");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
+    "Disable weak cache consistency checking when server returns an error");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, dsretries, CTLFLAG_RW, &nfs_dsretries, 0,
+    "Number of retries for a DS RPC before failure");
+
+static void	nfs_down(struct nfsmount *, struct thread *, const char *,
+    int, int);
+static void	nfs_up(struct nfsmount *, struct thread *, const char *,
+    int, int);
+static int	nfs_msg(struct thread *, const char *, const char *, int);
+
+struct nfs_cached_auth {
+	int		ca_refs; /* refcount, including 1 from the cache */
+	uid_t		ca_uid;	 /* uid that corresponds to this auth */
+	AUTH		*ca_auth; /* RPC auth handle */
+};
+
+static int nfsv2_procid[NFS_V3NPROCS] = {
+	NFSV2PROC_NULL,
+	NFSV2PROC_GETATTR,
+	NFSV2PROC_SETATTR,
+	NFSV2PROC_LOOKUP,
+	NFSV2PROC_NOOP,
+	NFSV2PROC_READLINK,
+	NFSV2PROC_READ,
+	NFSV2PROC_WRITE,
+	NFSV2PROC_CREATE,
+	NFSV2PROC_MKDIR,
+	NFSV2PROC_SYMLINK,
+	NFSV2PROC_CREATE,
+	NFSV2PROC_REMOVE,
+	NFSV2PROC_RMDIR,
+	NFSV2PROC_RENAME,
+	NFSV2PROC_LINK,
+	NFSV2PROC_READDIR,
+	NFSV2PROC_NOOP,
+	NFSV2PROC_STATFS,
+	NFSV2PROC_NOOP,
+	NFSV2PROC_NOOP,
+	NFSV2PROC_NOOP,
+};
+
+/*
+ * Initialize sockets and congestion for a new NFS connection.
+ * We do not free the sockaddr on error.
+ * The arguments that are set to NULL indicate what kind of call it is:
+ * cred == NULL --> a call to connect to a pNFS DS
+ * nmp == NULL --> an upcall to userland or an NFSv4.0 callback
+ */
+int
+newnfs_connect(struct nfsmount *nmp, struct nfssockreq *nrp,
+    struct ucred *cred, NFSPROC_T *p, int callback_retry_mult)
+{
+	int rcvreserve, sndreserve;
+	int pktscale, pktscalesav;
+	struct sockaddr *saddr;
+	struct ucred *origcred;
+	CLIENT *client;
+	struct netconfig *nconf;
+	struct socket *so;
+	int one = 1, retries, error = 0;
+	struct thread *td = curthread;
+	SVCXPRT *xprt;
+	struct timeval timo;
+
+	/*
+	 * We need to establish the socket using the credentials of
+	 * the mountpoint.  Some parts of this process (such as
+	 * sobind() and soconnect()) will use the current thread's
+	 * credential instead of the socket credential.  To work
+	 * around this, temporarily change the current thread's
+	 * credential to that of the mountpoint.
+	 *
+	 * XXX: It would be better to explicitly pass the correct
+	 * credential to sobind() and soconnect().
+	 */
+	origcred = td->td_ucred;
+
+	/*
+	 * Use the credential in nr_cred, if not NULL.
+	 */
+	if (nrp->nr_cred != NULL)
+		td->td_ucred = nrp->nr_cred;
+	else
+		td->td_ucred = cred;
+	saddr = nrp->nr_nam;
+
+	if (saddr->sa_family == AF_INET)
+		if (nrp->nr_sotype == SOCK_DGRAM)
+			nconf = getnetconfigent("udp");
+		else
+			nconf = getnetconfigent("tcp");
+	else if (saddr->sa_family == AF_LOCAL)
+		nconf = getnetconfigent("local");
+	else
+		if (nrp->nr_sotype == SOCK_DGRAM)
+			nconf = getnetconfigent("udp6");
+		else
+			nconf = getnetconfigent("tcp6");
+			
+	pktscale = nfs_bufpackets;
+	if (pktscale < 2)
+		pktscale = 2;
+	if (pktscale > 64)
+		pktscale = 64;
+	pktscalesav = pktscale;
+	/*
+	 * soreserve() can fail if sb_max is too small, so shrink pktscale
+	 * and try again if there is an error.
+	 * Print a log message suggesting increasing sb_max.
+	 * Creating a socket and doing this is necessary since, if the
+	 * reservation sizes are too large and will make soreserve() fail,
+	 * the connection will work until a large send is attempted and
+	 * then it will loop in the krpc code.
+	 */
+	so = NULL;
+	saddr = NFSSOCKADDR(nrp->nr_nam, struct sockaddr *);
+	error = socreate(saddr->sa_family, &so, nrp->nr_sotype, 
+	    nrp->nr_soproto, td->td_ucred, td);
+	if (error) {
+		td->td_ucred = origcred;
+		goto out;
+	}
+	do {
+	    if (error != 0 && pktscale > 2) {
+		if (nmp != NULL && nrp->nr_sotype == SOCK_STREAM &&
+		    pktscale == pktscalesav)
+		    printf("Consider increasing kern.ipc.maxsockbuf\n");
+		pktscale--;
+	    }
+	    if (nrp->nr_sotype == SOCK_DGRAM) {
+		if (nmp != NULL) {
+			sndreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
+			    pktscale;
+			rcvreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
+			    pktscale;
+		} else {
+			sndreserve = rcvreserve = 1024 * pktscale;
+		}
+	    } else {
+		if (nrp->nr_sotype != SOCK_STREAM)
+			panic("nfscon sotype");
+		if (nmp != NULL) {
+			sndreserve = (NFS_MAXBSIZE + NFS_MAXXDR +
+			    sizeof (u_int32_t)) * pktscale;
+			rcvreserve = (NFS_MAXBSIZE + NFS_MAXXDR +
+			    sizeof (u_int32_t)) * pktscale;
+		} else {
+			sndreserve = rcvreserve = 1024 * pktscale;
+		}
+	    }
+	    error = soreserve(so, sndreserve, rcvreserve);
+	    if (error != 0 && nmp != NULL && nrp->nr_sotype == SOCK_STREAM &&
+		pktscale <= 2)
+		printf("Must increase kern.ipc.maxsockbuf or reduce"
+		    " rsize, wsize\n");
+	} while (error != 0 && pktscale > 2);
+	soclose(so);
+	if (error) {
+		td->td_ucred = origcred;
+		goto out;
+	}
+
+	client = clnt_reconnect_create(nconf, saddr, nrp->nr_prog,
+	    nrp->nr_vers, sndreserve, rcvreserve);
+	CLNT_CONTROL(client, CLSET_WAITCHAN, "nfsreq");
+	if (nmp != NULL) {
+		if ((nmp->nm_flag & NFSMNT_INT))
+			CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
+		if ((nmp->nm_flag & NFSMNT_RESVPORT))
+			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
+		if (NFSHASSOFT(nmp)) {
+			if (nmp->nm_sotype == SOCK_DGRAM)
+				/*
+				 * For UDP, the large timeout for a reconnect
+				 * will be set to "nm_retry * nm_timeo / 2", so
+				 * we only want to do 2 reconnect timeout
+				 * retries.
+				 */
+				retries = 2;
+			else
+				retries = nmp->nm_retry;
+		} else
+			retries = INT_MAX;
+		if (NFSHASNFSV4N(nmp)) {
+			if (cred != NULL) {
+				if (NFSHASSOFT(nmp)) {
+					/*
+					 * This should be a DS mount.
+					 * Use CLSET_TIMEOUT to set the timeout
+					 * for connections to DSs instead of
+					 * specifying a timeout on each RPC.
+					 * This is done so that SO_SNDTIMEO
+					 * is set on the TCP socket as well
+					 * as specifying a time limit when
+					 * waiting for an RPC reply.  Useful
+					 * if the send queue for the TCP
+					 * connection has become constipated,
+					 * due to a failed DS.
+					 * The choice of lease_duration / 4 is
+					 * fairly arbitrary, but seems to work
+					 * ok, with a lower bound of 10sec.
+					 */
+					timo.tv_sec = nfsrv_lease / 4;
+					if (timo.tv_sec < 10)
+						timo.tv_sec = 10;
+					timo.tv_usec = 0;
+					CLNT_CONTROL(client, CLSET_TIMEOUT,
+					    &timo);
+				}
+				/*
+				 * Make sure the nfscbd_pool doesn't get
+				 * destroyed while doing this.
+				 */
+				NFSD_LOCK();
+				if (nfs_numnfscbd > 0) {
+					nfs_numnfscbd++;
+					NFSD_UNLOCK();
+					xprt = svc_vc_create_backchannel(
+					    nfscbd_pool);
+					CLNT_CONTROL(client, CLSET_BACKCHANNEL,
+					    xprt);
+					NFSD_LOCK();
+					nfs_numnfscbd--;
+					if (nfs_numnfscbd == 0)
+						wakeup(&nfs_numnfscbd);
+				}
+				NFSD_UNLOCK();
+			} else {
+				/*
+				 * cred == NULL for a DS connect.
+				 * For connects to a DS, set a retry limit
+				 * so that failed DSs will be detected.
+				 * This is ok for NFSv4.1, since a DS does
+				 * not maintain open/lock state and is the
+				 * only case where using a "soft" mount is
+				 * recommended for NFSv4.
+				 * For mounts from the MDS to DS, this is done
+				 * via mount options, but that is not the case
+				 * here.  The retry limit here can be adjusted
+				 * via the sysctl vfs.nfs.dsretries.
+				 * See the comment above w.r.t. timeout.
+				 */
+				timo.tv_sec = nfsrv_lease / 4;
+				if (timo.tv_sec < 10)
+					timo.tv_sec = 10;
+				timo.tv_usec = 0;
+				CLNT_CONTROL(client, CLSET_TIMEOUT, &timo);
+				retries = nfs_dsretries;
+			}
+		}
+	} else {
+		/*
+		 * Three cases:
+		 * - Null RPC callback to client
+		 * - Non-Null RPC callback to client, wait a little longer
+		 * - upcalls to nfsuserd and gssd (clp == NULL)
+		 */
+		if (callback_retry_mult == 0) {
+			retries = NFSV4_UPCALLRETRY;
+			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
+		} else {
+			retries = NFSV4_CALLBACKRETRY * callback_retry_mult;
+		}
+	}
+	CLNT_CONTROL(client, CLSET_RETRIES, &retries);
+
+	if (nmp != NULL) {
+		/*
+		 * For UDP, there are 2 timeouts:
+		 * - CLSET_RETRY_TIMEOUT sets the initial timeout for the timer
+		 *   that does a retransmit of an RPC request using the same 
+		 *   socket and xid. This is what you normally want to do,
+		 *   since NFS servers depend on "same xid" for their
+		 *   Duplicate Request Cache.
+		 * - timeout specified in CLNT_CALL_MBUF(), which specifies when
+		 *   retransmits on the same socket should fail and a fresh
+		 *   socket created. Each of these timeouts counts as one
+		 *   CLSET_RETRIES as set above.
+		 * Set the initial retransmit timeout for UDP. This timeout
+		 * doesn't exist for TCP and the following call just fails,
+		 * which is ok.
+		 */
+		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
+		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
+		CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, &timo);
+	}
+
+	mtx_lock(&nrp->nr_mtx);
+	if (nrp->nr_client != NULL) {
+		mtx_unlock(&nrp->nr_mtx);
+		/*
+		 * Someone else already connected.
+		 */
+		CLNT_RELEASE(client);
+	} else {
+		nrp->nr_client = client;
+		/*
+		 * Protocols that do not require connections may be optionally
+		 * left unconnected for servers that reply from a port other
+		 * than NFS_PORT.
+		 */
+		if (nmp == NULL || (nmp->nm_flag & NFSMNT_NOCONN) == 0) {
+			mtx_unlock(&nrp->nr_mtx);
+			CLNT_CONTROL(client, CLSET_CONNECT, &one);
+		} else
+			mtx_unlock(&nrp->nr_mtx);
+	}
+
+	/* Restore current thread's credentials. */
+	td->td_ucred = origcred;
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * NFS disconnect. Clean up and unlink.
+ */
+void
+newnfs_disconnect(struct nfssockreq *nrp)
+{
+	CLIENT *client;
+
+	mtx_lock(&nrp->nr_mtx);
+	if (nrp->nr_client != NULL) {
+		client = nrp->nr_client;
+		nrp->nr_client = NULL;
+		mtx_unlock(&nrp->nr_mtx);
+		rpc_gss_secpurge_call(client);
+		CLNT_CLOSE(client);
+		CLNT_RELEASE(client);
+	} else {
+		mtx_unlock(&nrp->nr_mtx);
+	}
+}
+
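+/*
+ * Build an RPC authenticator for the requested security flavour.  For
+ * the RPCSEC_GSS Kerberos flavours an existing GSS context is looked up
+ * (or a new one created when a client principal is supplied); if that
+ * fails, or for AUTH_SYS, plain Unix credentials are returned.
+ */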
+static AUTH *
+nfs_getauth(struct nfssockreq *nrp, int secflavour, char *clnt_principal,
+    char *srv_principal, gss_OID mech_oid, struct ucred *cred)
+{
+	rpc_gss_service_t svc;
+	AUTH *auth;
+
+	switch (secflavour) {
+	case RPCSEC_GSS_KRB5:
+	case RPCSEC_GSS_KRB5I:
+	case RPCSEC_GSS_KRB5P:
+		if (!mech_oid) {
+			if (!rpc_gss_mech_to_oid_call("kerberosv5", &mech_oid))
+				return (NULL);
+		}
+		if (secflavour == RPCSEC_GSS_KRB5)
+			svc = rpc_gss_svc_none;
+		else if (secflavour == RPCSEC_GSS_KRB5I)
+			svc = rpc_gss_svc_integrity;
+		else
+			svc = rpc_gss_svc_privacy;
+
+		if (clnt_principal == NULL)
+			auth = rpc_gss_secfind_call(nrp->nr_client, cred,
+			    srv_principal, mech_oid, svc);
+		else {
+			auth = rpc_gss_seccreate_call(nrp->nr_client, cred,
+			    clnt_principal, srv_principal, "kerberosv5",
+			    svc, NULL, NULL, NULL);
+			return (auth);
+		}
+		if (auth != NULL)
+			return (auth);
+		/* fallthrough */
+	case AUTH_SYS:
+	default:
+		return (authunix_create(cred));
+
+	}
+}
+
+/*
+ * Callback from the RPC code to generate up/down notifications.
+ */
+
+struct nfs_feedback_arg {
+	struct nfsmount *nf_mount;
+	int		nf_lastmsg;	/* last tprintf */
+	int		nf_tprintfmsg;
+	struct thread	*nf_td;
+};
+
+static void
+nfs_feedback(int type, int proc, void *arg)
+{
+	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
+	struct nfsmount *nmp = nf->nf_mount;
+	time_t now;
+
+	switch (type) {
+	case FEEDBACK_REXMIT2:
+	case FEEDBACK_RECONNECT:
+		now = NFSD_MONOSEC;
+		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now) {
+			nfs_down(nmp, nf->nf_td,
+			    "not responding", 0, NFSSTA_TIMEO);
+			nf->nf_tprintfmsg = TRUE;
+			nf->nf_lastmsg = now;
+		}
+		break;
+
+	case FEEDBACK_OK:
+		nfs_up(nf->nf_mount, nf->nf_td,
+		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
+		break;
+	}
+}
+
+/*
+ * newnfs_request - goes something like this
+ *	- does the rpc by calling the krpc layer
+ *	- breaks down the rpc header and returns with the nfs reply
+ * nb: always frees up nd_mreq mbuf list
+ */
+int
+newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
+    struct nfsclient *clp, struct nfssockreq *nrp, vnode_t vp,
+    struct thread *td, struct ucred *cred, u_int32_t prog, u_int32_t vers,
+    u_char *retsum, int toplevel, u_int64_t *xidp, struct nfsclsession *dssep)
+{
+	uint32_t retseq, retval, slotseq, *tl;
+	time_t waituntil;
+	int i = 0, j = 0, opcnt, set_sigset = 0, slot;
+	int error = 0, usegssname = 0, secflavour = AUTH_SYS;
+	int freeslot, maxslot, reterr, slotpos, timeo;
+	u_int16_t procnum;
+	u_int trylater_delay = 1;
+	struct nfs_feedback_arg nf;
+	struct timeval timo;
+	AUTH *auth;
+	struct rpc_callextra ext;
+	enum clnt_stat stat;
+	struct nfsreq *rep = NULL;
+	char *srv_principal = NULL, *clnt_principal = NULL;
+	sigset_t oldset;
+	struct ucred *authcred;
+	struct nfsclsession *sep;
+	uint8_t sessionid[NFSX_V4SESSIONID];
+
+	sep = dssep;
+	if (xidp != NULL)
+		*xidp = 0;
+	/* Reject requests while attempting a forced unmount. */
+	if (nmp != NULL && NFSCL_FORCEDISM(nmp->nm_mountp)) {
+		m_freem(nd->nd_mreq);
+		return (ESTALE);
+	}
+
+	/*
+	 * Set authcred, which is used to acquire RPC credentials, to
+	 * the cred argument by default. The crhold() should not be
+	 * necessary, but will ensure that some future code change
+	 * doesn't result in the credential being free'd prematurely.
+	 */
+	authcred = crhold(cred);
+
+	/* For client side interruptible mounts, mask off the signals. */
+	if (nmp != NULL && td != NULL && NFSHASINT(nmp)) {
+		newnfs_set_sigmask(td, &oldset);
+		set_sigset = 1;
+	}
+
+	/*
+	 * XXX if not already connected call nfs_connect now. Longer
+	 * term, change nfs_mount to call nfs_connect unconditionally
+	 * and let clnt_reconnect_create handle reconnects.
+	 */
+	if (nrp->nr_client == NULL)
+		newnfs_connect(nmp, nrp, cred, td, 0);
+
+	/*
+	 * For a client side mount, nmp is != NULL and clp == NULL. For
+	 * server calls (callbacks or upcalls), nmp == NULL.
+	 */
+	if (clp != NULL) {
+		NFSLOCKSTATE();
+		if ((clp->lc_flags & LCL_GSS) && nfsrv_gsscallbackson) {
+			secflavour = RPCSEC_GSS_KRB5;
+			if (nd->nd_procnum != NFSPROC_NULL) {
+				if (clp->lc_flags & LCL_GSSINTEGRITY)
+					secflavour = RPCSEC_GSS_KRB5I;
+				else if (clp->lc_flags & LCL_GSSPRIVACY)
+					secflavour = RPCSEC_GSS_KRB5P;
+			}
+		}
+		NFSUNLOCKSTATE();
+	} else if (nmp != NULL && NFSHASKERB(nmp) &&
+	     nd->nd_procnum != NFSPROC_NULL) {
+		if (NFSHASALLGSSNAME(nmp) && nmp->nm_krbnamelen > 0)
+			nd->nd_flag |= ND_USEGSSNAME;
+		if ((nd->nd_flag & ND_USEGSSNAME) != 0) {
+			/*
+			 * If there is a client side host based credential,
+			 * use that, otherwise use the system uid, if set.
+			 * The system uid is in the nmp->nm_sockreq.nr_cred
+			 * credentials.
+			 */
+			if (nmp->nm_krbnamelen > 0) {
+				usegssname = 1;
+				clnt_principal = nmp->nm_krbname;
+			} else if (nmp->nm_uid != (uid_t)-1) {
+				KASSERT(nmp->nm_sockreq.nr_cred != NULL,
+				    ("newnfs_request: NULL nr_cred"));
+				crfree(authcred);
+				authcred = crhold(nmp->nm_sockreq.nr_cred);
+			}
+		} else if (nmp->nm_krbnamelen == 0 &&
+		    nmp->nm_uid != (uid_t)-1 && cred->cr_uid == (uid_t)0) {
+			/*
+			 * If there is no host based principal name and
+			 * the system uid is set and this is root, use the
+			 * system uid, since root won't have user
+			 * credentials in a credentials cache file.
+			 * The system uid is in the nmp->nm_sockreq.nr_cred
+			 * credentials.
+			 */
+			KASSERT(nmp->nm_sockreq.nr_cred != NULL,
+			    ("newnfs_request: NULL nr_cred"));
+			crfree(authcred);
+			authcred = crhold(nmp->nm_sockreq.nr_cred);
+		}
+		if (NFSHASINTEGRITY(nmp))
+			secflavour = RPCSEC_GSS_KRB5I;
+		else if (NFSHASPRIVACY(nmp))
+			secflavour = RPCSEC_GSS_KRB5P;
+		else
+			secflavour = RPCSEC_GSS_KRB5;
+		srv_principal = NFSMNT_SRVKRBNAME(nmp);
+	} else if (nmp != NULL && !NFSHASKERB(nmp) &&
+	    nd->nd_procnum != NFSPROC_NULL &&
+	    (nd->nd_flag & ND_USEGSSNAME) != 0) {
+		/*
+		 * Use the uid that did the mount when the RPC is doing
+		 * NFSv4 system operations, as indicated by the
+		 * ND_USEGSSNAME flag, for the AUTH_SYS case.
+		 * The credentials in nm_sockreq.nr_cred were used for the
+		 * mount.
+		 */
+		KASSERT(nmp->nm_sockreq.nr_cred != NULL,
+		    ("newnfs_request: NULL nr_cred"));
+		crfree(authcred);
+		authcred = crhold(nmp->nm_sockreq.nr_cred);
+	}
+
+	if (nmp != NULL) {
+		bzero(&nf, sizeof(struct nfs_feedback_arg));
+		nf.nf_mount = nmp;
+		nf.nf_td = td;
+		nf.nf_lastmsg = NFSD_MONOSEC -
+		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
+	}
+
+	if (nd->nd_procnum == NFSPROC_NULL)
+		auth = authnone_create();
+	else if (usegssname) {
+		/*
+		 * For this case, the authenticator is held in the
+		 * nfssockreq structure, so don't release the reference count
+		 * held on it. --> Don't AUTH_DESTROY() it in this function.
+		 */
+		if (nrp->nr_auth == NULL)
+			nrp->nr_auth = nfs_getauth(nrp, secflavour,
+			    clnt_principal, srv_principal, NULL, authcred);
+		else
+			rpc_gss_refresh_auth_call(nrp->nr_auth);
+		auth = nrp->nr_auth;
+	} else
+		auth = nfs_getauth(nrp, secflavour, NULL,
+		    srv_principal, NULL, authcred);
+	crfree(authcred);
+	if (auth == NULL) {
+		m_freem(nd->nd_mreq);
+		if (set_sigset)
+			newnfs_restore_sigmask(td, &oldset);
+		return (EACCES);
+	}
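+	/*
+	 * Hand the authenticator and, for mounts, the nfs_feedback()
+	 * up/down notification handler to the krpc layer via the
+	 * rpc_callextra structure.
+	 */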
+	bzero(&ext, sizeof(ext));
+	ext.rc_auth = auth;
+	if (nmp != NULL) {
+		ext.rc_feedback = nfs_feedback;
+		ext.rc_feedback_arg = &nf;
+	}
+
+	procnum = nd->nd_procnum;
+	if ((nd->nd_flag & ND_NFSV4) &&
+	    nd->nd_procnum != NFSPROC_NULL &&
+	    nd->nd_procnum != NFSV4PROC_CBCOMPOUND)
+		procnum = NFSV4PROC_COMPOUND;
+
+	if (nmp != NULL) {
+		NFSINCRGLOBAL(nfsstatsv1.rpcrequests);
+
+		/* Map the procnum to the old NFSv2 one, as required. */
+		if ((nd->nd_flag & ND_NFSV2) != 0) {
+			if (nd->nd_procnum < NFS_V3NPROCS)
+				procnum = nfsv2_procid[nd->nd_procnum];
+			else
+				procnum = NFSV2PROC_NOOP;
+		}
+
+		/*
+		 * Now only used for the R_DONTRECOVER case, but until that is
+		 * supported within the krpc code, I need to keep a queue of
+		 * outstanding RPCs for nfsv4 client requests.
+		 */
+		if ((nd->nd_flag & ND_NFSV4) && procnum == NFSV4PROC_COMPOUND)
+			rep = malloc(sizeof(struct nfsreq),
+			    M_NFSDREQ, M_WAITOK);
+#ifdef KDTRACE_HOOKS
+		if (dtrace_nfscl_nfs234_start_probe != NULL) {
+			uint32_t probe_id;
+			int probe_procnum;
+	
+			if (nd->nd_flag & ND_NFSV4) {
+				probe_id =
+				    nfscl_nfs4_start_probes[nd->nd_procnum];
+				probe_procnum = nd->nd_procnum;
+			} else if (nd->nd_flag & ND_NFSV3) {
+				probe_id = nfscl_nfs3_start_probes[procnum];
+				probe_procnum = procnum;
+			} else {
+				probe_id =
+				    nfscl_nfs2_start_probes[nd->nd_procnum];
+				probe_procnum = procnum;
+			}
+			if (probe_id != 0)
+				(dtrace_nfscl_nfs234_start_probe)
+				    (probe_id, vp, nd->nd_mreq, cred,
+				     probe_procnum);
+		}
+#endif
+	}
+	freeslot = -1;		/* Set to slot that needs to be free'd */
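+	/*
+	 * The RPC is (re)issued from the label below.  NFSv4 replies
+	 * such as NFSERR_BADSESSION and NFSERR_DELAY free the reply
+	 * mbuf chain and jump back to tryagain to retry the request.
+	 */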
+tryagain:
+	slot = -1;		/* Slot that needs a sequence# increment. */
+	/*
+	 * This timeout specifies when a new socket should be created,
+	 * along with new xid values. For UDP, this should be done
+	 * infrequently, since retransmits of RPC requests should normally
+	 * use the same xid.
+	 */
+	if (nmp == NULL) {
+		timo.tv_usec = 0;
+		if (clp == NULL)
+			timo.tv_sec = NFSV4_UPCALLTIMEO;
+		else
+			timo.tv_sec = NFSV4_CALLBACKTIMEO;
+	} else {
+		if (nrp->nr_sotype != SOCK_DGRAM) {
+			timo.tv_usec = 0;
+			if ((nmp->nm_flag & NFSMNT_NFSV4))
+				timo.tv_sec = INT_MAX;
+			else
+				timo.tv_sec = NFS_TCPTIMEO;
+		} else {
+			if (NFSHASSOFT(nmp)) {
+				/*
+				 * CLSET_RETRIES is set to 2, so this should be
+				 * half of the total timeout required.
+				 */
+				timeo = nmp->nm_retry * nmp->nm_timeo / 2;
+				if (timeo < 1)
+					timeo = 1;
+				timo.tv_sec = timeo / NFS_HZ;
+				timo.tv_usec = (timeo % NFS_HZ) * 1000000 /
+				    NFS_HZ;
+			} else {
+				/* For UDP hard mounts, use a large value. */
+				timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
+				timo.tv_usec = 0;
+			}
+		}
+
+		if (rep != NULL) {
+			rep->r_flags = 0;
+			rep->r_nmp = nmp;
+			/*
+			 * Chain request into list of outstanding requests.
+			 */
+			NFSLOCKREQ();
+			TAILQ_INSERT_TAIL(&nfsd_reqq, rep, r_chain);
+			NFSUNLOCKREQ();
+		}
+	}
+
+	nd->nd_mrep = NULL;
+	if (clp != NULL && sep != NULL)
+		stat = clnt_bck_call(nrp->nr_client, &ext, procnum,
+		    nd->nd_mreq, &nd->nd_mrep, timo, sep->nfsess_xprt);
+	else
+		stat = CLNT_CALL_MBUF(nrp->nr_client, &ext, procnum,
+		    nd->nd_mreq, &nd->nd_mrep, timo);
+	NFSCL_DEBUG(2, "clnt call=%d\n", stat);
+
+	if (rep != NULL) {
+		/*
+		 * RPC done, unlink the request.
+		 */
+		NFSLOCKREQ();
+		TAILQ_REMOVE(&nfsd_reqq, rep, r_chain);
+		NFSUNLOCKREQ();
+	}
+
+	/*
+	 * If there was a successful reply after a tprintf msg, a
+	 * response is tprintf'd via nfs_feedback()/nfs_up().
+	 * Map the RPC status to an errno value.
+	 */
+	if (stat == RPC_SUCCESS) {
+		error = 0;
+	} else if (stat == RPC_TIMEDOUT) {
+		NFSINCRGLOBAL(nfsstatsv1.rpctimeouts);
+		error = ETIMEDOUT;
+	} else if (stat == RPC_VERSMISMATCH) {
+		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
+		error = EOPNOTSUPP;
+	} else if (stat == RPC_PROGVERSMISMATCH) {
+		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
+		error = EPROTONOSUPPORT;
+	} else if (stat == RPC_INTR) {
+		error = EINTR;
+	} else if (stat == RPC_CANTSEND || stat == RPC_CANTRECV ||
+	     stat == RPC_SYSTEMERROR) {
+		/* Check for a session slot that needs to be free'd. */
+		if ((nd->nd_flag & (ND_NFSV41 | ND_HASSLOTID)) ==
+		    (ND_NFSV41 | ND_HASSLOTID) && nmp != NULL &&
+		    nd->nd_procnum != NFSPROC_NULL) {
+			/*
+			 * This should only occur when either the MDS or
+			 * a client has an RPC against a DS fail.
+			 * This happens because these cases use "soft"
+			 * connections that can time out and fail.
+			 * The slot used for this RPC is now in a
+			 * non-deterministic state, but if the slot isn't
+			 * free'd, threads can get stuck waiting for a slot.
+			 */
+			if (sep == NULL)
+				sep = nfsmnt_mdssession(nmp);
+			/*
+			 * Bump the sequence# out of range, so that reuse of
+			 * this slot will result in an NFSERR_SEQMISORDERED
+			 * error and not a bogus cached RPC reply.
+			 */
+			mtx_lock(&sep->nfsess_mtx);
+			sep->nfsess_slotseq[nd->nd_slotid] += 10;
+			mtx_unlock(&sep->nfsess_mtx);
+			/* And free the slot. */
+			nfsv4_freeslot(sep, nd->nd_slotid);
+		}
+		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
+		error = ENXIO;
+	} else {
+		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
+		error = EACCES;
+	}
+	if (error) {
+		m_freem(nd->nd_mreq);
+		if (usegssname == 0)
+			AUTH_DESTROY(auth);
+		if (rep != NULL)
+			free(rep, M_NFSDREQ);
+		if (set_sigset)
+			newnfs_restore_sigmask(td, &oldset);
+		return (error);
+	}
+
+	KASSERT(nd->nd_mrep != NULL, ("mrep shouldn't be NULL if no error\n"));
+
+	/*
+	 * Search for any mbufs that are not a multiple of 4 bytes long
+	 * or with m_data not longword aligned.
+	 * These could cause pointer alignment problems, so copy them to
+	 * well aligned mbufs.
+	 */
+	newnfs_realign(&nd->nd_mrep, M_WAITOK);
+	nd->nd_md = nd->nd_mrep;
+	nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
+	nd->nd_repstat = 0;
+	if (nd->nd_procnum != NFSPROC_NULL &&
+	    nd->nd_procnum != NFSV4PROC_CBNULL) {
+		/* If sep == NULL, set it to the default in nmp. */
+		if (sep == NULL && nmp != NULL)
+			sep = nfsmnt_mdssession(nmp);
+		/*
+		 * and now the actual NFS xdr.
+		 */
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		nd->nd_repstat = fxdr_unsigned(u_int32_t, *tl);
+		if (nd->nd_repstat >= 10000)
+			NFSCL_DEBUG(1, "proc=%d reps=%d\n", (int)nd->nd_procnum,
+			    (int)nd->nd_repstat);
+
+		/*
+		 * Get rid of the tag, return count and SEQUENCE result for
+		 * NFSv4.
+		 */
+		if ((nd->nd_flag & ND_NFSV4) != 0 && nd->nd_repstat !=
+		    NFSERR_MINORVERMISMATCH) {
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			i = fxdr_unsigned(int, *tl);
+			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
+			if (error)
+				goto nfsmout;
+			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+			opcnt = fxdr_unsigned(int, *tl++);
+			i = fxdr_unsigned(int, *tl++);
+			j = fxdr_unsigned(int, *tl);
+			if (j >= 10000)
+				NFSCL_DEBUG(1, "fop=%d fst=%d\n", i, j);
+			/*
+			 * If the first op is Sequence, free up the slot.
+			 */
+			if ((nmp != NULL && i == NFSV4OP_SEQUENCE && j != 0) ||
+			    (clp != NULL && i == NFSV4OP_CBSEQUENCE && j != 0))
+				NFSCL_DEBUG(1, "failed seq=%d\n", j);
+			if (((nmp != NULL && i == NFSV4OP_SEQUENCE && j == 0) ||
+			    (clp != NULL && i == NFSV4OP_CBSEQUENCE &&
+			    j == 0)) && sep != NULL) {
+				if (i == NFSV4OP_SEQUENCE)
+					NFSM_DISSECT(tl, uint32_t *,
+					    NFSX_V4SESSIONID +
+					    5 * NFSX_UNSIGNED);
+				else
+					NFSM_DISSECT(tl, uint32_t *,
+					    NFSX_V4SESSIONID +
+					    4 * NFSX_UNSIGNED);
+				mtx_lock(&sep->nfsess_mtx);
+				if (bcmp(tl, sep->nfsess_sessionid,
+				    NFSX_V4SESSIONID) == 0) {
+					tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
+					retseq = fxdr_unsigned(uint32_t, *tl++);
+					slot = fxdr_unsigned(int, *tl++);
+					freeslot = slot;
+					if (retseq != sep->nfsess_slotseq[slot])
+						printf("retseq diff 0x%x\n",
+						    retseq);
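+					/*
+					 * Use the slot limit returned
+					 * by the server to adjust the
+					 * count of usable fore channel
+					 * slots, capped at 64.
+					 */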
+					retval = fxdr_unsigned(uint32_t, *++tl);
+					if (retval + 1 < sep->nfsess_foreslots)
+						sep->nfsess_foreslots =
+						    retval + 1;
+					else if (retval + 1 >
+					    sep->nfsess_foreslots)
+						sep->nfsess_foreslots =
+						    (retval < 64) ?
+						    (retval + 1) : 64;
+				}
+				mtx_unlock(&sep->nfsess_mtx);
+
+				/* Grab the op and status for the next one. */
+				if (opcnt > 1) {
+					NFSM_DISSECT(tl, uint32_t *,
+					    2 * NFSX_UNSIGNED);
+					i = fxdr_unsigned(int, *tl++);
+					j = fxdr_unsigned(int, *tl);
+				}
+			}
+		}
+		if (nd->nd_repstat != 0) {
+			if (nd->nd_repstat == NFSERR_BADSESSION &&
+			    nmp != NULL && dssep == NULL &&
+			    (nd->nd_flag & ND_NFSV41) != 0) {
+				/*
+				 * If this is a client side MDS RPC, mark
+				 * the MDS session defunct and initiate
+				 * recovery, as required.
+				 * The nfsess_defunct field is protected by
+				 * the NFSLOCKMNT()/nm_mtx lock and not the
+				 * nfsess_mtx lock to simplify its handling,
+				 * for the MDS session. This lock is also
+				 * sufficient for nfsess_sessionid, since it
+				 * never changes in the structure.
+				 */
+				NFSCL_DEBUG(1, "Got badsession\n");
+				NFSLOCKCLSTATE();
+				NFSLOCKMNT(nmp);
+				sep = NFSMNT_MDSSESSION(nmp);
+				if (bcmp(sep->nfsess_sessionid, nd->nd_sequence,
+				    NFSX_V4SESSIONID) == 0) {
+					/* Initiate recovery. */
+					sep->nfsess_defunct = 1;
+					NFSCL_DEBUG(1, "Marked defunct\n");
+					if (nmp->nm_clp != NULL) {
+						nmp->nm_clp->nfsc_flags |=
+						    NFSCLFLAGS_RECOVER;
+						wakeup(nmp->nm_clp);
+					}
+				}
+				NFSUNLOCKCLSTATE();
+				/*
+				 * Sleep for up to 1sec waiting for a new
+				 * session.
+				 */
+				mtx_sleep(&nmp->nm_sess, &nmp->nm_mtx, PZERO,
+				    "nfsbadsess", hz);
+				/*
+				 * Get the session again, in case a new one
+				 * has been created during the sleep.
+				 */
+				sep = NFSMNT_MDSSESSION(nmp);
+				NFSUNLOCKMNT(nmp);
+				if ((nd->nd_flag & ND_LOOPBADSESS) != 0) {
+					reterr = nfsv4_sequencelookup(nmp, sep,
+					    &slotpos, &maxslot, &slotseq,
+					    sessionid);
+					if (reterr == 0) {
+						/* Fill in new session info. */
+						NFSCL_DEBUG(1,
+						  "Filling in new sequence\n");
+						tl = nd->nd_sequence;
+						bcopy(sessionid, tl,
+						    NFSX_V4SESSIONID);
+						tl += NFSX_V4SESSIONID /
+						    NFSX_UNSIGNED;
+						*tl++ = txdr_unsigned(slotseq);
+						*tl++ = txdr_unsigned(slotpos);
+						*tl = txdr_unsigned(maxslot);
+					}
+					if (reterr == NFSERR_BADSESSION ||
+					    reterr == 0) {
+						NFSCL_DEBUG(1,
+						    "Badsession looping\n");
+						m_freem(nd->nd_mrep);
+						nd->nd_mrep = NULL;
+						goto tryagain;
+					}
+					nd->nd_repstat = reterr;
+					NFSCL_DEBUG(1, "Got err=%d\n", reterr);
+				}
+			}
+			/*
+			 * When clp != NULL, it is a callback and all
+			 * callback operations can be retried for NFSERR_DELAY.
+			 */
+			if (((nd->nd_repstat == NFSERR_DELAY ||
+			      nd->nd_repstat == NFSERR_GRACE) &&
+			     (nd->nd_flag & ND_NFSV4) && (clp != NULL ||
+			     (nd->nd_procnum != NFSPROC_DELEGRETURN &&
+			     nd->nd_procnum != NFSPROC_SETATTR &&
+			     nd->nd_procnum != NFSPROC_READ &&
+			     nd->nd_procnum != NFSPROC_READDS &&
+			     nd->nd_procnum != NFSPROC_WRITE &&
+			     nd->nd_procnum != NFSPROC_WRITEDS &&
+			     nd->nd_procnum != NFSPROC_OPEN &&
+			     nd->nd_procnum != NFSPROC_CREATE &&
+			     nd->nd_procnum != NFSPROC_OPENCONFIRM &&
+			     nd->nd_procnum != NFSPROC_OPENDOWNGRADE &&
+			     nd->nd_procnum != NFSPROC_CLOSE &&
+			     nd->nd_procnum != NFSPROC_LOCK &&
+			     nd->nd_procnum != NFSPROC_LOCKU))) ||
+			    (nd->nd_repstat == NFSERR_DELAY &&
+			     (nd->nd_flag & ND_NFSV4) == 0) ||
+			    nd->nd_repstat == NFSERR_RESOURCE) {
+				if (trylater_delay > NFS_TRYLATERDEL)
+					trylater_delay = NFS_TRYLATERDEL;
+				waituntil = NFSD_MONOSEC + trylater_delay;
+				while (NFSD_MONOSEC < waituntil)
+					(void) nfs_catnap(PZERO, 0, "nfstry");
+				trylater_delay *= 2;
+				if (slot != -1) {
+					mtx_lock(&sep->nfsess_mtx);
+					sep->nfsess_slotseq[slot]++;
+					*nd->nd_slotseq = txdr_unsigned(
+					    sep->nfsess_slotseq[slot]);
+					mtx_unlock(&sep->nfsess_mtx);
+				}
+				m_freem(nd->nd_mrep);
+				nd->nd_mrep = NULL;
+				goto tryagain;
+			}
+
+			/*
+			 * If the File Handle was stale, invalidate the
+			 * lookup cache, just in case.
+			 * (vp != NULL implies a client side call)
+			 */
+			if (nd->nd_repstat == ESTALE && vp != NULL) {
+				cache_purge(vp);
+				if (ncl_call_invalcaches != NULL)
+					(*ncl_call_invalcaches)(vp);
+			}
+		}
+		if ((nd->nd_flag & ND_NFSV4) != 0) {
+			/* Free the slot, as required. */
+			if (freeslot != -1)
+				nfsv4_freeslot(sep, freeslot);
+			/*
+			 * If this op is Putfh, throw its results away.
+			 */
+			if (j >= 10000)
+				NFSCL_DEBUG(1, "nop=%d nst=%d\n", i, j);
+			if (nmp != NULL && i == NFSV4OP_PUTFH && j == 0) {
+				NFSM_DISSECT(tl,u_int32_t *,2 * NFSX_UNSIGNED);
+				i = fxdr_unsigned(int, *tl++);
+				j = fxdr_unsigned(int, *tl);
+				if (j >= 10000)
+					NFSCL_DEBUG(1, "n2op=%d n2st=%d\n", i,
+					    j);
+				/*
+				 * All Compounds that do an Op that must
+				 * be in sequence consist of NFSV4OP_PUTFH
+				 * followed by one of these. As such, we
+				 * can determine if the seqid# should be
+				 * incremented, here.
+				 */
+				if ((i == NFSV4OP_OPEN ||
+				     i == NFSV4OP_OPENCONFIRM ||
+				     i == NFSV4OP_OPENDOWNGRADE ||
+				     i == NFSV4OP_CLOSE ||
+				     i == NFSV4OP_LOCK ||
+				     i == NFSV4OP_LOCKU) &&
+				    (j == 0 ||
+				     (j != NFSERR_STALECLIENTID &&
+				      j != NFSERR_STALESTATEID &&
+				      j != NFSERR_BADSTATEID &&
+				      j != NFSERR_BADSEQID &&
+				      j != NFSERR_BADXDR &&	 
+				      j != NFSERR_RESOURCE &&
+				      j != NFSERR_NOFILEHANDLE)))		 
+					nd->nd_flag |= ND_INCRSEQID;
+			}
+			/*
+			 * If this op's status is non-zero, mark
+			 * that there is no more data to process.
+			 * The exception is Setattr, which always has xdr
+			 * when it has failed.
+			 */
+			if (j != 0 && i != NFSV4OP_SETATTR)
+				nd->nd_flag |= ND_NOMOREDATA;
+
+			/*
+			 * If R_DONTRECOVER is set, replace the stale error
+			 * reply, so that recovery isn't initiated.
+			 */
+			if ((nd->nd_repstat == NFSERR_STALECLIENTID ||
+			     nd->nd_repstat == NFSERR_BADSESSION ||
+			     nd->nd_repstat == NFSERR_STALESTATEID) &&
+			    rep != NULL && (rep->r_flags & R_DONTRECOVER))
+				nd->nd_repstat = NFSERR_STALEDONTRECOVER;
+		}
+	}
+
+#ifdef KDTRACE_HOOKS
+	if (nmp != NULL && dtrace_nfscl_nfs234_done_probe != NULL) {
+		uint32_t probe_id;
+		int probe_procnum;
+
+		if (nd->nd_flag & ND_NFSV4) {
+			probe_id = nfscl_nfs4_done_probes[nd->nd_procnum];
+			probe_procnum = nd->nd_procnum;
+		} else if (nd->nd_flag & ND_NFSV3) {
+			probe_id = nfscl_nfs3_done_probes[procnum];
+			probe_procnum = procnum;
+		} else {
+			probe_id = nfscl_nfs2_done_probes[nd->nd_procnum];
+			probe_procnum = procnum;
+		}
+		if (probe_id != 0)
+			(dtrace_nfscl_nfs234_done_probe)(probe_id, vp,
+			    nd->nd_mreq, cred, probe_procnum, 0);
+	}
+#endif
+
+	m_freem(nd->nd_mreq);
+	if (usegssname == 0)
+		AUTH_DESTROY(auth);
+	if (rep != NULL)
+		free(rep, M_NFSDREQ);
+	if (set_sigset)
+		newnfs_restore_sigmask(td, &oldset);
+	return (0);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	mbuf_freem(nd->nd_mreq);
+	if (usegssname == 0)
+		AUTH_DESTROY(auth);
+	if (rep != NULL)
+		free(rep, M_NFSDREQ);
+	if (set_sigset)
+		newnfs_restore_sigmask(td, &oldset);
+	return (error);
+}
+
+/*
+ * Cancel an nfs mount's outstanding requests by closing its krpc client
+ * connections, including those for any pNFS data servers. This is used
+ * by forced unmounts to terminate any outstanding RPCs.
+ */
+int
+newnfs_nmcancelreqs(struct nfsmount *nmp)
+{
+	struct nfsclds *dsp;
+	struct __rpc_client *cl;
+
+	if (nmp->nm_sockreq.nr_client != NULL)
+		CLNT_CLOSE(nmp->nm_sockreq.nr_client);
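+	/*
+	 * Also close any DS (pNFS data server) connections.  The scan is
+	 * restarted from the label below after each CLNT_CLOSE(), since
+	 * the mount and DS locks must be dropped around that call.
+	 */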
+lookformore:
+	NFSLOCKMNT(nmp);
+	TAILQ_FOREACH(dsp, &nmp->nm_sess, nfsclds_list) {
+		NFSLOCKDS(dsp);
+		if (dsp != TAILQ_FIRST(&nmp->nm_sess) &&
+		    (dsp->nfsclds_flags & NFSCLDS_CLOSED) == 0 &&
+		    dsp->nfsclds_sockp != NULL &&
+		    dsp->nfsclds_sockp->nr_client != NULL) {
+			dsp->nfsclds_flags |= NFSCLDS_CLOSED;
+			cl = dsp->nfsclds_sockp->nr_client;
+			NFSUNLOCKDS(dsp);
+			NFSUNLOCKMNT(nmp);
+			CLNT_CLOSE(cl);
+			goto lookformore;
+		}
+		NFSUNLOCKDS(dsp);
+	}
+	NFSUNLOCKMNT(nmp);
+	return (0);
+}
+
+/*
+ * Any signal that can interrupt an NFS operation in an intr mount
+ * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
+ */
+int newnfs_sig_set[] = {
+	SIGINT,
+	SIGTERM,
+	SIGHUP,
+	SIGKILL,
+	SIGQUIT
+};
+
+/*
+ * Check to see if one of the signals in our subset is pending on
+ * the process (in an intr mount).
+ */
+static int
+nfs_sig_pending(sigset_t set)
+{
+	int i;
+	
+	for (i = 0 ; i < nitems(newnfs_sig_set); i++)
+		if (SIGISMEMBER(set, newnfs_sig_set[i]))
+			return (1);
+	return (0);
+}
+ 
+/*
+ * The set/restore sigmask functions are used to (temporarily) overwrite
+ * the thread td_sigmask during an RPC call (for example). These are also
+ * used in other places in the NFS client that might tsleep().
+ */
+void
+newnfs_set_sigmask(struct thread *td, sigset_t *oldset)
+{
+	sigset_t newset;
+	int i;
+	struct proc *p;
+	
+	SIGFILLSET(newset);
+	if (td == NULL)
+		td = curthread; /* XXX */
+	p = td->td_proc;
+	/* Remove the NFS set of signals from newset */
+	PROC_LOCK(p);
+	mtx_lock(&p->p_sigacts->ps_mtx);
+	for (i = 0 ; i < nitems(newnfs_sig_set); i++) {
+		/*
+		 * But make sure we leave the ones already masked
+		 * by the process, ie. remove the signal from the
+		 * temporary signalmask only if it wasn't already
+		 * in p_sigmask.
+		 */
+		if (!SIGISMEMBER(td->td_sigmask, newnfs_sig_set[i]) &&
+		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, newnfs_sig_set[i]))
+			SIGDELSET(newset, newnfs_sig_set[i]);
+	}
+	mtx_unlock(&p->p_sigacts->ps_mtx);
+	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset,
+	    SIGPROCMASK_PROC_LOCKED);
+	PROC_UNLOCK(p);
+}
+
+void
+newnfs_restore_sigmask(struct thread *td, sigset_t *set)
+{
+	if (td == NULL)
+		td = curthread; /* XXX */
+	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
+}
+
+/*
+ * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
+ * old one after msleep() returns.
+ */
+int
+newnfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
+{
+	sigset_t oldset;
+	int error;
+
+	if ((priority & PCATCH) == 0)
+		return msleep(ident, mtx, priority, wmesg, timo);
+	if (td == NULL)
+		td = curthread; /* XXX */
+	newnfs_set_sigmask(td, &oldset);
+	error = msleep(ident, mtx, priority, wmesg, timo);
+	newnfs_restore_sigmask(td, &oldset);
+	return (error);
+}
+
+/*
+ * Test for a termination condition pending on the process.
+ * This is used for NFSMNT_INT mounts.
+ */
+int
+newnfs_sigintr(struct nfsmount *nmp, struct thread *td)
+{
+	struct proc *p;
+	sigset_t tmpset;
+	
+	/* Terminate all requests while attempting a forced unmount. */
+	if (NFSCL_FORCEDISM(nmp->nm_mountp))
+		return (EIO);
+	if (!(nmp->nm_flag & NFSMNT_INT))
+		return (0);
+	if (td == NULL)
+		return (0);
+	p = td->td_proc;
+	PROC_LOCK(p);
+	tmpset = p->p_siglist;
+	SIGSETOR(tmpset, td->td_siglist);
+	SIGSETNAND(tmpset, td->td_sigmask);
+	mtx_lock(&p->p_sigacts->ps_mtx);
+	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
+	mtx_unlock(&p->p_sigacts->ps_mtx);
+	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
+	    && nfs_sig_pending(tmpset)) {
+		PROC_UNLOCK(p);
+		return (EINTR);
+	}
+	PROC_UNLOCK(p);
+	return (0);
+}
+
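+/*
+ * Log a message about the state of an NFS server via tprintf(),
+ * optionally including an errno value.
+ */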
+static int
+nfs_msg(struct thread *td, const char *server, const char *msg, int error)
+{
+	struct proc *p;
+
+	p = td ? td->td_proc : NULL;
+	if (error) {
+		tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n",
+		    server, msg, error);
+	} else {
+		tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
+	}
+	return (0);
+}
+
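+/*
+ * Mark the mount as not responding: set the NFSSTA_TIMEO and/or
+ * NFSSTA_LOCKTIMEO state flags, post the corresponding VQ_NOTRESP or
+ * VQ_NOTRESPLOCK vfs event and log a message via nfs_msg().
+ */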
+static void
+nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
+    int error, int flags)
+{
+	if (nmp == NULL)
+		return;
+	mtx_lock(&nmp->nm_mtx);
+	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
+		nmp->nm_state |= NFSSTA_TIMEO;
+		mtx_unlock(&nmp->nm_mtx);
+		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
+		    VQ_NOTRESP, 0);
+	} else
+		mtx_unlock(&nmp->nm_mtx);
+	mtx_lock(&nmp->nm_mtx);
+	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
+		nmp->nm_state |= NFSSTA_LOCKTIMEO;
+		mtx_unlock(&nmp->nm_mtx);
+		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
+		    VQ_NOTRESPLOCK, 0);
+	} else
+		mtx_unlock(&nmp->nm_mtx);
+	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
+}
+
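+/*
+ * Inverse of nfs_down(): clear the timeout state flags, post the
+ * corresponding vfs events to clear the condition and optionally log
+ * that the server is responding again.
+ */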
+static void
+nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
+    int flags, int tprintfmsg)
+{
+	if (nmp == NULL)
+		return;
+	if (tprintfmsg) {
+		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
+	}
+
+	mtx_lock(&nmp->nm_mtx);
+	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
+		nmp->nm_state &= ~NFSSTA_TIMEO;
+		mtx_unlock(&nmp->nm_mtx);
+		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
+		    VQ_NOTRESP, 1);
+	} else
+		mtx_unlock(&nmp->nm_mtx);
+	
+	mtx_lock(&nmp->nm_mtx);
+	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
+		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
+		mtx_unlock(&nmp->nm_mtx);
+		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
+		    VQ_NOTRESPLOCK, 1);
+	} else
+		mtx_unlock(&nmp->nm_mtx);
+}
+
diff --git a/freebsd/sys/fs/nfs/nfs_commonport.c b/freebsd/sys/fs/nfs/nfs_commonport.c
new file mode 100644
index 0000000..1a1e64c
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfs_commonport.c
@@ -0,0 +1,819 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Functions that need to be different for different versions of BSD
+ * kernel should be kept here, along with any global storage specific
+ * to this BSD variant.
+ */
+#include <fs/nfs/nfsport.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <rpc/rpc_com.h>
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_param.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_extern.h>
+#include <vm/uma.h>
+
+extern int nfscl_ticks;
+extern nfsuserd_state nfsrv_nfsuserd;
+extern struct nfssockreq nfsrv_nfsuserdsock;
+extern void (*nfsd_call_recall)(struct vnode *, int, struct ucred *,
+    struct thread *);
+extern int nfsrv_useacl;
+struct mount nfsv4root_mnt;
+int newnfs_numnfsd = 0;
+struct nfsstatsv1 nfsstatsv1;
+int nfs_numnfscbd = 0;
+int nfscl_debuglevel = 0;
+char nfsv4_callbackaddr[INET6_ADDRSTRLEN];
+struct callout newnfsd_callout;
+int nfsrv_lughashsize = 100;
+struct mtx nfsrv_dslock_mtx;
+struct nfsdevicehead nfsrv_devidhead;
+volatile int nfsrv_devidcnt = 0;
+void (*nfsd_call_servertimer)(void) = NULL;
+void (*ncl_call_invalcaches)(struct vnode *) = NULL;
+
+int nfs_pnfsio(task_fn_t *, void *);
+
+static int nfs_realign_test;
+static int nfs_realign_count;
+static struct ext_nfsstats oldnfsstats;
+
+SYSCTL_NODE(_vfs, OID_AUTO, nfs, CTLFLAG_RW, 0, "NFS filesystem");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test,
+    0, "Number of realign tests done");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count,
+    0, "Number of mbuf realignments done");
+SYSCTL_STRING(_vfs_nfs, OID_AUTO, callback_addr, CTLFLAG_RW,
+    nfsv4_callbackaddr, sizeof(nfsv4_callbackaddr),
+    "NFSv4 callback addr for server to use");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, debuglevel, CTLFLAG_RW, &nfscl_debuglevel,
+    0, "Debug level for NFS client");
+SYSCTL_INT(_vfs_nfs, OID_AUTO, userhashsize, CTLFLAG_RDTUN, &nfsrv_lughashsize,
+    0, "Size of hash tables for uid/name mapping");
+int nfs_pnfsiothreads = -1;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, pnfsiothreads, CTLFLAG_RW, &nfs_pnfsiothreads,
+    0, "Number of pNFS mirror I/O threads");
+
+/*
+ * Defines for malloc
+ * (Here for FreeBSD, since they allocate storage.)
+ */
+MALLOC_DEFINE(M_NEWNFSRVCACHE, "NFSD srvcache", "NFSD Server Request Cache");
+MALLOC_DEFINE(M_NEWNFSDCLIENT, "NFSD V4client", "NFSD V4 Client Id");
+MALLOC_DEFINE(M_NEWNFSDSTATE, "NFSD V4state",
+    "NFSD V4 State (Openowner, Open, Lockowner, Delegation)");
+MALLOC_DEFINE(M_NEWNFSDLOCK, "NFSD V4lock", "NFSD V4 byte range lock");
+MALLOC_DEFINE(M_NEWNFSDLOCKFILE, "NFSD lckfile", "NFSD Open/Lock file");
+MALLOC_DEFINE(M_NEWNFSSTRING, "NFSD string", "NFSD V4 long string");
+MALLOC_DEFINE(M_NEWNFSUSERGROUP, "NFSD usrgroup", "NFSD V4 User/group map");
+MALLOC_DEFINE(M_NEWNFSDREQ, "NFS req", "NFS request header");
+MALLOC_DEFINE(M_NEWNFSFH, "NFS fh", "NFS file handle");
+MALLOC_DEFINE(M_NEWNFSCLOWNER, "NFSCL owner", "NFSCL Open Owner");
+MALLOC_DEFINE(M_NEWNFSCLOPEN, "NFSCL open", "NFSCL Open");
+MALLOC_DEFINE(M_NEWNFSCLDELEG, "NFSCL deleg", "NFSCL Delegation");
+MALLOC_DEFINE(M_NEWNFSCLCLIENT, "NFSCL client", "NFSCL Client");
+MALLOC_DEFINE(M_NEWNFSCLLOCKOWNER, "NFSCL lckown", "NFSCL Lock Owner");
+MALLOC_DEFINE(M_NEWNFSCLLOCK, "NFSCL lck", "NFSCL Lock");
+MALLOC_DEFINE(M_NEWNFSV4NODE, "NEWNFSnode", "NFS vnode");
+MALLOC_DEFINE(M_NEWNFSDIRECTIO, "NEWdirectio", "NFS Direct IO buffer");
+MALLOC_DEFINE(M_NEWNFSDIROFF, "NFSCL diroff",
+    "NFS directory offset data");
+MALLOC_DEFINE(M_NEWNFSDROLLBACK, "NFSD rollback",
+    "NFS local lock rollback");
+MALLOC_DEFINE(M_NEWNFSLAYOUT, "NFSCL layout", "NFSv4.1 Layout");
+MALLOC_DEFINE(M_NEWNFSFLAYOUT, "NFSCL flayout", "NFSv4.1 File Layout");
+MALLOC_DEFINE(M_NEWNFSDEVINFO, "NFSCL devinfo", "NFSv4.1 Device Info");
+MALLOC_DEFINE(M_NEWNFSSOCKREQ, "NFSCL sockreq", "NFS Sock Req");
+MALLOC_DEFINE(M_NEWNFSCLDS, "NFSCL session", "NFSv4.1 Session");
+MALLOC_DEFINE(M_NEWNFSLAYRECALL, "NFSCL layrecall", "NFSv4.1 Layout Recall");
+MALLOC_DEFINE(M_NEWNFSDSESSION, "NFSD session", "NFSD Session for a client");
+
+/*
+ * Definition of mutex locks.
+ * newnfsd_mtx is used in nfsrvd_nfsd() to protect the nfs socket list
+ * and assorted other nfsd structures.
+ */
+struct mtx newnfsd_mtx;
+struct mtx nfs_sockl_mutex;
+struct mtx nfs_state_mutex;
+struct mtx nfs_nameid_mutex;
+struct mtx nfs_req_mutex;
+struct mtx nfs_slock_mutex;
+struct mtx nfs_clstate_mutex;
+
+/* local functions */
+static int nfssvc_call(struct thread *, struct nfssvc_args *, struct ucred *);
+
+#ifdef __NO_STRICT_ALIGNMENT
+/*
+ * These architectures don't need re-alignment, so just return.
+ */
+int
+newnfs_realign(struct mbuf **pm, int how)
+{
+
+	return (0);
+}
+#else	/* !__NO_STRICT_ALIGNMENT */
+/*
+ *	newnfs_realign:
+ *
+ *	Check for badly aligned mbuf data and realign by copying the unaligned
+ *	portion of the data into a new mbuf chain and freeing the portions
+ *	of the old chain that were replaced.
+ *
+ *	We cannot simply realign the data within the existing mbuf chain
+ *	because the underlying buffers may contain other rpc commands and
+ *	we cannot afford to overwrite them.
+ *
+ *	We would prefer to avoid this situation entirely.  The situation does
+ *	not occur with NFS/UDP and is supposed to only occasionally occur
+ *	with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
+ *
+ */
+int
+newnfs_realign(struct mbuf **pm, int how)
+{
+	struct mbuf *m, *n;
+	int off, space;
+
+	++nfs_realign_test;
+	while ((m = *pm) != NULL) {
+		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
+			/*
+			 * NB: we can't depend on m_pkthdr.len to help us
+			 * decide what to do here.  May not be worth doing
+			 * the m_length calculation as m_copyback will
+			 * expand the mbuf chain below as needed.
+			 */
+			space = m_length(m, NULL);
+			if (space >= MINCLSIZE) {
+				/* NB: m_copyback handles space > MCLBYTES */
+				n = m_getcl(how, MT_DATA, 0);
+			} else
+				n = m_get(how, MT_DATA);
+			if (n == NULL)
+				return (ENOMEM);
+			/*
+			 * Align the remainder of the mbuf chain.
+			 */
+			n->m_len = 0;
+			off = 0;
+			while (m != NULL) {
+				m_copyback(n, off, m->m_len, mtod(m, caddr_t));
+				off += m->m_len;
+				m = m->m_next;
+			}
+			m_freem(*pm);
+			*pm = n;
+			++nfs_realign_count;
+			break;
+		}
+		pm = &m->m_next;
+	}
+
+	return (0);
+}
+#endif	/* __NO_STRICT_ALIGNMENT */
+
+#ifdef notdef
+static void
+nfsrv_object_create(struct vnode *vp, struct thread *td)
+{
+
+	if (vp == NULL || vp->v_type != VREG)
+		return;
+	(void) vfs_object_create(vp, td, td->td_ucred);
+}
+#endif
+
+/*
+ * Look up a file name. Basically just initialize stuff and call namei().
+ */
+int
+nfsrv_lookupfilename(struct nameidata *ndp, char *fname, NFSPROC_T *p)
+{
+	int error;
+
+	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, fname,
+	    p);
+	error = namei(ndp);
+	if (!error) {
+		NDFREE(ndp, NDF_ONLY_PNBUF);
+	}
+	return (error);
+}
+
+/*
+ * Copy NFS uid, gids to the cred structure.
+ */
+void
+newnfs_copycred(struct nfscred *nfscr, struct ucred *cr)
+{
+
+	KASSERT(nfscr->nfsc_ngroups >= 0,
+	    ("newnfs_copycred: negative nfsc_ngroups"));
+	cr->cr_uid = nfscr->nfsc_uid;
+	crsetgroups(cr, nfscr->nfsc_ngroups, nfscr->nfsc_groups);
+}
+
+/*
+ * Map args from nfsmsleep() to msleep().
+ */
+int
+nfsmsleep(void *chan, void *mutex, int prio, const char *wmesg,
+    struct timespec *ts)
+{
+	u_int64_t nsecval;
+	int error, timeo;
+
+	if (ts) {
+		timeo = hz * ts->tv_sec;
+		nsecval = (u_int64_t)ts->tv_nsec;
+		nsecval = ((nsecval * ((u_int64_t)hz)) + 500000000) /
+		    1000000000;
+		timeo += (int)nsecval;
+	} else {
+		timeo = 0;
+	}
+	error = msleep(chan, (struct mtx *)mutex, prio, wmesg, timeo);
+	return (error);
+}
+
+/*
+ * Get the file system info for the server. For now, just assume FFS.
+ */
+void
+nfsvno_getfs(struct nfsfsinfo *sip, int isdgram)
+{
+	int pref;
+
+	/*
+	 * XXX
+	 * There should be file system VFS OP(s) to get this information.
+	 * For now, assume ufs.
+	 */
+	if (isdgram)
+		pref = NFS_MAXDGRAMDATA;
+	else
+		pref = NFS_SRVMAXIO;
+	sip->fs_rtmax = NFS_SRVMAXIO;
+	sip->fs_rtpref = pref;
+	sip->fs_rtmult = NFS_FABLKSIZE;
+	sip->fs_wtmax = NFS_SRVMAXIO;
+	sip->fs_wtpref = pref;
+	sip->fs_wtmult = NFS_FABLKSIZE;
+	sip->fs_dtpref = pref;
+	sip->fs_maxfilesize = 0xffffffffffffffffull;
+	sip->fs_timedelta.tv_sec = 0;
+	sip->fs_timedelta.tv_nsec = 1;
+	sip->fs_properties = (NFSV3FSINFO_LINK |
+	    NFSV3FSINFO_SYMLINK | NFSV3FSINFO_HOMOGENEOUS |
+	    NFSV3FSINFO_CANSETTIME);
+}
+
+/*
+ * Do the pathconf vnode op.
+ */
+int
+nfsvno_pathconf(struct vnode *vp, int flag, long *retf,
+    struct ucred *cred, struct thread *p)
+{
+	int error;
+
+	error = VOP_PATHCONF(vp, flag, retf);
+	if (error == EOPNOTSUPP || error == EINVAL) {
+		/*
+		 * Some file systems return EINVAL for name arguments not
+		 * supported and some return EOPNOTSUPP for this case.
+		 * So the NFSv3 Pathconf RPC doesn't fail for these cases,
+		 * just fake them.
+		 */
+		switch (flag) {
+		case _PC_LINK_MAX:
+			*retf = NFS_LINK_MAX;
+			break;
+		case _PC_NAME_MAX:
+			*retf = NAME_MAX;
+			break;
+		case _PC_CHOWN_RESTRICTED:
+			*retf = 1;
+			break;
+		case _PC_NO_TRUNC:
+			*retf = 1;
+			break;
+		default:
+			/*
+			 * Only happens if a _PC_xxx is added to the server,
+			 * but this isn't updated.
+			 */
+			*retf = 0;
+			printf("nfsrvd pathconf flag=%d not supp\n", flag);
+		}
+		error = 0;
+	}
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/* Fake nfsrv_atroot. Just return 0 */
+int
+nfsrv_atroot(struct vnode *vp, uint64_t *retp)
+{
+
+	return (0);
+}
+
+/*
+ * Set the credentials to refer to root.
+ * If only the various BSDen could agree on whether cr_gid is a separate
+ * field or cr_groups[0]...
+ */
+void
+newnfs_setroot(struct ucred *cred)
+{
+
+	cred->cr_uid = 0;
+	cred->cr_groups[0] = 0;
+	cred->cr_ngroups = 1;
+}
+
+/*
+ * Get the client credential. Used for Renew and recovery.
+ */
+struct ucred *
+newnfs_getcred(void)
+{
+	struct ucred *cred;
+	struct thread *td = curthread;
+
+	cred = crdup(td->td_ucred);
+	newnfs_setroot(cred);
+	return (cred);
+}
+
+/*
+ * Nfs timer routine
+ * Call the nfsd's timer function once/sec.
+ */
+void
+newnfs_timer(void *arg)
+{
+	static time_t lasttime = 0;
+	/*
+	 * Call the server timer, if set up.
+	 * The argument indicates if it is the next second and therefore
+	 * leases should be checked.
+	 */
+	if (lasttime != NFSD_MONOSEC) {
+		lasttime = NFSD_MONOSEC;
+		if (nfsd_call_servertimer != NULL)
+			(*nfsd_call_servertimer)();
+	}
+	callout_reset(&newnfsd_callout, nfscl_ticks, newnfs_timer, NULL);
+}
+
+
+/*
+ * Sleep for a short period of time unless errval == NFSERR_GRACE, where
+ * the sleep should be for 5 seconds.
+ * Since lbolt doesn't exist in FreeBSD-CURRENT, just use a timeout on
+ * an event that never gets a wakeup. Only return EINTR or 0.
+ */
+int
+nfs_catnap(int prio, int errval, const char *wmesg)
+{
+	static int non_event;
+	int ret;
+
+	if (errval == NFSERR_GRACE)
+		ret = tsleep(&non_event, prio, wmesg, 5 * hz);
+	else
+		ret = tsleep(&non_event, prio, wmesg, 1);
+	if (ret != EINTR)
+		ret = 0;
+	return (ret);
+}
+
+/*
+ * Get referral. For now, just fail.
+ */
+struct nfsreferral *
+nfsv4root_getreferral(struct vnode *vp, struct vnode *dvp, u_int32_t fileno)
+{
+
+	return (NULL);
+}
+
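+/*
+ * nfssvc(2) entry point for the nfscommon module; simply passes the
+ * calling thread's credentials along to nfssvc_call().
+ */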
+static int
+nfssvc_nfscommon(struct thread *td, struct nfssvc_args *uap)
+{
+	int error;
+
+	error = nfssvc_call(td, uap, td->td_ucred);
+	NFSEXITCODE(error);
+	return (error);
+}
+
+static int
+nfssvc_call(struct thread *p, struct nfssvc_args *uap, struct ucred *cred)
+{
+	int error = EINVAL, i, j;
+	struct nfsd_idargs nid;
+	struct nfsd_oidargs onid;
+	struct {
+		int vers;	/* Just the first field of nfsstats. */
+	} nfsstatver;
+
+	if (uap->flag & NFSSVC_IDNAME) {
+		if ((uap->flag & NFSSVC_NEWSTRUCT) != 0)
+			error = copyin(uap->argp, &nid, sizeof(nid));
+		else {
+			error = copyin(uap->argp, &onid, sizeof(onid));
+			if (error == 0) {
+				nid.nid_flag = onid.nid_flag;
+				nid.nid_uid = onid.nid_uid;
+				nid.nid_gid = onid.nid_gid;
+				nid.nid_usermax = onid.nid_usermax;
+				nid.nid_usertimeout = onid.nid_usertimeout;
+				nid.nid_name = onid.nid_name;
+				nid.nid_namelen = onid.nid_namelen;
+				nid.nid_ngroup = 0;
+				nid.nid_grps = NULL;
+			}
+		}
+		if (error)
+			goto out;
+		error = nfssvc_idname(&nid);
+		goto out;
+	} else if (uap->flag & NFSSVC_GETSTATS) {
+		if ((uap->flag & NFSSVC_NEWSTRUCT) == 0) {
+			/* Copy fields to the old ext_nfsstat structure. */
+			oldnfsstats.attrcache_hits =
+			    nfsstatsv1.attrcache_hits;
+			oldnfsstats.attrcache_misses =
+			    nfsstatsv1.attrcache_misses;
+			oldnfsstats.lookupcache_hits =
+			    nfsstatsv1.lookupcache_hits;
+			oldnfsstats.lookupcache_misses =
+			    nfsstatsv1.lookupcache_misses;
+			oldnfsstats.direofcache_hits =
+			    nfsstatsv1.direofcache_hits;
+			oldnfsstats.direofcache_misses =
+			    nfsstatsv1.direofcache_misses;
+			oldnfsstats.accesscache_hits =
+			    nfsstatsv1.accesscache_hits;
+			oldnfsstats.accesscache_misses =
+			    nfsstatsv1.accesscache_misses;
+			oldnfsstats.biocache_reads =
+			    nfsstatsv1.biocache_reads;
+			oldnfsstats.read_bios =
+			    nfsstatsv1.read_bios;
+			oldnfsstats.read_physios =
+			    nfsstatsv1.read_physios;
+			oldnfsstats.biocache_writes =
+			    nfsstatsv1.biocache_writes;
+			oldnfsstats.write_bios =
+			    nfsstatsv1.write_bios;
+			oldnfsstats.write_physios =
+			    nfsstatsv1.write_physios;
+			oldnfsstats.biocache_readlinks =
+			    nfsstatsv1.biocache_readlinks;
+			oldnfsstats.readlink_bios =
+			    nfsstatsv1.readlink_bios;
+			oldnfsstats.biocache_readdirs =
+			    nfsstatsv1.biocache_readdirs;
+			oldnfsstats.readdir_bios =
+			    nfsstatsv1.readdir_bios;
+			for (i = 0; i < NFSV4_NPROCS; i++)
+				oldnfsstats.rpccnt[i] = nfsstatsv1.rpccnt[i];
+			oldnfsstats.rpcretries = nfsstatsv1.rpcretries;
+			for (i = 0; i < NFSV4OP_NOPS; i++)
+				oldnfsstats.srvrpccnt[i] =
+				    nfsstatsv1.srvrpccnt[i];
+			for (i = NFSV42_NOPS, j = NFSV4OP_NOPS;
+			    i < NFSV42_NOPS + NFSV4OP_FAKENOPS; i++, j++)
+				oldnfsstats.srvrpccnt[j] =
+				    nfsstatsv1.srvrpccnt[i];
+			oldnfsstats.srvrpc_errs = nfsstatsv1.srvrpc_errs;
+			oldnfsstats.srv_errs = nfsstatsv1.srv_errs;
+			oldnfsstats.rpcrequests = nfsstatsv1.rpcrequests;
+			oldnfsstats.rpctimeouts = nfsstatsv1.rpctimeouts;
+			oldnfsstats.rpcunexpected = nfsstatsv1.rpcunexpected;
+			oldnfsstats.rpcinvalid = nfsstatsv1.rpcinvalid;
+			oldnfsstats.srvcache_inproghits =
+			    nfsstatsv1.srvcache_inproghits;
+			oldnfsstats.srvcache_idemdonehits =
+			    nfsstatsv1.srvcache_idemdonehits;
+			oldnfsstats.srvcache_nonidemdonehits =
+			    nfsstatsv1.srvcache_nonidemdonehits;
+			oldnfsstats.srvcache_misses =
+			    nfsstatsv1.srvcache_misses;
+			oldnfsstats.srvcache_tcppeak =
+			    nfsstatsv1.srvcache_tcppeak;
+			oldnfsstats.srvcache_size = nfsstatsv1.srvcache_size;
+			oldnfsstats.srvclients = nfsstatsv1.srvclients;
+			oldnfsstats.srvopenowners = nfsstatsv1.srvopenowners;
+			oldnfsstats.srvopens = nfsstatsv1.srvopens;
+			oldnfsstats.srvlockowners = nfsstatsv1.srvlockowners;
+			oldnfsstats.srvlocks = nfsstatsv1.srvlocks;
+			oldnfsstats.srvdelegates = nfsstatsv1.srvdelegates;
+			for (i = 0; i < NFSV4OP_CBNOPS; i++)
+				oldnfsstats.cbrpccnt[i] =
+				    nfsstatsv1.cbrpccnt[i];
+			oldnfsstats.clopenowners = nfsstatsv1.clopenowners;
+			oldnfsstats.clopens = nfsstatsv1.clopens;
+			oldnfsstats.cllockowners = nfsstatsv1.cllockowners;
+			oldnfsstats.cllocks = nfsstatsv1.cllocks;
+			oldnfsstats.cldelegates = nfsstatsv1.cldelegates;
+			oldnfsstats.cllocalopenowners =
+			    nfsstatsv1.cllocalopenowners;
+			oldnfsstats.cllocalopens = nfsstatsv1.cllocalopens;
+			oldnfsstats.cllocallockowners =
+			    nfsstatsv1.cllocallockowners;
+			oldnfsstats.cllocallocks = nfsstatsv1.cllocallocks;
+			error = copyout(&oldnfsstats, uap->argp,
+			    sizeof (oldnfsstats));
+		} else {
+			error = copyin(uap->argp, &nfsstatver,
+			    sizeof(nfsstatver));
+			if (error == 0 && nfsstatver.vers != NFSSTATS_V1)
+				error = EPERM;
+			if (error == 0)
+				error = copyout(&nfsstatsv1, uap->argp,
+				    sizeof (nfsstatsv1));
+		}
+		if (error == 0) {
+			if ((uap->flag & NFSSVC_ZEROCLTSTATS) != 0) {
+				nfsstatsv1.attrcache_hits = 0;
+				nfsstatsv1.attrcache_misses = 0;
+				nfsstatsv1.lookupcache_hits = 0;
+				nfsstatsv1.lookupcache_misses = 0;
+				nfsstatsv1.direofcache_hits = 0;
+				nfsstatsv1.direofcache_misses = 0;
+				nfsstatsv1.accesscache_hits = 0;
+				nfsstatsv1.accesscache_misses = 0;
+				nfsstatsv1.biocache_reads = 0;
+				nfsstatsv1.read_bios = 0;
+				nfsstatsv1.read_physios = 0;
+				nfsstatsv1.biocache_writes = 0;
+				nfsstatsv1.write_bios = 0;
+				nfsstatsv1.write_physios = 0;
+				nfsstatsv1.biocache_readlinks = 0;
+				nfsstatsv1.readlink_bios = 0;
+				nfsstatsv1.biocache_readdirs = 0;
+				nfsstatsv1.readdir_bios = 0;
+				nfsstatsv1.rpcretries = 0;
+				nfsstatsv1.rpcrequests = 0;
+				nfsstatsv1.rpctimeouts = 0;
+				nfsstatsv1.rpcunexpected = 0;
+				nfsstatsv1.rpcinvalid = 0;
+				bzero(nfsstatsv1.rpccnt,
+				    sizeof(nfsstatsv1.rpccnt));
+			}
+			if ((uap->flag & NFSSVC_ZEROSRVSTATS) != 0) {
+				nfsstatsv1.srvrpc_errs = 0;
+				nfsstatsv1.srv_errs = 0;
+				nfsstatsv1.srvcache_inproghits = 0;
+				nfsstatsv1.srvcache_idemdonehits = 0;
+				nfsstatsv1.srvcache_nonidemdonehits = 0;
+				nfsstatsv1.srvcache_misses = 0;
+				nfsstatsv1.srvcache_tcppeak = 0;
+				bzero(nfsstatsv1.srvrpccnt,
+				    sizeof(nfsstatsv1.srvrpccnt));
+				bzero(nfsstatsv1.cbrpccnt,
+				    sizeof(nfsstatsv1.cbrpccnt));
+			}
+		}
+		goto out;
+	} else if (uap->flag & NFSSVC_NFSUSERDPORT) {
+		u_short sockport;
+		struct nfsuserd_args nargs;
+
+		if ((uap->flag & NFSSVC_NEWSTRUCT) == 0) {
+			error = copyin(uap->argp, (caddr_t)&sockport,
+			    sizeof (u_short));
+			if (error == 0) {
+				nargs.nuserd_family = AF_INET;
+				nargs.nuserd_port = sockport;
+			}
+		} else {
+			/*
+			 * New nfsuserd_args structure, which indicates
+			 * which IP version to use along with the port#.
+			 */
+			error = copyin(uap->argp, &nargs, sizeof(nargs));
+		}
+		if (!error)
+			error = nfsrv_nfsuserdport(&nargs, p);
+	} else if (uap->flag & NFSSVC_NFSUSERDDELPORT) {
+		nfsrv_nfsuserddelport();
+		error = 0;
+	}
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * called by all three modevent routines, so that it gets things
+ * initialized soon enough.
+ */
+void
+newnfs_portinit(void)
+{
+	static int inited = 0;
+
+	if (inited)
+		return;
+	inited = 1;
+	/* Initialize SMP locks used by both client and server. */
+	mtx_init(&newnfsd_mtx, "newnfsd_mtx", NULL, MTX_DEF);
+	mtx_init(&nfs_state_mutex, "nfs_state_mutex", NULL, MTX_DEF);
+	mtx_init(&nfs_clstate_mutex, "nfs_clstate_mutex", NULL, MTX_DEF);
+}
+
+/*
+ * Determine if the file system supports NFSv4 ACLs.
+ * Return 1 if it does, 0 otherwise.
+ */
+int
+nfs_supportsnfsv4acls(struct vnode *vp)
+{
+	int error;
+	long retval;
+
+	ASSERT_VOP_LOCKED(vp, "nfs supports nfsv4acls");
+
+	if (nfsrv_useacl == 0)
+		return (0);
+	error = VOP_PATHCONF(vp, _PC_ACL_NFS4, &retval);
+	if (error == 0 && retval != 0)
+		return (1);
+	return (0);
+}
+
+/*
+ * These are the first fields of all the context structures passed into
+ * nfs_pnfsio().
+ */
+struct pnfsio {
+	int		done;
+	int		inprog;
+	struct task	tsk;
+};
+
+/*
+ * Do a mirror I/O on a pNFS thread.
+ */
+int
+nfs_pnfsio(task_fn_t *func, void *context)
+{
+	struct pnfsio *pio;
+	int ret;
+	static struct taskqueue *pnfsioq = NULL;
+
+	pio = (struct pnfsio *)context;
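+	/*
+	 * Create the "pnfsioq" taskqueue and its worker threads on
+	 * first use.  A negative nfs_pnfsiothreads selects a default of
+	 * four threads per CPU; zero disables pNFS mirror I/O.
+	 */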
+	if (pnfsioq == NULL) {
+		if (nfs_pnfsiothreads == 0)
+			return (EPERM);
+		if (nfs_pnfsiothreads < 0)
+			nfs_pnfsiothreads = mp_ncpus * 4;
+		pnfsioq = taskqueue_create("pnfsioq", M_WAITOK,
+		    taskqueue_thread_enqueue, &pnfsioq);
+		if (pnfsioq == NULL)
+			return (ENOMEM);
+		ret = taskqueue_start_threads(&pnfsioq, nfs_pnfsiothreads,
+		    0, "pnfsiot");
+		if (ret != 0) {
+			taskqueue_free(pnfsioq);
+			pnfsioq = NULL;
+			return (ret);
+		}
+	}
+	pio->inprog = 1;
+	TASK_INIT(&pio->tsk, 0, func, context);
+	ret = taskqueue_enqueue(pnfsioq, &pio->tsk);
+	if (ret != 0)
+		pio->inprog = 0;
+	return (ret);
+}
+
+extern int (*nfsd_call_nfscommon)(struct thread *, struct nfssvc_args *);
+
+/*
+ * Called once to initialize data structures...
+ */
+static int
+nfscommon_modevent(module_t mod, int type, void *data)
+{
+	int error = 0;
+	static int loaded = 0;
+
+	switch (type) {
+	case MOD_LOAD:
+		if (loaded)
+			goto out;
+		newnfs_portinit();
+		mtx_init(&nfs_nameid_mutex, "nfs_nameid_mutex", NULL, MTX_DEF);
+		mtx_init(&nfs_sockl_mutex, "nfs_sockl_mutex", NULL, MTX_DEF);
+		mtx_init(&nfs_slock_mutex, "nfs_slock_mutex", NULL, MTX_DEF);
+		mtx_init(&nfs_req_mutex, "nfs_req_mutex", NULL, MTX_DEF);
+		mtx_init(&nfsrv_nfsuserdsock.nr_mtx, "nfsuserd", NULL,
+		    MTX_DEF);
+		mtx_init(&nfsrv_dslock_mtx, "nfs4ds", NULL, MTX_DEF);
+		TAILQ_INIT(&nfsrv_devidhead);
+		callout_init(&newnfsd_callout, 1);
+		newnfs_init();
+		nfsd_call_nfscommon = nfssvc_nfscommon;
+		loaded = 1;
+		break;
+
+	case MOD_UNLOAD:
+		if (newnfs_numnfsd != 0 || nfsrv_nfsuserd != NOTRUNNING ||
+		    nfs_numnfscbd != 0) {
+			error = EBUSY;
+			break;
+		}
+
+		nfsd_call_nfscommon = NULL;
+		callout_drain(&newnfsd_callout);
+		/* Clean out the name<-->id cache. */
+		nfsrv_cleanusergroup();
+		/* and get rid of the mutexes */
+		mtx_destroy(&nfs_nameid_mutex);
+		mtx_destroy(&newnfsd_mtx);
+		mtx_destroy(&nfs_state_mutex);
+		mtx_destroy(&nfs_clstate_mutex);
+		mtx_destroy(&nfs_sockl_mutex);
+		mtx_destroy(&nfs_slock_mutex);
+		mtx_destroy(&nfs_req_mutex);
+		mtx_destroy(&nfsrv_nfsuserdsock.nr_mtx);
+		mtx_destroy(&nfsrv_dslock_mtx);
+		loaded = 0;
+		break;
+	default:
+		error = EOPNOTSUPP;
+		break;
+	}
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+static moduledata_t nfscommon_mod = {
+	"nfscommon",
+	nfscommon_modevent,
+	NULL,
+};
+DECLARE_MODULE(nfscommon, nfscommon_mod, SI_SUB_VFS, SI_ORDER_ANY);
+
+/* So that loader and kldload(2) can find us, wherever we are.. */
+MODULE_VERSION(nfscommon, 1);
+MODULE_DEPEND(nfscommon, nfssvc, 1, 1, 1);
+MODULE_DEPEND(nfscommon, krpc, 1, 1, 1);
+
diff --git a/freebsd/sys/fs/nfs/nfs_commonsubs.c b/freebsd/sys/fs/nfs/nfs_commonsubs.c
new file mode 100644
index 0000000..9214674
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfs_commonsubs.c
@@ -0,0 +1,4769 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * These functions support the macros and help fiddle mbuf chains for
+ * the nfs op functions. They do things like create the rpc header and
+ * copy data between mbuf chains and uio lists.
+ */
+#ifndef APPLEKEXT
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <fs/nfs/nfsport.h>
+
+#include <security/mac/mac_framework.h>
+
+/*
+ * Data items converted to xdr at startup, since they are constant.
+ * This is kinda hokey, but may save a little time doing byte swaps.
+ */
+u_int32_t newnfs_true, newnfs_false, newnfs_xdrneg1;
+
+/* And other global data */
+nfstype nfsv34_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK,
+		      NFFIFO, NFNON };
+enum vtype newnv2tov_type[8] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON };
+enum vtype nv34tov_type[8]={ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO };
+struct timeval nfsboottime;	/* Copy boottime once, so it never changes */
+int nfscl_ticks;
+int nfsrv_useacl = 1;
+struct nfssockreq nfsrv_nfsuserdsock;
+nfsuserd_state nfsrv_nfsuserd = NOTRUNNING;
+static int nfsrv_userdupcalls = 0;
+struct nfsreqhead nfsd_reqq;
+uid_t nfsrv_defaultuid = UID_NOBODY;
+gid_t nfsrv_defaultgid = GID_NOGROUP;
+int nfsrv_lease = NFSRV_LEASE;
+int ncl_mbuf_mlen = MLEN;
+int nfsd_enable_stringtouid = 0;
+int nfsrv_doflexfile = 0;
+static int nfs_enable_uidtostring = 0;
+NFSNAMEIDMUTEX;
+NFSSOCKMUTEX;
+extern int nfsrv_lughashsize;
+extern struct mtx nfsrv_dslock_mtx;
+extern volatile int nfsrv_devidcnt;
+extern int nfscl_debuglevel;
+extern struct nfsdevicehead nfsrv_devidhead;
+extern struct nfsstatsv1 nfsstatsv1;
+
+SYSCTL_DECL(_vfs_nfs);
+SYSCTL_INT(_vfs_nfs, OID_AUTO, enable_uidtostring, CTLFLAG_RW,
+    &nfs_enable_uidtostring, 0, "Make nfs always send numeric owner_names");
+
+int nfsrv_maxpnfsmirror = 1;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, pnfsmirror, CTLFLAG_RD,
+    &nfsrv_maxpnfsmirror, 0, "Mirror level for pNFS service");
+
+/*
+ * This array of structures indicates, for V4:
+ * retfh - which of 3 types of calling args are used
+ *	0 - doesn't change cfh or use a sfh
+ *	1 - replaces cfh with a new one (unless it returns an error status)
+ *	2 - uses cfh and sfh
+ * needscfh - if the op wants a cfh and premtime
+ *	0 - doesn't use a cfh
+ *	1 - uses a cfh, but doesn't want pre-op attributes
+ *	2 - uses a cfh and wants pre-op attributes
+ * savereply - indicates a non-idempotent Op
+ *	0 - idempotent
+ *	1 - non-idempotent
+ * Ops that are ordered via seqid# are handled separately from these
+ * non-idempotent Ops.
+ * Define it here, since it is used by both the client and server.
+ */
+struct nfsv4_opflag nfsv4_opflag[NFSV41_NOPS] = {
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* undef */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* undef */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* undef */
+	{ 0, 1, 0, 0, LK_SHARED, 1, 1 },		/* Access */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Close */
+	{ 0, 2, 0, 1, LK_EXCLUSIVE, 1, 1 },		/* Commit */
+	{ 1, 2, 1, 1, LK_EXCLUSIVE, 1, 1 },		/* Create */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Delegpurge */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Delegreturn */
+	{ 0, 1, 0, 0, LK_SHARED, 1, 1 },		/* Getattr */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* GetFH */
+	{ 2, 1, 1, 1, LK_EXCLUSIVE, 1, 1 },		/* Link */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Lock */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* LockT */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* LockU */
+	{ 1, 2, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Lookup */
+	{ 1, 2, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Lookupp */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* NVerify */
+	{ 1, 1, 0, 1, LK_EXCLUSIVE, 1, 0 },		/* Open */
+	{ 1, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* OpenAttr */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* OpenConfirm */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* OpenDowngrade */
+	{ 1, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* PutFH */
+	{ 1, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* PutPubFH */
+	{ 1, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* PutRootFH */
+	{ 0, 1, 0, 0, LK_SHARED, 1, 0 },		/* Read */
+	{ 0, 1, 0, 0, LK_SHARED, 1, 1 },		/* Readdir */
+	{ 0, 1, 0, 0, LK_SHARED, 1, 1 },		/* ReadLink */
+	{ 0, 2, 1, 1, LK_EXCLUSIVE, 1, 1 },		/* Remove */
+	{ 2, 1, 1, 1, LK_EXCLUSIVE, 1, 1 },		/* Rename */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Renew */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* RestoreFH */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* SaveFH */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* SecInfo */
+	{ 0, 2, 1, 1, LK_EXCLUSIVE, 1, 0 },		/* Setattr */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* SetClientID */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* SetClientIDConfirm */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Verify */
+	{ 0, 2, 1, 1, LK_EXCLUSIVE, 1, 0 },		/* Write */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* ReleaseLockOwner */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Backchannel Ctrl */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 0, 0 },		/* Bind Conn to Sess */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 0, 0 },		/* Exchange ID */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 0, 0 },		/* Create Session */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 0, 0 },		/* Destroy Session */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Free StateID */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Get Dir Deleg */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Get Device Info */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Get Device List */
+	{ 0, 1, 0, 1, LK_EXCLUSIVE, 1, 1 },		/* Layout Commit */
+	{ 0, 1, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Layout Get */
+	{ 0, 1, 0, 1, LK_EXCLUSIVE, 1, 0 },		/* Layout Return */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Secinfo No name */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Sequence */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Set SSV */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Test StateID */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 },		/* Want Delegation */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 0, 0 },		/* Destroy ClientID */
+	{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 0 },		/* Reclaim Complete */
+};
+#endif	/* !APPLEKEXT */
+
+static int ncl_mbuf_mhlen = MHLEN;
+static int nfsrv_usercnt = 0;
+static int nfsrv_dnsnamelen;
+static u_char *nfsrv_dnsname = NULL;
+static int nfsrv_usermax = 999999999;
+struct nfsrv_lughash {
+	struct mtx		mtx;
+	struct nfsuserhashhead	lughead;
+};
+static struct nfsrv_lughash	*nfsuserhash;
+static struct nfsrv_lughash	*nfsusernamehash;
+static struct nfsrv_lughash	*nfsgrouphash;
+static struct nfsrv_lughash	*nfsgroupnamehash;
+
+/*
+ * This static array indicates whether or not the RPC generates a large
+ * reply. This is used by nfs_reply() to decide whether or not an mbuf
+ * cluster should be allocated. (If a cluster is required by an RPC
+ * marked 0 in this array, the code will still work, just not quite as
+ * efficiently.)
+ */
+static int nfs_bigreply[NFSV41_NPROCS] = { 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 };
+
+/* local functions */
+static int nfsrv_skipace(struct nfsrv_descript *nd, int *acesizep);
+static void nfsv4_wanted(struct nfsv4lock *lp);
+static int nfsrv_cmpmixedcase(u_char *cp, u_char *cp2, int len);
+static int nfsrv_getuser(int procnum, uid_t uid, gid_t gid, char *name,
+    NFSPROC_T *p);
+static void nfsrv_removeuser(struct nfsusrgrp *usrp, int isuser);
+static int nfsrv_getrefstr(struct nfsrv_descript *, u_char **, u_char **,
+    int *, int *);
+static void nfsrv_refstrbigenough(int, u_char **, u_char **, int *);
+
+static struct {
+	int	op;
+	int	opcnt;
+	const u_char *tag;
+	int	taglen;
+} nfsv4_opmap[NFSV41_NPROCS] = {
+	{ 0, 1, "Null", 4 },
+	{ NFSV4OP_GETATTR, 1, "Getattr", 7, },
+	{ NFSV4OP_SETATTR, 2, "Setattr", 7, },
+	{ NFSV4OP_LOOKUP, 3, "Lookup", 6, },
+	{ NFSV4OP_ACCESS, 2, "Access", 6, },
+	{ NFSV4OP_READLINK, 2, "Readlink", 8, },
+	{ NFSV4OP_READ, 1, "Read", 4, },
+	{ NFSV4OP_WRITE, 2, "Write", 5, },
+	{ NFSV4OP_OPEN, 5, "Open", 4, },
+	{ NFSV4OP_CREATE, 5, "Create", 6, },
+	{ NFSV4OP_CREATE, 1, "Create", 6, },
+	{ NFSV4OP_CREATE, 3, "Create", 6, },
+	{ NFSV4OP_REMOVE, 1, "Remove", 6, },
+	{ NFSV4OP_REMOVE, 1, "Remove", 6, },
+	{ NFSV4OP_SAVEFH, 5, "Rename", 6, },
+	{ NFSV4OP_SAVEFH, 4, "Link", 4, },
+	{ NFSV4OP_READDIR, 2, "Readdir", 7, },
+	{ NFSV4OP_READDIR, 2, "Readdir", 7, },
+	{ NFSV4OP_GETATTR, 1, "Getattr", 7, },
+	{ NFSV4OP_GETATTR, 1, "Getattr", 7, },
+	{ NFSV4OP_GETATTR, 1, "Getattr", 7, },
+	{ NFSV4OP_COMMIT, 2, "Commit", 6, },
+	{ NFSV4OP_LOOKUPP, 3, "Lookupp", 7, },
+	{ NFSV4OP_SETCLIENTID, 1, "SetClientID", 11, },
+	{ NFSV4OP_SETCLIENTIDCFRM, 1, "SetClientIDConfirm", 18, },
+	{ NFSV4OP_LOCK, 1, "Lock", 4, },
+	{ NFSV4OP_LOCKU, 1, "LockU", 5, },
+	{ NFSV4OP_OPEN, 2, "Open", 4, },
+	{ NFSV4OP_CLOSE, 1, "Close", 5, },
+	{ NFSV4OP_OPENCONFIRM, 1, "Openconfirm", 11, },
+	{ NFSV4OP_LOCKT, 1, "LockT", 5, },
+	{ NFSV4OP_OPENDOWNGRADE, 1, "Opendowngrade", 13, },
+	{ NFSV4OP_RENEW, 1, "Renew", 5, },
+	{ NFSV4OP_PUTROOTFH, 1, "Dirpath", 7, },
+	{ NFSV4OP_RELEASELCKOWN, 1, "Rellckown", 9, },
+	{ NFSV4OP_DELEGRETURN, 1, "Delegret", 8, },
+	{ NFSV4OP_DELEGRETURN, 3, "DelegRemove", 11, },
+	{ NFSV4OP_DELEGRETURN, 7, "DelegRename1", 12, },
+	{ NFSV4OP_DELEGRETURN, 9, "DelegRename2", 12, },
+	{ NFSV4OP_GETATTR, 1, "Getacl", 6, },
+	{ NFSV4OP_SETATTR, 1, "Setacl", 6, },
+	{ NFSV4OP_EXCHANGEID, 1, "ExchangeID", 10, },
+	{ NFSV4OP_CREATESESSION, 1, "CreateSession", 13, },
+	{ NFSV4OP_DESTROYSESSION, 1, "DestroySession", 14, },
+	{ NFSV4OP_DESTROYCLIENTID, 1, "DestroyClient", 13, },
+	{ NFSV4OP_FREESTATEID, 1, "FreeStateID", 11, },
+	{ NFSV4OP_LAYOUTGET, 1, "LayoutGet", 9, },
+	{ NFSV4OP_GETDEVINFO, 1, "GetDeviceInfo", 13, },
+	{ NFSV4OP_LAYOUTCOMMIT, 1, "LayoutCommit", 12, },
+	{ NFSV4OP_LAYOUTRETURN, 1, "LayoutReturn", 12, },
+	{ NFSV4OP_RECLAIMCOMPL, 1, "ReclaimComplete", 15, },
+	{ NFSV4OP_WRITE, 1, "WriteDS", 7, },
+	{ NFSV4OP_READ, 1, "ReadDS", 6, },
+	{ NFSV4OP_COMMIT, 1, "CommitDS", 8, },
+	{ NFSV4OP_OPEN, 3, "OpenLayoutGet", 13, },
+	{ NFSV4OP_OPEN, 8, "CreateLayGet", 12, },
+};
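As a reading aid for the two tables above, here is a minimal sketch (not part of the import) of how the needscfh and needsseq columns feed the op count of an NFSv4 compound; it mirrors the logic in nfscl_reqstart() further down in this file, and the procedure number is just a placeholder.

/*
 * Illustrative sketch only: derive the number of ops in a compound from
 * nfsv4_opmap[] and nfsv4_opflag[], the way nfscl_reqstart() below does.
 * "procnum" is any NFSPROC_* value; "is_nfsv41" says whether a Sequence
 * op is prepended for NFSv4.1.
 */
static int
example_compound_opcnt(int procnum, int is_nfsv41)
{
	int op = nfsv4_opmap[procnum].op;
	int opcnt = nfsv4_opmap[procnum].opcnt;

	/* A PutFH (plus a Getattr when needscfh == 2) is prepended. */
	opcnt += nfsv4_opflag[op].needscfh;
	/* NFSv4.1 compounds start with a Sequence op when required. */
	if (is_nfsv41)
		opcnt += nfsv4_opflag[op].needsseq;
	return (opcnt);
}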
+
+/*
+ * NFS RPCs that have a large request message size.
+ */
+static int nfs_bigrequest[NFSV41_NPROCS] = {
+	0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0
+};
+
+/*
+ * Start building a request. Mostly just put the first file handle in
+ * place.
+ */
+APPLESTATIC void
+nfscl_reqstart(struct nfsrv_descript *nd, int procnum, struct nfsmount *nmp,
+    u_int8_t *nfhp, int fhlen, u_int32_t **opcntpp, struct nfsclsession *sep,
+    int vers, int minorvers)
+{
+	struct mbuf *mb;
+	u_int32_t *tl;
+	int opcnt;
+	nfsattrbit_t attrbits;
+
+	/*
+	 * First, fill in some of the fields of nd.
+	 */
+	nd->nd_slotseq = NULL;
+	if (vers == NFS_VER4) {
+		nd->nd_flag = ND_NFSV4 | ND_NFSCL;
+		if (minorvers == NFSV41_MINORVERSION)
+			nd->nd_flag |= ND_NFSV41;
+	} else if (vers == NFS_VER3)
+		nd->nd_flag = ND_NFSV3 | ND_NFSCL;
+	else {
+		if (NFSHASNFSV4(nmp)) {
+			nd->nd_flag = ND_NFSV4 | ND_NFSCL;
+			if (NFSHASNFSV4N(nmp))
+				nd->nd_flag |= ND_NFSV41;
+		} else if (NFSHASNFSV3(nmp))
+			nd->nd_flag = ND_NFSV3 | ND_NFSCL;
+		else
+			nd->nd_flag = ND_NFSV2 | ND_NFSCL;
+	}
+	nd->nd_procnum = procnum;
+	nd->nd_repstat = 0;
+
+	/*
+	 * Get the first mbuf for the request.
+	 */
+	if (nfs_bigrequest[procnum])
+		NFSMCLGET(mb, M_WAITOK);
+	else
+		NFSMGET(mb);
+	mbuf_setlen(mb, 0);
+	nd->nd_mreq = nd->nd_mb = mb;
+	nd->nd_bpos = NFSMTOD(mb, caddr_t);
+
+	/*
+	 * And fill the first file handle into the request.
+	 */
+	if (nd->nd_flag & ND_NFSV4) {
+		opcnt = nfsv4_opmap[procnum].opcnt +
+		    nfsv4_opflag[nfsv4_opmap[procnum].op].needscfh;
+		if ((nd->nd_flag & ND_NFSV41) != 0) {
+			opcnt += nfsv4_opflag[nfsv4_opmap[procnum].op].needsseq;
+			if (procnum == NFSPROC_RENEW)
+				/*
+				 * For the special case of Renew, just do a
+				 * Sequence Op.
+				 */
+				opcnt = 1;
+			else if (procnum == NFSPROC_WRITEDS ||
+			    procnum == NFSPROC_COMMITDS)
+				/*
+				 * For the special case of a Write or Commit to
+				 * a DS, the opcnt == 3, for Sequence, PutFH,
+				 * Write/Commit.
+				 */
+				opcnt = 3;
+		}
+		/*
+		 * What should the tag really be?
+		 */
+		(void) nfsm_strtom(nd, nfsv4_opmap[procnum].tag,
+			nfsv4_opmap[procnum].taglen);
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		if ((nd->nd_flag & ND_NFSV41) != 0)
+			*tl++ = txdr_unsigned(NFSV41_MINORVERSION);
+		else
+			*tl++ = txdr_unsigned(NFSV4_MINORVERSION);
+		if (opcntpp != NULL)
+			*opcntpp = tl;
+		*tl = txdr_unsigned(opcnt);
+		if ((nd->nd_flag & ND_NFSV41) != 0 &&
+		    nfsv4_opflag[nfsv4_opmap[procnum].op].needsseq > 0) {
+			if (nfsv4_opflag[nfsv4_opmap[procnum].op].loopbadsess >
+			    0)
+				nd->nd_flag |= ND_LOOPBADSESS;
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_SEQUENCE);
+			if (sep == NULL)
+				sep = nfsmnt_mdssession(nmp);
+			nfsv4_setsequence(nmp, nd, sep,
+			    nfs_bigreply[procnum]);
+		}
+		if (nfsv4_opflag[nfsv4_opmap[procnum].op].needscfh > 0) {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_PUTFH);
+			(void) nfsm_fhtom(nd, nfhp, fhlen, 0);
+			if (nfsv4_opflag[nfsv4_opmap[procnum].op].needscfh
+			    == 2 && procnum != NFSPROC_WRITEDS &&
+			    procnum != NFSPROC_COMMITDS) {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV4OP_GETATTR);
+				/*
+				 * For Lookup Ops, we want all the directory
+				 * attributes, so we can load the name cache.
+				 */
+				if (procnum == NFSPROC_LOOKUP ||
+				    procnum == NFSPROC_LOOKUPP)
+					NFSGETATTR_ATTRBIT(&attrbits);
+				else {
+					NFSWCCATTR_ATTRBIT(&attrbits);
+					nd->nd_flag |= ND_V4WCCATTR;
+				}
+				(void) nfsrv_putattrbit(nd, &attrbits);
+			}
+		}
+		if (procnum != NFSPROC_RENEW ||
+		    (nd->nd_flag & ND_NFSV41) == 0) {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(nfsv4_opmap[procnum].op);
+		}
+	} else {
+		(void) nfsm_fhtom(nd, nfhp, fhlen, 0);
+	}
+	if (procnum < NFSV41_NPROCS)
+		NFSINCRGLOBAL(nfsstatsv1.rpccnt[procnum]);
+}
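A hypothetical caller sketch follows (the real callers live in nfs_clrpcops.c, not shown here): it starts a Getattr request against the file handle held in an nfsnode "np" on mount "nmp", both assumed to be supplied by the caller; passing vers == 0 and minorvers == 0 lets the mount flags pick the NFS version, as handled above.

/*
 * Hypothetical caller sketch, not part of the import: start a Getattr
 * request.  "nmp" and "np" stand in for the caller's nfsmount and nfsnode.
 */
static void
example_start_getattr(struct nfsmount *nmp, struct nfsnode *np)
{
	struct nfsrv_descript nfsd, *nd = &nfsd;

	/* vers == 0 and minorvers == 0 defer to the mount's NFS version. */
	nfscl_reqstart(nd, NFSPROC_GETATTR, nmp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len, NULL, NULL, 0, 0);
	/* The op-specific XDR and the RPC itself would follow here. */
}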
+
+/*
+ * Put a state Id in the mbuf list.
+ */
+APPLESTATIC void
+nfsm_stateidtom(struct nfsrv_descript *nd, nfsv4stateid_t *stateidp, int flag)
+{
+	nfsv4stateid_t *st;
+
+	NFSM_BUILD(st, nfsv4stateid_t *, NFSX_STATEID);
+	if (flag == NFSSTATEID_PUTALLZERO) {
+		st->seqid = 0;
+		st->other[0] = 0;
+		st->other[1] = 0;
+		st->other[2] = 0;
+	} else if (flag == NFSSTATEID_PUTALLONE) {
+		st->seqid = 0xffffffff;
+		st->other[0] = 0xffffffff;
+		st->other[1] = 0xffffffff;
+		st->other[2] = 0xffffffff;
+	} else if (flag == NFSSTATEID_PUTSEQIDZERO) {
+		st->seqid = 0;
+		st->other[0] = stateidp->other[0];
+		st->other[1] = stateidp->other[1];
+		st->other[2] = stateidp->other[2];
+	} else {
+		st->seqid = stateidp->seqid;
+		st->other[0] = stateidp->other[0];
+		st->other[1] = stateidp->other[1];
+		st->other[2] = stateidp->other[2];
+	}
+}
+
+/*
+ * Fill in the settable attributes. The NFSSATTR_FULL flag indicates whether
+ * to fill them all in or just the mode and times.
+ */
+void
+nfscl_fillsattr(struct nfsrv_descript *nd, struct vattr *vap,
+    struct vnode *vp, int flags, u_int32_t rdev)
+{
+	u_int32_t *tl;
+	struct nfsv2_sattr *sp;
+	nfsattrbit_t attrbits;
+
+	switch (nd->nd_flag & (ND_NFSV2 | ND_NFSV3 | ND_NFSV4)) {
+	case ND_NFSV2:
+		NFSM_BUILD(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
+		if (vap->va_mode == (mode_t)VNOVAL)
+			sp->sa_mode = newnfs_xdrneg1;
+		else
+			sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
+		if (vap->va_uid == (uid_t)VNOVAL)
+			sp->sa_uid = newnfs_xdrneg1;
+		else
+			sp->sa_uid = txdr_unsigned(vap->va_uid);
+		if (vap->va_gid == (gid_t)VNOVAL)
+			sp->sa_gid = newnfs_xdrneg1;
+		else
+			sp->sa_gid = txdr_unsigned(vap->va_gid);
+		if (flags & NFSSATTR_SIZE0)
+			sp->sa_size = 0;
+		else if (flags & NFSSATTR_SIZENEG1)
+			sp->sa_size = newnfs_xdrneg1;
+		else if (flags & NFSSATTR_SIZERDEV)
+			sp->sa_size = txdr_unsigned(rdev);
+		else
+			sp->sa_size = txdr_unsigned(vap->va_size);
+		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
+		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
+		break;
+	case ND_NFSV3:
+		if (vap->va_mode != (mode_t)VNOVAL) {
+			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			*tl++ = newnfs_true;
+			*tl = txdr_unsigned(vap->va_mode);
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_false;
+		}
+		if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL) {
+			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			*tl++ = newnfs_true;
+			*tl = txdr_unsigned(vap->va_uid);
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_false;
+		}
+		if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL) {
+			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			*tl++ = newnfs_true;
+			*tl = txdr_unsigned(vap->va_gid);
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_false;
+		}
+		if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL) {
+			NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+			*tl++ = newnfs_true;
+			txdr_hyper(vap->va_size, tl);
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_false;
+		}
+		if (vap->va_atime.tv_sec != VNOVAL) {
+			if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
+				NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+				*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
+				txdr_nfsv3time(&vap->va_atime, tl);
+			} else {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
+			}
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
+		}
+		if (vap->va_mtime.tv_sec != VNOVAL) {
+			if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
+				NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+				*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
+				txdr_nfsv3time(&vap->va_mtime, tl);
+			} else {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
+			}
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
+		}
+		break;
+	case ND_NFSV4:
+		NFSZERO_ATTRBIT(&attrbits);
+		if (vap->va_mode != (mode_t)VNOVAL)
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_MODE);
+		if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL)
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNER);
+		if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL)
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNERGROUP);
+		if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL)
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_SIZE);
+		if (vap->va_atime.tv_sec != VNOVAL)
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEACCESSSET);
+		if (vap->va_mtime.tv_sec != VNOVAL)
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFYSET);
+		(void) nfsv4_fillattr(nd, vp->v_mount, vp, NULL, vap, NULL, 0,
+		    &attrbits, NULL, NULL, 0, 0, 0, 0, (uint64_t)0, NULL);
+		break;
+	}
+}
+
+#ifndef APPLE
+/*
+ * copies mbuf chain to the uio scatter/gather list
+ */
+int
+nfsm_mbufuio(struct nfsrv_descript *nd, struct uio *uiop, int siz)
+{
+	char *mbufcp, *uiocp;
+	int xfer, left, len;
+	mbuf_t mp;
+	long uiosiz, rem;
+	int error = 0;
+
+	mp = nd->nd_md;
+	mbufcp = nd->nd_dpos;
+	len = NFSMTOD(mp, caddr_t) + mbuf_len(mp) - mbufcp;
+	rem = NFSM_RNDUP(siz) - siz;
+	while (siz > 0) {
+		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL) {
+			error = EBADRPC;
+			goto out;
+		}
+		left = uiop->uio_iov->iov_len;
+		uiocp = uiop->uio_iov->iov_base;
+		if (left > siz)
+			left = siz;
+		uiosiz = left;
+		while (left > 0) {
+			while (len == 0) {
+				mp = mbuf_next(mp);
+				if (mp == NULL) {
+					error = EBADRPC;
+					goto out;
+				}
+				mbufcp = NFSMTOD(mp, caddr_t);
+				len = mbuf_len(mp);
+				KASSERT(len >= 0,
+				    ("len %d, corrupted mbuf?", len));
+			}
+			xfer = (left > len) ? len : left;
+#ifdef notdef
+			/* Not Yet.. */
+			if (uiop->uio_iov->iov_op != NULL)
+				(*(uiop->uio_iov->iov_op))
+				(mbufcp, uiocp, xfer);
+			else
+#endif
+			if (uiop->uio_segflg == UIO_SYSSPACE)
+				NFSBCOPY(mbufcp, uiocp, xfer);
+			else
+				copyout(mbufcp, CAST_USER_ADDR_T(uiocp), xfer);
+			left -= xfer;
+			len -= xfer;
+			mbufcp += xfer;
+			uiocp += xfer;
+			uiop->uio_offset += xfer;
+			uiop->uio_resid -= xfer;
+		}
+		if (uiop->uio_iov->iov_len <= siz) {
+			uiop->uio_iovcnt--;
+			uiop->uio_iov++;
+		} else {
+			uiop->uio_iov->iov_base = (void *)
+				((char *)uiop->uio_iov->iov_base + uiosiz);
+			uiop->uio_iov->iov_len -= uiosiz;
+		}
+		siz -= uiosiz;
+	}
+	nd->nd_dpos = mbufcp;
+	nd->nd_md = mp;
+	if (rem > 0) {
+		if (len < rem)
+			error = nfsm_advance(nd, rem, len);
+		else
+			nd->nd_dpos += rem;
+	}
+
+out:
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
+#endif	/* !APPLE */
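A short usage fragment (hypothetical, not from the import): the client read path hands the reply descriptor and the caller's uio to this routine to pull the opaque data out of the mbuf chain; "retlen" stands for a byte count already parsed from the reply.

	/*
	 * Hypothetical fragment: copy "retlen" bytes of Read reply data from
	 * the reply mbufs into the caller's scatter/gather list.
	 */
	error = nfsm_mbufuio(nd, uiop, retlen);
	if (error != 0)
		goto nfsmout;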
+
+/*
+ * Help break down an mbuf chain by making the first siz bytes contiguous
+ * and returning a pointer to them.
+ * This is used by the macro NFSM_DISSECT for the tough cases.
+ */
+APPLESTATIC void *
+nfsm_dissct(struct nfsrv_descript *nd, int siz, int how)
+{
+	mbuf_t mp2;
+	int siz2, xfer;
+	caddr_t p;
+	int left;
+	caddr_t retp;
+
+	retp = NULL;
+	left = NFSMTOD(nd->nd_md, caddr_t) + mbuf_len(nd->nd_md) - nd->nd_dpos;
+	while (left == 0) {
+		nd->nd_md = mbuf_next(nd->nd_md);
+		if (nd->nd_md == NULL)
+			return (retp);
+		left = mbuf_len(nd->nd_md);
+		nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
+	}
+	if (left >= siz) {
+		retp = nd->nd_dpos;
+		nd->nd_dpos += siz;
+	} else if (mbuf_next(nd->nd_md) == NULL) {
+		return (retp);
+	} else if (siz > ncl_mbuf_mhlen) {
+		panic("nfs S too big");
+	} else {
+		MGET(mp2, MT_DATA, how);
+		if (mp2 == NULL)
+			return (NULL);
+		mbuf_setnext(mp2, mbuf_next(nd->nd_md));
+		mbuf_setnext(nd->nd_md, mp2);
+		mbuf_setlen(nd->nd_md, mbuf_len(nd->nd_md) - left);
+		nd->nd_md = mp2;
+		retp = p = NFSMTOD(mp2, caddr_t);
+		NFSBCOPY(nd->nd_dpos, p, left);	/* Copy what was left */
+		siz2 = siz - left;
+		p += left;
+		mp2 = mbuf_next(mp2);
+		/* Loop around copying up the siz2 bytes */
+		while (siz2 > 0) {
+			if (mp2 == NULL)
+				return (NULL);
+			xfer = (siz2 > mbuf_len(mp2)) ? mbuf_len(mp2) : siz2;
+			if (xfer > 0) {
+				NFSBCOPY(NFSMTOD(mp2, caddr_t), p, xfer);
+				NFSM_DATAP(mp2, xfer);
+				mbuf_setlen(mp2, mbuf_len(mp2) - xfer);
+				p += xfer;
+				siz2 -= xfer;
+			}
+			if (siz2 > 0)
+				mp2 = mbuf_next(mp2);
+		}
+		mbuf_setlen(nd->nd_md, siz);
+		nd->nd_md = mp2;
+		nd->nd_dpos = NFSMTOD(mp2, caddr_t);
+	}
+	return (retp);
+}
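For context: callers normally reach nfsm_dissct() through the NFSM_DISSECT() macro (defined in nfsm_subs.h, part of this import), which requests the bytes and, on a NULL return, bails out to the caller's nfsmout label with an error. A typical use, as seen throughout the functions below:

	NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);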
+
+/*
+ * Advance the position in the mbuf chain.
+ * If offs == 0, this is a no-op, but it is simpler to just return from
+ * here than check for offs > 0 for all calls to nfsm_advance.
+ * If left == -1, it should be calculated here.
+ */
+APPLESTATIC int
+nfsm_advance(struct nfsrv_descript *nd, int offs, int left)
+{
+	int error = 0;
+
+	if (offs == 0)
+		goto out;
+	/*
+	 * A negative offs might indicate a corrupted mbuf chain and,
+	 * as such, a printf is logged.
+	 */
+	if (offs < 0) {
+		printf("nfsrv_advance: negative offs\n");
+		error = EBADRPC;
+		goto out;
+	}
+
+	/*
+	 * If left == -1, calculate it here.
+	 */
+	if (left == -1)
+		left = NFSMTOD(nd->nd_md, caddr_t) + mbuf_len(nd->nd_md) -
+		    nd->nd_dpos;
+
+	/*
+	 * Loop around, advancing over the mbuf data.
+	 */
+	while (offs > left) {
+		offs -= left;
+		nd->nd_md = mbuf_next(nd->nd_md);
+		if (nd->nd_md == NULL) {
+			error = EBADRPC;
+			goto out;
+		}
+		left = mbuf_len(nd->nd_md);
+		nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
+	}
+	nd->nd_dpos += offs;
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * Copy a string into mbuf(s).
+ * Return the number of bytes output, including XDR overheads.
+ */
+APPLESTATIC int
+nfsm_strtom(struct nfsrv_descript *nd, const char *cp, int siz)
+{
+	mbuf_t m2;
+	int xfer, left;
+	mbuf_t m1;
+	int rem, bytesize;
+	u_int32_t *tl;
+	char *cp2;
+
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(siz);
+	rem = NFSM_RNDUP(siz) - siz;
+	bytesize = NFSX_UNSIGNED + siz + rem;
+	m2 = nd->nd_mb;
+	cp2 = nd->nd_bpos;
+	left = M_TRAILINGSPACE(m2);
+
+	/*
+	 * Loop around copying the string to mbuf(s).
+	 */
+	while (siz > 0) {
+		if (left == 0) {
+			if (siz > ncl_mbuf_mlen)
+				NFSMCLGET(m1, M_WAITOK);
+			else
+				NFSMGET(m1);
+			mbuf_setlen(m1, 0);
+			mbuf_setnext(m2, m1);
+			m2 = m1;
+			cp2 = NFSMTOD(m2, caddr_t);
+			left = M_TRAILINGSPACE(m2);
+		}
+		if (left >= siz)
+			xfer = siz;
+		else
+			xfer = left;
+		NFSBCOPY(cp, cp2, xfer);
+		cp += xfer;
+		mbuf_setlen(m2, mbuf_len(m2) + xfer);
+		siz -= xfer;
+		left -= xfer;
+		if (siz == 0 && rem) {
+			if (left < rem)
+				panic("nfsm_strtom");
+			NFSBZERO(cp2 + xfer, rem);
+			mbuf_setlen(m2, mbuf_len(m2) + rem);
+		}
+	}
+	nd->nd_mb = m2;
+	nd->nd_bpos = NFSMTOD(m2, caddr_t) + mbuf_len(m2);
+	return (bytesize);
+}
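To make the byte accounting concrete, a short worked example (not from the import):

/*
 * Worked example: for siz == 5, NFSM_RNDUP(5) == 8, so rem == 3 and the
 * function returns NFSX_UNSIGNED + 5 + 3 == 12 bytes on the wire: a 4 byte
 * XDR length word, 5 data bytes and 3 bytes of zero padding out to the
 * next 4 byte boundary.
 */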
+
+/*
+ * Called once to initialize data structures...
+ */
+APPLESTATIC void
+newnfs_init(void)
+{
+	static int nfs_inited = 0;
+
+	if (nfs_inited)
+		return;
+	nfs_inited = 1;
+
+	newnfs_true = txdr_unsigned(TRUE);
+	newnfs_false = txdr_unsigned(FALSE);
+	newnfs_xdrneg1 = txdr_unsigned(-1);
+	nfscl_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
+	if (nfscl_ticks < 1)
+		nfscl_ticks = 1;
+	NFSSETBOOTTIME(nfsboottime);
+
+	/*
+	 * Initialize reply list and start timer
+	 */
+	TAILQ_INIT(&nfsd_reqq);
+	NFS_TIMERINIT;
+}
+
+/*
+ * Put a file handle in an mbuf list.
+ * If the size argument == 0, just use the default size.
+ * set_true == 1 if a newnfs_true should be prepended to the file handle.
+ * Return the number of bytes output, including XDR overhead.
+ */
+APPLESTATIC int
+nfsm_fhtom(struct nfsrv_descript *nd, u_int8_t *fhp, int size, int set_true)
+{
+	u_int32_t *tl;
+	u_int8_t *cp;
+	int fullsiz, rem, bytesize = 0;
+
+	if (size == 0)
+		size = NFSX_MYFH;
+	switch (nd->nd_flag & (ND_NFSV2 | ND_NFSV3 | ND_NFSV4)) {
+	case ND_NFSV2:
+		if (size > NFSX_V2FH)
+			panic("fh size > NFSX_V2FH for NFSv2");
+		NFSM_BUILD(cp, u_int8_t *, NFSX_V2FH);
+		NFSBCOPY(fhp, cp, size);
+		if (size < NFSX_V2FH)
+			NFSBZERO(cp + size, NFSX_V2FH - size);
+		bytesize = NFSX_V2FH;
+		break;
+	case ND_NFSV3:
+	case ND_NFSV4:
+		fullsiz = NFSM_RNDUP(size);
+		rem = fullsiz - size;
+		if (set_true) {
+		    bytesize = 2 * NFSX_UNSIGNED + fullsiz;
+		    NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		    *tl = newnfs_true;
+		} else {
+		    bytesize = NFSX_UNSIGNED + fullsiz;
+		}
+		(void) nfsm_strtom(nd, fhp, size);
+		break;
+	}
+	return (bytesize);
+}
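Again a short worked example of the return value (illustrative only):

/*
 * Worked example: for a 32 byte handle on V3/V4, NFSM_RNDUP(32) == 32 and
 * rem == 0, so the handle is emitted as one XDR length word plus 32 bytes
 * and the function returns NFSX_UNSIGNED + 32 == 36, or 40 when set_true
 * is 1 and the newnfs_true boolean is prepended.
 */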
+
+/*
+ * This function compares two net addresses by family and returns TRUE
+ * if they are the same host.
+ * If there is any doubt, return FALSE.
+ * The AF_INET family is handled as a special case so that address mbufs
+ * don't need to be saved to store "struct in_addr", which is only 4 bytes.
+ */
+APPLESTATIC int
+nfsaddr_match(int family, union nethostaddr *haddr, NFSSOCKADDR_T nam)
+{
+	struct sockaddr_in *inetaddr;
+
+	switch (family) {
+	case AF_INET:
+		inetaddr = NFSSOCKADDR(nam, struct sockaddr_in *);
+		if (inetaddr->sin_family == AF_INET &&
+		    inetaddr->sin_addr.s_addr == haddr->had_inet.s_addr)
+			return (1);
+		break;
+#ifdef INET6
+	case AF_INET6:
+		{
+		struct sockaddr_in6 *inetaddr6;
+
+		inetaddr6 = NFSSOCKADDR(nam, struct sockaddr_in6 *);
+		/* XXX - should test sin6_scope_id ? */
+		if (inetaddr6->sin6_family == AF_INET6 &&
+		    IN6_ARE_ADDR_EQUAL(&inetaddr6->sin6_addr,
+			  &haddr->had_inet6))
+			return (1);
+		}
+		break;
+#endif
+	}
+	return (0);
+}
+
+/*
+ * Similar to the above, but takes two NFSSOCKADDR_T args.
+ */
+APPLESTATIC int
+nfsaddr2_match(NFSSOCKADDR_T nam1, NFSSOCKADDR_T nam2)
+{
+	struct sockaddr_in *addr1, *addr2;
+	struct sockaddr *inaddr;
+
+	inaddr = NFSSOCKADDR(nam1, struct sockaddr *);
+	switch (inaddr->sa_family) {
+	case AF_INET:
+		addr1 = NFSSOCKADDR(nam1, struct sockaddr_in *);
+		addr2 = NFSSOCKADDR(nam2, struct sockaddr_in *);
+		if (addr2->sin_family == AF_INET &&
+		    addr1->sin_addr.s_addr == addr2->sin_addr.s_addr)
+			return (1);
+		break;
+#ifdef INET6
+	case AF_INET6:
+		{
+		struct sockaddr_in6 *inet6addr1, *inet6addr2;
+
+		inet6addr1 = NFSSOCKADDR(nam1, struct sockaddr_in6 *);
+		inet6addr2 = NFSSOCKADDR(nam2, struct sockaddr_in6 *);
+		/* XXX - should test sin6_scope_id ? */
+		if (inet6addr2->sin6_family == AF_INET6 &&
+		    IN6_ARE_ADDR_EQUAL(&inet6addr1->sin6_addr,
+			  &inet6addr2->sin6_addr))
+			return (1);
+		}
+		break;
+#endif
+	}
+	return (0);
+}
+
+
+/*
+ * Trim the stuff already dissected off the mbuf list.
+ */
+APPLESTATIC void
+newnfs_trimleading(struct nfsrv_descript *nd)
+{
+	mbuf_t m, n;
+	int offs;
+
+	/*
+	 * First, free up leading mbufs.
+	 */
+	if (nd->nd_mrep != nd->nd_md) {
+		m = nd->nd_mrep;
+		while (mbuf_next(m) != nd->nd_md) {
+			if (mbuf_next(m) == NULL)
+				panic("nfsm trim leading");
+			m = mbuf_next(m);
+		}
+		mbuf_setnext(m, NULL);
+		mbuf_freem(nd->nd_mrep);
+	}
+	m = nd->nd_md;
+
+	/*
+	 * Now, adjust this mbuf, based on nd_dpos.
+	 */
+	offs = nd->nd_dpos - NFSMTOD(m, caddr_t);
+	if (offs == mbuf_len(m)) {
+		n = m;
+		m = mbuf_next(m);
+		if (m == NULL)
+			panic("nfsm trim leading2");
+		mbuf_setnext(n, NULL);
+		mbuf_freem(n);
+	} else if (offs > 0) {
+		mbuf_setlen(m, mbuf_len(m) - offs);
+		NFSM_DATAP(m, offs);
+	} else if (offs < 0)
+		panic("nfsm trimleading offs");
+	nd->nd_mrep = m;
+	nd->nd_md = m;
+	nd->nd_dpos = NFSMTOD(m, caddr_t);
+}
+
+/*
+ * Trim trailing data off the mbuf list being built.
+ */
+APPLESTATIC void
+newnfs_trimtrailing(struct nfsrv_descript *nd, mbuf_t mb, caddr_t bpos)
+{
+
+	if (mbuf_next(mb)) {
+		mbuf_freem(mbuf_next(mb));
+		mbuf_setnext(mb, NULL);
+	}
+	mbuf_setlen(mb, bpos - NFSMTOD(mb, caddr_t));
+	nd->nd_mb = mb;
+	nd->nd_bpos = bpos;
+}
+
+/*
+ * Dissect a file handle on the client.
+ */
+APPLESTATIC int
+nfsm_getfh(struct nfsrv_descript *nd, struct nfsfh **nfhpp)
+{
+	u_int32_t *tl;
+	struct nfsfh *nfhp;
+	int error, len;
+
+	*nfhpp = NULL;
+	if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		if ((len = fxdr_unsigned(int, *tl)) <= 0 ||
+			len > NFSX_FHMAX) {
+			error = EBADRPC;
+			goto nfsmout;
+		}
+	} else
+		len = NFSX_V2FH;
+	nfhp = malloc(sizeof (struct nfsfh) + len,
+	    M_NFSFH, M_WAITOK);
+	error = nfsrv_mtostr(nd, nfhp->nfh_fh, len);
+	if (error) {
+		free(nfhp, M_NFSFH);
+		goto nfsmout;
+	}
+	nfhp->nfh_len = len;
+	*nfhpp = nfhp;
+nfsmout:
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
+
+/*
+ * Break down the nfsv4 acl.
+ * If aclp == NULL or the data won't fit in an acl, just discard the acl info.
+ */
+APPLESTATIC int
+nfsrv_dissectacl(struct nfsrv_descript *nd, NFSACL_T *aclp, int *aclerrp,
+    int *aclsizep, __unused NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	int i, aclsize;
+	int acecnt, error = 0, aceerr = 0, acesize;
+
+	*aclerrp = 0;
+	if (aclp)
+		aclp->acl_cnt = 0;
+	/*
+	 * Parse out the ace entries and expect them to conform to
+	 * what can be supported by R/W/X bits.
+	 */
+	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+	aclsize = NFSX_UNSIGNED;
+	acecnt = fxdr_unsigned(int, *tl);
+	if (acecnt > ACL_MAX_ENTRIES)
+		aceerr = NFSERR_ATTRNOTSUPP;
+	if (nfsrv_useacl == 0)
+		aceerr = NFSERR_ATTRNOTSUPP;
+	for (i = 0; i < acecnt; i++) {
+		if (aclp && !aceerr)
+			error = nfsrv_dissectace(nd, &aclp->acl_entry[i],
+			    &aceerr, &acesize, p);
+		else
+			error = nfsrv_skipace(nd, &acesize);
+		if (error)
+			goto nfsmout;
+		aclsize += acesize;
+	}
+	if (aclp && !aceerr)
+		aclp->acl_cnt = acecnt;
+	if (aceerr)
+		*aclerrp = aceerr;
+	if (aclsizep)
+		*aclsizep = aclsize;
+nfsmout:
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
+
+/*
+ * Skip over an NFSv4 ace entry. Just dissect the xdr and discard it.
+ */
+static int
+nfsrv_skipace(struct nfsrv_descript *nd, int *acesizep)
+{
+	u_int32_t *tl;
+	int error, len = 0;
+
+	NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+	len = fxdr_unsigned(int, *(tl + 3));
+	error = nfsm_advance(nd, NFSM_RNDUP(len), -1);
+nfsmout:
+	*acesizep = NFSM_RNDUP(len) + (4 * NFSX_UNSIGNED);
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
+
+/*
+ * Get attribute bits from an mbuf list.
+ * Returns EBADRPC for a parsing error, 0 otherwise.
+ * Bits beyond the locally supported words are parsed and discarded; if any
+ * of them are set and retnotsupp != NULL, *retnotsupp is set to
+ * NFSERR_ATTRNOTSUPP.
+ */
+APPLESTATIC int
+nfsrv_getattrbits(struct nfsrv_descript *nd, nfsattrbit_t *attrbitp, int *cntp,
+    int *retnotsupp)
+{
+	u_int32_t *tl;
+	int cnt, i, outcnt;
+	int error = 0;
+
+	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+	cnt = fxdr_unsigned(int, *tl);
+	if (cnt < 0) {
+		error = NFSERR_BADXDR;
+		goto nfsmout;
+	}
+	if (cnt > NFSATTRBIT_MAXWORDS)
+		outcnt = NFSATTRBIT_MAXWORDS;
+	else
+		outcnt = cnt;
+	NFSZERO_ATTRBIT(attrbitp);
+	if (outcnt > 0) {
+		NFSM_DISSECT(tl, u_int32_t *, outcnt * NFSX_UNSIGNED);
+		for (i = 0; i < outcnt; i++)
+			attrbitp->bits[i] = fxdr_unsigned(u_int32_t, *tl++);
+	}
+	for (i = 0; i < (cnt - outcnt); i++) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		if (retnotsupp != NULL && *tl != 0)
+			*retnotsupp = NFSERR_ATTRNOTSUPP;
+	}
+	if (cntp)
+		*cntp = NFSX_UNSIGNED + (cnt * NFSX_UNSIGNED);
+nfsmout:
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
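The wire format parsed here is a counted array of 32 bit mask words; a short worked example (not from the import):

/*
 * Worked example: a bitmap sent as cnt == 2 followed by two mask words
 * consumes 3 * NFSX_UNSIGNED == 12 bytes, which is what is returned via
 * *cntp; any words past NFSATTRBIT_MAXWORDS are parsed and discarded,
 * with *retnotsupp set to NFSERR_ATTRNOTSUPP if any of their bits are on.
 */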
+
+/*
+ * Get the attributes for V4.
+ * If the compare flag is true, test for any attribute changes,
+ * otherwise return the attribute values.
+ * These attributes cover fields in "struct vattr", "struct statfs",
+ * "struct nfsfsinfo", the file handle and the lease duration.
+ * The value of *retcmpp is left 0 if all the attributes are the same and
+ * is set to an NFS error status (e.g. NFSERR_NOTSAME) otherwise.
+ * Returns EBADRPC if it can't be parsed, 0 otherwise.
+ */
+APPLESTATIC int
+nfsv4_loadattr(struct nfsrv_descript *nd, vnode_t vp,
+    struct nfsvattr *nap, struct nfsfh **nfhpp, fhandle_t *fhp, int fhsize,
+    struct nfsv3_pathconf *pc, struct statfs *sbp, struct nfsstatfs *sfp,
+    struct nfsfsinfo *fsp, NFSACL_T *aclp, int compare, int *retcmpp,
+    u_int32_t *leasep, u_int32_t *rderrp, NFSPROC_T *p, struct ucred *cred)
+{
+	u_int32_t *tl;
+	int i = 0, j, k, l = 0, m, bitpos, attrsum = 0;
+	int error, tfhsize, aceerr, attrsize, cnt, retnotsup;
+	u_char *cp, *cp2, namestr[NFSV4_SMALLSTR + 1];
+	nfsattrbit_t attrbits, retattrbits, checkattrbits;
+	struct nfsfh *tnfhp;
+	struct nfsreferral *refp;
+	u_quad_t tquad;
+	nfsquad_t tnfsquad;
+	struct timespec temptime;
+	uid_t uid;
+	gid_t gid;
+	u_int32_t freenum = 0, tuint;
+	u_int64_t uquad = 0, thyp, thyp2;
+#ifdef QUOTA
+	struct dqblk dqb;
+	uid_t savuid;
+#endif
+
+	CTASSERT(sizeof(ino_t) == sizeof(uint64_t));
+	if (compare) {
+		retnotsup = 0;
+		error = nfsrv_getattrbits(nd, &attrbits, NULL, &retnotsup);
+	} else {
+		error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+	}
+	if (error)
+		goto nfsmout;
+
+	if (compare) {
+		*retcmpp = retnotsup;
+	} else {
+		/*
+		 * Just set default values to some of the important ones.
+		 */
+		if (nap != NULL) {
+			nap->na_type = VREG;
+			nap->na_mode = 0;
+			nap->na_rdev = (NFSDEV_T)0;
+			nap->na_mtime.tv_sec = 0;
+			nap->na_mtime.tv_nsec = 0;
+			nap->na_gen = 0;
+			nap->na_flags = 0;
+			nap->na_blocksize = NFS_FABLKSIZE;
+		}
+		if (sbp != NULL) {
+			sbp->f_bsize = NFS_FABLKSIZE;
+			sbp->f_blocks = 0;
+			sbp->f_bfree = 0;
+			sbp->f_bavail = 0;
+			sbp->f_files = 0;
+			sbp->f_ffree = 0;
+		}
+		if (fsp != NULL) {
+			fsp->fs_rtmax = 8192;
+			fsp->fs_rtpref = 8192;
+			fsp->fs_maxname = NFS_MAXNAMLEN;
+			fsp->fs_wtmax = 8192;
+			fsp->fs_wtpref = 8192;
+			fsp->fs_wtmult = NFS_FABLKSIZE;
+			fsp->fs_dtpref = 8192;
+			fsp->fs_maxfilesize = 0xffffffffffffffffull;
+			fsp->fs_timedelta.tv_sec = 0;
+			fsp->fs_timedelta.tv_nsec = 1;
+			fsp->fs_properties = (NFSV3_FSFLINK | NFSV3_FSFSYMLINK |
+				NFSV3_FSFHOMOGENEOUS | NFSV3_FSFCANSETTIME);
+		}
+		if (pc != NULL) {
+			pc->pc_linkmax = NFS_LINK_MAX;
+			pc->pc_namemax = NAME_MAX;
+			pc->pc_notrunc = 0;
+			pc->pc_chownrestricted = 0;
+			pc->pc_caseinsensitive = 0;
+			pc->pc_casepreserving = 1;
+		}
+		if (sfp != NULL) {
+			sfp->sf_ffiles = UINT64_MAX;
+			sfp->sf_tfiles = UINT64_MAX;
+			sfp->sf_afiles = UINT64_MAX;
+			sfp->sf_fbytes = UINT64_MAX;
+			sfp->sf_tbytes = UINT64_MAX;
+			sfp->sf_abytes = UINT64_MAX;
+		}
+	}
+
+	/*
+	 * Loop around getting the attributes.
+	 */
+	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+	attrsize = fxdr_unsigned(int, *tl);
+	for (bitpos = 0; bitpos < NFSATTRBIT_MAX; bitpos++) {
+	    if (attrsum > attrsize) {
+		error = NFSERR_BADXDR;
+		goto nfsmout;
+	    }
+	    if (NFSISSET_ATTRBIT(&attrbits, bitpos))
+		switch (bitpos) {
+		case NFSATTRBIT_SUPPORTEDATTRS:
+			retnotsup = 0;
+			if (compare || nap == NULL)
+			    error = nfsrv_getattrbits(nd, &retattrbits,
+				&cnt, &retnotsup);
+			else
+			    error = nfsrv_getattrbits(nd, &nap->na_suppattr,
+				&cnt, &retnotsup);
+			if (error)
+			    goto nfsmout;
+			if (compare && !(*retcmpp)) {
+			   NFSSETSUPP_ATTRBIT(&checkattrbits, nd);
+
+			   /* Some file systems do not support NFSv4 ACLs. */
+			   if (nfsrv_useacl == 0 || nfs_supportsnfsv4acls(vp) == 0) {
+				NFSCLRBIT_ATTRBIT(&checkattrbits, NFSATTRBIT_ACL);
+				NFSCLRBIT_ATTRBIT(&checkattrbits, NFSATTRBIT_ACLSUPPORT);
+			   }
+			   if (!NFSEQUAL_ATTRBIT(&retattrbits, &checkattrbits)
+			       || retnotsup)
+				*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += cnt;
+			break;
+		case NFSATTRBIT_TYPE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (nap->na_type != nfsv34tov_type(*tl))
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (nap != NULL) {
+				nap->na_type = nfsv34tov_type(*tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_FHEXPIRETYPE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare && !(*retcmpp)) {
+				if (fxdr_unsigned(int, *tl) !=
+					NFSV4FHTYPE_PERSISTENT)
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CHANGE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (nap->na_filerev != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (nap != NULL) {
+				nap->na_filerev = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SIZE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (nap->na_size != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (nap != NULL) {
+				nap->na_size = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_LINKSUPPORT:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fsp->fs_properties & NFSV3_FSFLINK) {
+					if (*tl == newnfs_false)
+						*retcmpp = NFSERR_NOTSAME;
+				    } else {
+					if (*tl == newnfs_true)
+						*retcmpp = NFSERR_NOTSAME;
+				    }
+				}
+			} else if (fsp != NULL) {
+				if (*tl == newnfs_true)
+					fsp->fs_properties |= NFSV3_FSFLINK;
+				else
+					fsp->fs_properties &= ~NFSV3_FSFLINK;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_SYMLINKSUPPORT:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fsp->fs_properties & NFSV3_FSFSYMLINK) {
+					if (*tl == newnfs_false)
+						*retcmpp = NFSERR_NOTSAME;
+				    } else {
+					if (*tl == newnfs_true)
+						*retcmpp = NFSERR_NOTSAME;
+				    }
+				}
+			} else if (fsp != NULL) {
+				if (*tl == newnfs_true)
+					fsp->fs_properties |= NFSV3_FSFSYMLINK;
+				else
+					fsp->fs_properties &= ~NFSV3_FSFSYMLINK;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_NAMEDATTR:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare && !(*retcmpp)) {
+				if (*tl != newnfs_false)
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_FSID:
+			NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+			thyp = fxdr_hyper(tl);
+			tl += 2;
+			thyp2 = fxdr_hyper(tl);
+			if (compare) {
+			    if (*retcmpp == 0) {
+				if (thyp != (u_int64_t)
+				    vfs_statfs(vnode_mount(vp))->f_fsid.val[0] ||
+				    thyp2 != (u_int64_t)
+				    vfs_statfs(vnode_mount(vp))->f_fsid.val[1])
+					*retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				nap->na_filesid[0] = thyp;
+				nap->na_filesid[1] = thyp2;
+			}
+			attrsum += (4 * NFSX_UNSIGNED);
+			break;
+		case NFSATTRBIT_UNIQUEHANDLES:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare && !(*retcmpp)) {
+				if (*tl != newnfs_true)
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_LEASETIME:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (fxdr_unsigned(int, *tl) != nfsrv_lease &&
+				    !(*retcmpp))
+					*retcmpp = NFSERR_NOTSAME;
+			} else if (leasep != NULL) {
+				*leasep = fxdr_unsigned(u_int32_t, *tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_RDATTRERROR:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				 if (!(*retcmpp))
+					*retcmpp = NFSERR_INVAL;
+			} else if (rderrp != NULL) {
+				*rderrp = fxdr_unsigned(u_int32_t, *tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_ACL:
+			if (compare) {
+			  if (!(*retcmpp)) {
+			    if (nfsrv_useacl && nfs_supportsnfsv4acls(vp)) {
+				NFSACL_T *naclp;
+
+				naclp = acl_alloc(M_WAITOK);
+				error = nfsrv_dissectacl(nd, naclp, &aceerr,
+				    &cnt, p);
+				if (error) {
+				    acl_free(naclp);
+				    goto nfsmout;
+				}
+				if (aceerr || aclp == NULL ||
+				    nfsrv_compareacl(aclp, naclp))
+				    *retcmpp = NFSERR_NOTSAME;
+				acl_free(naclp);
+			    } else {
+				error = nfsrv_dissectacl(nd, NULL, &aceerr,
+				    &cnt, p);
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			    }
+			  }
+			} else {
+				if (vp != NULL && aclp != NULL)
+				    error = nfsrv_dissectacl(nd, aclp, &aceerr,
+					&cnt, p);
+				else
+				    error = nfsrv_dissectacl(nd, NULL, &aceerr,
+					&cnt, p);
+				if (error)
+				    goto nfsmout;
+			}
+
+			attrsum += cnt;
+			break;
+		case NFSATTRBIT_ACLSUPPORT:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare && !(*retcmpp)) {
+				if (nfsrv_useacl && nfs_supportsnfsv4acls(vp)) {
+					if (fxdr_unsigned(u_int32_t, *tl) !=
+					    NFSV4ACE_SUPTYPES)
+						*retcmpp = NFSERR_NOTSAME;
+				} else {
+					*retcmpp = NFSERR_ATTRNOTSUPP;
+				}
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_ARCHIVE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CANSETTIME:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fsp->fs_properties & NFSV3_FSFCANSETTIME) {
+					if (*tl == newnfs_false)
+						*retcmpp = NFSERR_NOTSAME;
+				    } else {
+					if (*tl == newnfs_true)
+						*retcmpp = NFSERR_NOTSAME;
+				    }
+				}
+			} else if (fsp != NULL) {
+				if (*tl == newnfs_true)
+					fsp->fs_properties |= NFSV3_FSFCANSETTIME;
+				else
+					fsp->fs_properties &= ~NFSV3_FSFCANSETTIME;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CASEINSENSITIVE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (*tl != newnfs_false)
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (pc != NULL) {
+				pc->pc_caseinsensitive =
+				    fxdr_unsigned(u_int32_t, *tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CASEPRESERVING:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (*tl != newnfs_true)
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (pc != NULL) {
+				pc->pc_casepreserving =
+				    fxdr_unsigned(u_int32_t, *tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CHOWNRESTRICTED:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (*tl != newnfs_true)
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (pc != NULL) {
+				pc->pc_chownrestricted =
+				    fxdr_unsigned(u_int32_t, *tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_FILEHANDLE:
+			error = nfsm_getfh(nd, &tnfhp);
+			if (error)
+				goto nfsmout;
+			tfhsize = tnfhp->nfh_len;
+			if (compare) {
+				if (!(*retcmpp) &&
+				    !NFSRV_CMPFH(tnfhp->nfh_fh, tfhsize,
+				     fhp, fhsize))
+					*retcmpp = NFSERR_NOTSAME;
+				free(tnfhp, M_NFSFH);
+			} else if (nfhpp != NULL) {
+				*nfhpp = tnfhp;
+			} else {
+				free(tnfhp, M_NFSFH);
+			}
+			attrsum += (NFSX_UNSIGNED + NFSM_RNDUP(tfhsize));
+			break;
+		case NFSATTRBIT_FILEID:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			thyp = fxdr_hyper(tl);
+			if (compare) {
+				if (!(*retcmpp)) {
+					if (nap->na_fileid != thyp)
+						*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (nap != NULL)
+				nap->na_fileid = thyp;
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FILESAVAIL:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp) &&
+				    sfp->sf_afiles != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			} else if (sfp != NULL) {
+				sfp->sf_afiles = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FILESFREE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp) &&
+				    sfp->sf_ffiles != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			} else if (sfp != NULL) {
+				sfp->sf_ffiles = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FILESTOTAL:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp) &&
+				    sfp->sf_tfiles != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			} else if (sfp != NULL) {
+				sfp->sf_tfiles = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FSLOCATIONS:
+			error = nfsrv_getrefstr(nd, &cp, &cp2, &l, &m);
+			if (error)
+				goto nfsmout;
+			attrsum += l;
+			if (compare && !(*retcmpp)) {
+				refp = nfsv4root_getreferral(vp, NULL, 0);
+				if (refp != NULL) {
+					if (cp == NULL || cp2 == NULL ||
+					    strcmp(cp, "/") ||
+					    strcmp(cp2, refp->nfr_srvlist))
+						*retcmpp = NFSERR_NOTSAME;
+				} else if (m == 0) {
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			}
+			if (cp != NULL)
+				free(cp, M_NFSSTRING);
+			if (cp2 != NULL)
+				free(cp2, M_NFSSTRING);
+			break;
+		case NFSATTRBIT_HIDDEN:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_HOMOGENEOUS:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fsp->fs_properties &
+					NFSV3_FSFHOMOGENEOUS) {
+					if (*tl == newnfs_false)
+						*retcmpp = NFSERR_NOTSAME;
+				    } else {
+					if (*tl == newnfs_true)
+						*retcmpp = NFSERR_NOTSAME;
+				    }
+				}
+			} else if (fsp != NULL) {
+				if (*tl == newnfs_true)
+				    fsp->fs_properties |= NFSV3_FSFHOMOGENEOUS;
+				else
+				    fsp->fs_properties &= ~NFSV3_FSFHOMOGENEOUS;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_MAXFILESIZE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			tnfsquad.qval = fxdr_hyper(tl);
+			if (compare) {
+				if (!(*retcmpp)) {
+					tquad = NFSRV_MAXFILESIZE;
+					if (tquad != tnfsquad.qval)
+						*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (fsp != NULL) {
+				fsp->fs_maxfilesize = tnfsquad.qval;
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_MAXLINK:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fxdr_unsigned(int, *tl) != NFS_LINK_MAX)
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (pc != NULL) {
+				pc->pc_linkmax = fxdr_unsigned(u_int32_t, *tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_MAXNAME:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fsp->fs_maxname !=
+					fxdr_unsigned(u_int32_t, *tl))
+						*retcmpp = NFSERR_NOTSAME;
+				}
+			} else {
+				tuint = fxdr_unsigned(u_int32_t, *tl);
+				/*
+				 * Some Linux NFSv4 servers report this
+				 * as 0 or 4 billion, so I'll set it to
+				 * NFS_MAXNAMLEN. If a server actually creates
+				 * a name longer than NFS_MAXNAMLEN, it will
+				 * get an error back.
+				 */
+				if (tuint == 0 || tuint > NFS_MAXNAMLEN)
+					tuint = NFS_MAXNAMLEN;
+				if (fsp != NULL)
+					fsp->fs_maxname = tuint;
+				if (pc != NULL)
+					pc->pc_namemax = tuint;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_MAXREAD:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fsp->fs_rtmax != fxdr_unsigned(u_int32_t,
+					*(tl + 1)) || *tl != 0)
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (fsp != NULL) {
+				fsp->fs_rtmax = fxdr_unsigned(u_int32_t, *++tl);
+				fsp->fs_rtpref = fsp->fs_rtmax;
+				fsp->fs_dtpref = fsp->fs_rtpref;
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_MAXWRITE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (fsp->fs_wtmax != fxdr_unsigned(u_int32_t,
+					*(tl + 1)) || *tl != 0)
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (fsp != NULL) {
+				fsp->fs_wtmax = fxdr_unsigned(int, *++tl);
+				fsp->fs_wtpref = fsp->fs_wtmax;
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_MIMETYPE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			i = fxdr_unsigned(int, *tl);
+			attrsum += (NFSX_UNSIGNED + NFSM_RNDUP(i));
+			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
+			if (error)
+				goto nfsmout;
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			break;
+		case NFSATTRBIT_MODE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (nap->na_mode != nfstov_mode(*tl))
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (nap != NULL) {
+				nap->na_mode = nfstov_mode(*tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_NOTRUNC:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare) {
+				if (!(*retcmpp)) {
+				    if (*tl != newnfs_true)
+					*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (pc != NULL) {
+				pc->pc_notrunc = fxdr_unsigned(u_int32_t, *tl);
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_NUMLINKS:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			tuint = fxdr_unsigned(u_int32_t, *tl);
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if ((u_int32_t)nap->na_nlink != tuint)
+					*retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				nap->na_nlink = tuint;
+			}
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_OWNER:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			j = fxdr_unsigned(int, *tl);
+			if (j < 0) {
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			attrsum += (NFSX_UNSIGNED + NFSM_RNDUP(j));
+			if (j > NFSV4_SMALLSTR)
+				cp = malloc(j + 1, M_NFSSTRING, M_WAITOK);
+			else
+				cp = namestr;
+			error = nfsrv_mtostr(nd, cp, j);
+			if (error) {
+				if (j > NFSV4_SMALLSTR)
+					free(cp, M_NFSSTRING);
+				goto nfsmout;
+			}
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if (nfsv4_strtouid(nd, cp, j, &uid, p) ||
+				    nap->na_uid != uid)
+				    *retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				if (nfsv4_strtouid(nd, cp, j, &uid, p))
+					nap->na_uid = nfsrv_defaultuid;
+				else
+					nap->na_uid = uid;
+			}
+			if (j > NFSV4_SMALLSTR)
+				free(cp, M_NFSSTRING);
+			break;
+		case NFSATTRBIT_OWNERGROUP:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			j = fxdr_unsigned(int, *tl);
+			if (j < 0) {
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			attrsum += (NFSX_UNSIGNED + NFSM_RNDUP(j));
+			if (j > NFSV4_SMALLSTR)
+				cp = malloc(j + 1, M_NFSSTRING, M_WAITOK);
+			else
+				cp = namestr;
+			error = nfsrv_mtostr(nd, cp, j);
+			if (error) {
+				if (j > NFSV4_SMALLSTR)
+					free(cp, M_NFSSTRING);
+				goto nfsmout;
+			}
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if (nfsv4_strtogid(nd, cp, j, &gid, p) ||
+				    nap->na_gid != gid)
+				    *retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				if (nfsv4_strtogid(nd, cp, j, &gid, p))
+					nap->na_gid = nfsrv_defaultgid;
+				else
+					nap->na_gid = gid;
+			}
+			if (j > NFSV4_SMALLSTR)
+				free(cp, M_NFSSTRING);
+			break;
+		case NFSATTRBIT_QUOTAHARD:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (sbp != NULL) {
+			    if (priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
+				freenum = sbp->f_bfree;
+			    else
+				freenum = sbp->f_bavail;
+#ifdef QUOTA
+			    /*
+			     * ufs_quotactl() insists that the uid argument
+			     * equal p_ruid for non-root quota access, so
+			     * we'll just make sure that's the case.
+			     */
+			    savuid = p->p_cred->p_ruid;
+			    p->p_cred->p_ruid = cred->cr_uid;
+			    if (!VFS_QUOTACTL(vnode_mount(vp),QCMD(Q_GETQUOTA,
+				USRQUOTA), cred->cr_uid, (caddr_t)&dqb))
+				freenum = min(dqb.dqb_bhardlimit, freenum);
+			    p->p_cred->p_ruid = savuid;
+#endif	/* QUOTA */
+			    uquad = (u_int64_t)freenum;
+			    NFSQUOTABLKTOBYTE(uquad, sbp->f_bsize);
+			}
+			if (compare && !(*retcmpp)) {
+				if (uquad != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_QUOTASOFT:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (sbp != NULL) {
+			    if (priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
+				freenum = sbp->f_bfree;
+			    else
+				freenum = sbp->f_bavail;
+#ifdef QUOTA
+			    /*
+			     * ufs_quotactl() insists that the uid argument
+			     * equal p_ruid for non-root quota access, so
+			     * we'll just make sure that's the case.
+			     */
+			    savuid = p->p_cred->p_ruid;
+			    p->p_cred->p_ruid = cred->cr_uid;
+			    if (!VFS_QUOTACTL(vnode_mount(vp),QCMD(Q_GETQUOTA,
+				USRQUOTA), cred->cr_uid, (caddr_t)&dqb))
+				freenum = min(dqb.dqb_bsoftlimit, freenum);
+			    p->p_cred->p_ruid = savuid;
+#endif	/* QUOTA */
+			    uquad = (u_int64_t)freenum;
+			    NFSQUOTABLKTOBYTE(uquad, sbp->f_bsize);
+			}
+			if (compare && !(*retcmpp)) {
+				if (uquad != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_QUOTAUSED:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (sbp != NULL) {
+			    freenum = 0;
+#ifdef QUOTA
+			    /*
+			     * ufs_quotactl() insists that the uid argument
+			     * equal p_ruid for non-root quota access, so
+			     * we'll just make sure that's the case.
+			     */
+			    savuid = p->p_cred->p_ruid;
+			    p->p_cred->p_ruid = cred->cr_uid;
+			    if (!VFS_QUOTACTL(vnode_mount(vp),QCMD(Q_GETQUOTA,
+				USRQUOTA), cred->cr_uid, (caddr_t)&dqb))
+				freenum = dqb.dqb_curblocks;
+			    p->p_cred->p_ruid = savuid;
+#endif	/* QUOTA */
+			    uquad = (u_int64_t)freenum;
+			    NFSQUOTABLKTOBYTE(uquad, sbp->f_bsize);
+			}
+			if (compare && !(*retcmpp)) {
+				if (uquad != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_RAWDEV:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V4SPECDATA);
+			j = fxdr_unsigned(int, *tl++);
+			k = fxdr_unsigned(int, *tl);
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if (nap->na_rdev != NFSMAKEDEV(j, k))
+					*retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				nap->na_rdev = NFSMAKEDEV(j, k);
+			}
+			attrsum += NFSX_V4SPECDATA;
+			break;
+		case NFSATTRBIT_SPACEAVAIL:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp) &&
+				    sfp->sf_abytes != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			} else if (sfp != NULL) {
+				sfp->sf_abytes = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SPACEFREE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp) &&
+				    sfp->sf_fbytes != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			} else if (sfp != NULL) {
+				sfp->sf_fbytes = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SPACETOTAL:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			if (compare) {
+				if (!(*retcmpp) &&
+				    sfp->sf_tbytes != fxdr_hyper(tl))
+					*retcmpp = NFSERR_NOTSAME;
+			} else if (sfp != NULL) {
+				sfp->sf_tbytes = fxdr_hyper(tl);
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SPACEUSED:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			thyp = fxdr_hyper(tl);
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if ((u_int64_t)nap->na_bytes != thyp)
+					*retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				nap->na_bytes = thyp;
+			}
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SYSTEM:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			attrsum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_TIMEACCESS:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+			fxdr_nfsv4time(tl, &temptime);
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if (!NFS_CMPTIME(temptime, nap->na_atime))
+					*retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				nap->na_atime = temptime;
+			}
+			attrsum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEACCESSSET:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			attrsum += NFSX_UNSIGNED;
+			i = fxdr_unsigned(int, *tl);
+			if (i == NFSV4SATTRTIME_TOCLIENT) {
+				NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+				attrsum += NFSX_V4TIME;
+			}
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_INVAL;
+			break;
+		case NFSATTRBIT_TIMEBACKUP:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			attrsum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMECREATE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			attrsum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEDELTA:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+			if (fsp != NULL) {
+			    if (compare) {
+				if (!(*retcmpp)) {
+				    if ((u_int32_t)fsp->fs_timedelta.tv_sec !=
+					fxdr_unsigned(u_int32_t, *(tl + 1)) ||
+				        (u_int32_t)fsp->fs_timedelta.tv_nsec !=
+					(fxdr_unsigned(u_int32_t, *(tl + 2)) %
+					 1000000000) ||
+					*tl != 0)
+					    *retcmpp = NFSERR_NOTSAME;
+				}
+			    } else {
+				fxdr_nfsv4time(tl, &fsp->fs_timedelta);
+			    }
+			}
+			attrsum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEMETADATA:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+			fxdr_nfsv4time(tl, &temptime);
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if (!NFS_CMPTIME(temptime, nap->na_ctime))
+					*retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				nap->na_ctime = temptime;
+			}
+			attrsum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEMODIFY:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+			fxdr_nfsv4time(tl, &temptime);
+			if (compare) {
+			    if (!(*retcmpp)) {
+				if (!NFS_CMPTIME(temptime, nap->na_mtime))
+					*retcmpp = NFSERR_NOTSAME;
+			    }
+			} else if (nap != NULL) {
+				nap->na_mtime = temptime;
+			}
+			attrsum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEMODIFYSET:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			attrsum += NFSX_UNSIGNED;
+			i = fxdr_unsigned(int, *tl);
+			if (i == NFSV4SATTRTIME_TOCLIENT) {
+				NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME);
+				attrsum += NFSX_V4TIME;
+			}
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_INVAL;
+			break;
+		case NFSATTRBIT_MOUNTEDONFILEID:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+			thyp = fxdr_hyper(tl);
+			if (compare) {
+				if (!(*retcmpp)) {
+					if (!vp || !nfsrv_atroot(vp, &thyp2))
+						thyp2 = nap->na_fileid;
+					if (thyp2 != thyp)
+						*retcmpp = NFSERR_NOTSAME;
+				}
+			} else if (nap != NULL)
+				nap->na_mntonfileno = thyp;
+			attrsum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SUPPATTREXCLCREAT:
+			retnotsup = 0;
+			error = nfsrv_getattrbits(nd, &retattrbits,
+			    &cnt, &retnotsup);
+			if (error)
+			    goto nfsmout;
+			if (compare && !(*retcmpp)) {
+			   NFSSETSUPP_ATTRBIT(&checkattrbits, nd);
+			   NFSCLRNOTSETABLE_ATTRBIT(&checkattrbits, nd);
+			   NFSCLRBIT_ATTRBIT(&checkattrbits,
+				NFSATTRBIT_TIMEACCESSSET);
+			   if (!NFSEQUAL_ATTRBIT(&retattrbits, &checkattrbits)
+			       || retnotsup)
+				*retcmpp = NFSERR_NOTSAME;
+			}
+			attrsum += cnt;
+			break;
+		case NFSATTRBIT_FSLAYOUTTYPE:
+		case NFSATTRBIT_LAYOUTTYPE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			attrsum += NFSX_UNSIGNED;
+			i = fxdr_unsigned(int, *tl);
+			if (i > 0) {
+				NFSM_DISSECT(tl, u_int32_t *, i *
+				    NFSX_UNSIGNED);
+				attrsum += i * NFSX_UNSIGNED;
+				j = fxdr_unsigned(int, *tl);
+				if (i == 1 && compare && !(*retcmpp) &&
+				    (((nfsrv_doflexfile != 0 ||
+				       nfsrv_maxpnfsmirror > 1) &&
+				      j != NFSLAYOUT_FLEXFILE) ||
+				    (nfsrv_doflexfile == 0 &&
+				     j != NFSLAYOUT_NFSV4_1_FILES)))
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			if (nfsrv_devidcnt == 0) {
+				if (compare && !(*retcmpp) && i > 0)
+					*retcmpp = NFSERR_NOTSAME;
+			} else {
+				if (compare && !(*retcmpp) && i != 1)
+					*retcmpp = NFSERR_NOTSAME;
+			}
+			break;
+		case NFSATTRBIT_LAYOUTALIGNMENT:
+		case NFSATTRBIT_LAYOUTBLKSIZE:
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			attrsum += NFSX_UNSIGNED;
+			i = fxdr_unsigned(int, *tl);
+			if (compare && !(*retcmpp) && i != NFS_SRVMAXIO)
+				*retcmpp = NFSERR_NOTSAME;
+			break;
+		default:
+			printf("EEK! nfsv4_loadattr unknown attr=%d\n",
+				bitpos);
+			if (compare && !(*retcmpp))
+				*retcmpp = NFSERR_ATTRNOTSUPP;
+			/*
+			 * and get out of the loop, since we can't parse
+			 * the unknown attribute data.
+			 */
+			bitpos = NFSATTRBIT_MAX;
+			break;
+		}
+	}
+
+	/*
+	 * some clients pad the attrlist, so we need to skip over the
+	 * padding.
+	 */
+	if (attrsum > attrsize) {
+		error = NFSERR_BADXDR;
+	} else {
+		attrsize = NFSM_RNDUP(attrsize);
+		if (attrsum < attrsize)
+			error = nfsm_advance(nd, attrsize - attrsum, -1);
+	}
+nfsmout:
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
+
+/*
+ * Implement sleep locks for newnfs. The nfslock_usecnt allows for a
+ * shared lock and the NFSXXX_LOCK flag permits an exclusive lock.
+ * The first argument is a pointer to an nfsv4lock structure.
+ * The second argument is 1 iff an exclusive lock is wanted.
+ * If this argument is 0, the call waits until no thread either wants or
+ * holds an exclusive lock.
+ * It returns 1 if the lock was acquired, 0 otherwise.
+ * If several processes call this function concurrently wanting the exclusive
+ * lock, one will get the lock and the rest will return without getting the
+ * lock. (If the caller must have the lock, it simply calls this function in a
+ *  loop until the function returns 1 to indicate the lock was acquired.)
+ * Any usecnt must be decremented by calling nfsv4_relref() before
+ * calling nfsv4_lock(). It was done this way, so nfsv4_lock() could
+ * be called in a loop.
+ * If the isleptp argument is not NULL, it is set to indicate whether the
+ * call slept.  If the mp argument is not NULL, the call checks for a forced
+ * dismount of that mount point and returns 0 if one is in progress.
+ * (See the illustrative caller sketch following this function.)
+ */
+APPLESTATIC int
+nfsv4_lock(struct nfsv4lock *lp, int iwantlock, int *isleptp,
+    void *mutex, struct mount *mp)
+{
+
+	if (isleptp)
+		*isleptp = 0;
+	/*
+	 * If a lock is wanted, loop around until the lock is acquired by
+	 * someone and then released. If I want the lock, try to acquire it.
+	 * For a lock to be issued, no lock must be in force and the usecnt
+	 * must be zero.
+	 */
+	if (iwantlock) {
+	    if (!(lp->nfslock_lock & NFSV4LOCK_LOCK) &&
+		lp->nfslock_usecnt == 0) {
+		lp->nfslock_lock &= ~NFSV4LOCK_LOCKWANTED;
+		lp->nfslock_lock |= NFSV4LOCK_LOCK;
+		return (1);
+	    }
+	    lp->nfslock_lock |= NFSV4LOCK_LOCKWANTED;
+	}
+	while (lp->nfslock_lock & (NFSV4LOCK_LOCK | NFSV4LOCK_LOCKWANTED)) {
+		if (mp != NULL && NFSCL_FORCEDISM(mp)) {
+			lp->nfslock_lock &= ~NFSV4LOCK_LOCKWANTED;
+			return (0);
+		}
+		lp->nfslock_lock |= NFSV4LOCK_WANTED;
+		if (isleptp)
+			*isleptp = 1;
+		(void) nfsmsleep(&lp->nfslock_lock, mutex,
+		    PZERO - 1, "nfsv4lck", NULL);
+		if (iwantlock && !(lp->nfslock_lock & NFSV4LOCK_LOCK) &&
+		    lp->nfslock_usecnt == 0) {
+			lp->nfslock_lock &= ~NFSV4LOCK_LOCKWANTED;
+			lp->nfslock_lock |= NFSV4LOCK_LOCK;
+			return (1);
+		}
+	}
+	return (0);
+}
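+
+/*
+ * Editor's illustrative sketch (not part of the imported FreeBSD source):
+ * the usual caller pattern for obtaining the exclusive lock.  It assumes
+ * the caller already holds the mutex protecting *lp and has dropped any
+ * shared reference with nfsv4_relref() beforehand, as described above.
+ * The helper name is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_get_excl_lock(struct nfsv4lock *lp, void *mutexp, struct mount *mp)
+{
+	int slept;
+
+	/* Loop until nfsv4_lock() reports the exclusive lock was acquired. */
+	while (nfsv4_lock(lp, 1, &slept, mutexp, mp) == 0) {
+		if (mp != NULL && NFSCL_FORCEDISM(mp))
+			return;	/* Forced dismount, give up. */
+	}
+	/* ... exclusive work on the data protected by *lp ... */
+	nfsv4_unlock(lp, 0);
+}
+#endif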
+
+/*
+ * Release the lock acquired by nfsv4_lock().
+ * The second argument is set to 1 to indicate the nfslock_usecnt should be
+ * incremented, as well.
+ */
+APPLESTATIC void
+nfsv4_unlock(struct nfsv4lock *lp, int incref)
+{
+
+	lp->nfslock_lock &= ~NFSV4LOCK_LOCK;
+	if (incref)
+		lp->nfslock_usecnt++;
+	nfsv4_wanted(lp);
+}
+
+/*
+ * Release a reference cnt.
+ */
+APPLESTATIC void
+nfsv4_relref(struct nfsv4lock *lp)
+{
+
+	if (lp->nfslock_usecnt <= 0)
+		panic("nfsv4root ref cnt");
+	lp->nfslock_usecnt--;
+	if (lp->nfslock_usecnt == 0)
+		nfsv4_wanted(lp);
+}
+
+/*
+ * Get a reference cnt.
+ * This function will wait for any exclusive lock to be released, but will
+ * not wait for threads that want the exclusive lock. If priority needs
+ * to be given to threads that need the exclusive lock, a call to nfsv4_lock()
+ * with the 2nd argument == 0 should be done before calling nfsv4_getref().
+ * If the mp argument is not NULL, check for NFSCL_FORCEDISM() being set and
+ * return without getting a refcnt for that case.
+ */
+APPLESTATIC void
+nfsv4_getref(struct nfsv4lock *lp, int *isleptp, void *mutex,
+    struct mount *mp)
+{
+
+	if (isleptp)
+		*isleptp = 0;
+
+	/*
+	 * Wait for any exclusive lock that is held to be released.
+	 */
+	while (lp->nfslock_lock & NFSV4LOCK_LOCK) {
+		if (mp != NULL && NFSCL_FORCEDISM(mp))
+			return;
+		lp->nfslock_lock |= NFSV4LOCK_WANTED;
+		if (isleptp)
+			*isleptp = 1;
+		(void) nfsmsleep(&lp->nfslock_lock, mutex,
+		    PZERO - 1, "nfsv4gr", NULL);
+	}
+	if (mp != NULL && NFSCL_FORCEDISM(mp))
+		return;
+
+	lp->nfslock_usecnt++;
+}
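+
+/*
+ * Editor's illustrative sketch (not part of the imported FreeBSD source):
+ * taking and releasing a shared reference around non-exclusive work.  The
+ * caller is assumed to hold the mutex protecting *lp across both calls;
+ * the helper name is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_shared_access(struct nfsv4lock *lp, void *mutexp, struct mount *mp)
+{
+	int slept;
+
+	nfsv4_getref(lp, &slept, mutexp, mp);
+	if (mp != NULL && NFSCL_FORCEDISM(mp))
+		return;	/* No reference was taken for a forced dismount. */
+	/* ... shared (non-exclusive) work ... */
+	nfsv4_relref(lp);
+}
+#endif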
+
+/*
+ * Get a reference as above, but return failure instead of sleeping if
+ * an exclusive lock is held.
+ */
+APPLESTATIC int
+nfsv4_getref_nonblock(struct nfsv4lock *lp)
+{
+
+	if ((lp->nfslock_lock & NFSV4LOCK_LOCK) != 0)
+		return (0);
+
+	lp->nfslock_usecnt++;
+	return (1);
+}
+
+/*
+ * Test for a lock. Return 1 if locked, 0 otherwise.
+ */
+APPLESTATIC int
+nfsv4_testlock(struct nfsv4lock *lp)
+{
+
+	if ((lp->nfslock_lock & NFSV4LOCK_LOCK) == 0 &&
+	    lp->nfslock_usecnt == 0)
+		return (0);
+	return (1);
+}
+
+/*
+ * Wake up anyone sleeping, waiting for this lock.
+ */
+static void
+nfsv4_wanted(struct nfsv4lock *lp)
+{
+
+	if (lp->nfslock_lock & NFSV4LOCK_WANTED) {
+		lp->nfslock_lock &= ~NFSV4LOCK_WANTED;
+		wakeup((caddr_t)&lp->nfslock_lock);
+	}
+}
+
+/*
+ * Copy a string from an mbuf list into a character array.
+ * Return EBADRPC if there is an mbuf error,
+ * 0 otherwise.
+ */
+APPLESTATIC int
+nfsrv_mtostr(struct nfsrv_descript *nd, char *str, int siz)
+{
+	char *cp;
+	int xfer, len;
+	mbuf_t mp;
+	int rem, error = 0;
+
+	mp = nd->nd_md;
+	cp = nd->nd_dpos;
+	len = NFSMTOD(mp, caddr_t) + mbuf_len(mp) - cp;
+	rem = NFSM_RNDUP(siz) - siz;
+	while (siz > 0) {
+		if (len > siz)
+			xfer = siz;
+		else
+			xfer = len;
+		NFSBCOPY(cp, str, xfer);
+		str += xfer;
+		siz -= xfer;
+		if (siz > 0) {
+			mp = mbuf_next(mp);
+			if (mp == NULL) {
+				error = EBADRPC;
+				goto out;
+			}
+			cp = NFSMTOD(mp, caddr_t);
+			len = mbuf_len(mp);
+		} else {
+			cp += xfer;
+			len -= xfer;
+		}
+	}
+	*str = '\0';
+	nd->nd_dpos = cp;
+	nd->nd_md = mp;
+	if (rem > 0) {
+		if (len < rem)
+			error = nfsm_advance(nd, rem, len);
+		else
+			nd->nd_dpos += rem;
+	}
+
+out:
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
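+
+/*
+ * Editor's illustrative sketch (not part of the imported FreeBSD source):
+ * the destination buffer handed to nfsrv_mtostr() must have room for the
+ * terminating NUL, i.e. at least siz + 1 bytes.  The helper name and the
+ * NFSERR_NAMETOL bound check are illustrative only.
+ */
+#if 0	/* example only */
+static int
+example_read_name(struct nfsrv_descript *nd, int siz)
+{
+	char name[NFS_MAXNAMLEN + 1];
+
+	if (siz > NFS_MAXNAMLEN)
+		return (NFSERR_NAMETOL);
+	/* Copies siz bytes and NUL terminates at name[siz]. */
+	return (nfsrv_mtostr(nd, name, siz));
+}
+#endif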
+
+/*
+ * Fill in the attributes as marked by the bitmap (V4).
+ */
+APPLESTATIC int
+nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
+    NFSACL_T *saclp, struct vattr *vap, fhandle_t *fhp, int rderror,
+    nfsattrbit_t *attrbitp, struct ucred *cred, NFSPROC_T *p, int isdgram,
+    int reterr, int supports_nfsv4acls, int at_root, uint64_t mounted_on_fileno,
+    struct statfs *pnfssf)
+{
+	int bitpos, retnum = 0;
+	u_int32_t *tl;
+	int siz, prefixnum, error;
+	u_char *cp, namestr[NFSV4_SMALLSTR];
+	nfsattrbit_t attrbits, retbits;
+	nfsattrbit_t *retbitp = &retbits;
+	u_int32_t freenum, *retnump;
+	u_int64_t uquad;
+	struct statfs *fs;
+	struct nfsfsinfo fsinf;
+	struct timespec temptime;
+	NFSACL_T *aclp, *naclp = NULL;
+#ifdef QUOTA
+	struct dqblk dqb;
+	uid_t savuid;
+#endif
+
+	/*
+	 * First, set the bits that can be filled and get fsinfo.
+	 */
+	NFSSET_ATTRBIT(retbitp, attrbitp);
+	/*
+	 * If both p and cred are NULL, it is a client side setattr call.
+	 * If both p and cred are not NULL, it is a server side reply call.
+	 * If p is not NULL and cred is NULL, it is a client side callback
+	 * reply call.
+	 */
+	if (p == NULL && cred == NULL) {
+		NFSCLRNOTSETABLE_ATTRBIT(retbitp, nd);
+		aclp = saclp;
+	} else {
+		NFSCLRNOTFILLABLE_ATTRBIT(retbitp, nd);
+		naclp = acl_alloc(M_WAITOK);
+		aclp = naclp;
+	}
+	nfsvno_getfs(&fsinf, isdgram);
+#ifndef APPLE
+	/*
+	 * Get the VFS_STATFS() information, since some attributes need it.
+	 */
+	fs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
+	if (NFSISSETSTATFS_ATTRBIT(retbitp)) {
+		error = VFS_STATFS(mp, fs);
+		if (error != 0) {
+			if (reterr) {
+				nd->nd_repstat = NFSERR_ACCES;
+				free(fs, M_STATFS);
+				return (0);
+			}
+			NFSCLRSTATFS_ATTRBIT(retbitp);
+		}
+	}
+#endif
+
+	/*
+	 * And the NFSv4 ACL...
+	 */
+	if (NFSISSET_ATTRBIT(retbitp, NFSATTRBIT_ACLSUPPORT) &&
+	    (nfsrv_useacl == 0 || ((cred != NULL || p != NULL) &&
+		supports_nfsv4acls == 0))) {
+		NFSCLRBIT_ATTRBIT(retbitp, NFSATTRBIT_ACLSUPPORT);
+	}
+	if (NFSISSET_ATTRBIT(retbitp, NFSATTRBIT_ACL)) {
+		if (nfsrv_useacl == 0 || ((cred != NULL || p != NULL) &&
+		    supports_nfsv4acls == 0)) {
+			NFSCLRBIT_ATTRBIT(retbitp, NFSATTRBIT_ACL);
+		} else if (naclp != NULL) {
+			if (NFSVOPLOCK(vp, LK_SHARED) == 0) {
+				error = VOP_ACCESSX(vp, VREAD_ACL, cred, p);
+				if (error == 0)
+					error = VOP_GETACL(vp, ACL_TYPE_NFS4,
+					    naclp, cred, p);
+				NFSVOPUNLOCK(vp, 0);
+			} else
+				error = NFSERR_PERM;
+			if (error != 0) {
+				if (reterr) {
+					nd->nd_repstat = NFSERR_ACCES;
+					free(fs, M_STATFS);
+					return (0);
+				}
+				NFSCLRBIT_ATTRBIT(retbitp, NFSATTRBIT_ACL);
+			}
+		}
+	}
+
+	/*
+	 * Put out the attribute bitmap for the ones being filled in
+	 * and get the field for the number of attributes returned.
+	 */
+	prefixnum = nfsrv_putattrbit(nd, retbitp);
+	NFSM_BUILD(retnump, u_int32_t *, NFSX_UNSIGNED);
+	prefixnum += NFSX_UNSIGNED;
+
+	/*
+	 * Now, loop around filling in the attributes for each bit set.
+	 */
+	for (bitpos = 0; bitpos < NFSATTRBIT_MAX; bitpos++) {
+	    if (NFSISSET_ATTRBIT(retbitp, bitpos)) {
+		switch (bitpos) {
+		case NFSATTRBIT_SUPPORTEDATTRS:
+			NFSSETSUPP_ATTRBIT(&attrbits, nd);
+			if (nfsrv_useacl == 0 || ((cred != NULL || p != NULL)
+			    && supports_nfsv4acls == 0)) {
+			    NFSCLRBIT_ATTRBIT(&attrbits,NFSATTRBIT_ACLSUPPORT);
+			    NFSCLRBIT_ATTRBIT(&attrbits,NFSATTRBIT_ACL);
+			}
+			retnum += nfsrv_putattrbit(nd, &attrbits);
+			break;
+		case NFSATTRBIT_TYPE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = vtonfsv34_type(vap->va_type);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_FHEXPIRETYPE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4FHTYPE_PERSISTENT);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CHANGE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			txdr_hyper(vap->va_filerev, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SIZE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			txdr_hyper(vap->va_size, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_LINKSUPPORT:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (fsinf.fs_properties & NFSV3FSINFO_LINK)
+				*tl = newnfs_true;
+			else
+				*tl = newnfs_false;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_SYMLINKSUPPORT:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (fsinf.fs_properties & NFSV3FSINFO_SYMLINK)
+				*tl = newnfs_true;
+			else
+				*tl = newnfs_false;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_NAMEDATTR:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_false;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_FSID:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_V4FSID);
+			*tl++ = 0;
+			*tl++ = txdr_unsigned(mp->mnt_stat.f_fsid.val[0]);
+			*tl++ = 0;
+			*tl = txdr_unsigned(mp->mnt_stat.f_fsid.val[1]);
+			retnum += NFSX_V4FSID;
+			break;
+		case NFSATTRBIT_UNIQUEHANDLES:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_true;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_LEASETIME:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(nfsrv_lease);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_RDATTRERROR:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(rderror);
+			retnum += NFSX_UNSIGNED;
+			break;
+		/*
+		 * Recommended Attributes. (Only the supported ones.)
+		 */
+		case NFSATTRBIT_ACL:
+			retnum += nfsrv_buildacl(nd, aclp, vnode_vtype(vp), p);
+			break;
+		case NFSATTRBIT_ACLSUPPORT:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4ACE_SUPTYPES);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CANSETTIME:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (fsinf.fs_properties & NFSV3FSINFO_CANSETTIME)
+				*tl = newnfs_true;
+			else
+				*tl = newnfs_false;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CASEINSENSITIVE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_false;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CASEPRESERVING:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_true;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_CHOWNRESTRICTED:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_true;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_FILEHANDLE:
+			retnum += nfsm_fhtom(nd, (u_int8_t *)fhp, 0, 0);
+			break;
+		case NFSATTRBIT_FILEID:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			uquad = vap->va_fileid;
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FILESAVAIL:
+			/*
+			 * Check quota and use min(quota, f_ffree).
+			 */
+			freenum = fs->f_ffree;
+#ifdef QUOTA
+			/*
+			 * ufs_quotactl() insists that the uid argument
+			 * equal p_ruid for non-root quota access, so
+			 * we'll just make sure that's the case.
+			 */
+			savuid = p->p_cred->p_ruid;
+			p->p_cred->p_ruid = cred->cr_uid;
+			if (!VFS_QUOTACTL(mp, QCMD(Q_GETQUOTA,USRQUOTA),
+			    cred->cr_uid, (caddr_t)&dqb))
+			    freenum = min(dqb.dqb_isoftlimit-dqb.dqb_curinodes,
+				freenum);
+			p->p_cred->p_ruid = savuid;
+#endif	/* QUOTA */
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			*tl++ = 0;
+			*tl = txdr_unsigned(freenum);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FILESFREE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			*tl++ = 0;
+			*tl = txdr_unsigned(fs->f_ffree);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FILESTOTAL:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			*tl++ = 0;
+			*tl = txdr_unsigned(fs->f_files);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_FSLOCATIONS:
+			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			*tl++ = 0;
+			*tl = 0;
+			retnum += 2 * NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_HOMOGENEOUS:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			if (fsinf.fs_properties & NFSV3FSINFO_HOMOGENEOUS)
+				*tl = newnfs_true;
+			else
+				*tl = newnfs_false;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_MAXFILESIZE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			uquad = NFSRV_MAXFILESIZE;
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_MAXLINK:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFS_LINK_MAX);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_MAXNAME:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFS_MAXNAMLEN);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_MAXREAD:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			*tl++ = 0;
+			*tl = txdr_unsigned(fsinf.fs_rtmax);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_MAXWRITE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			*tl++ = 0;
+			*tl = txdr_unsigned(fsinf.fs_wtmax);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_MODE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = vtonfsv34_mode(vap->va_mode);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_NOTRUNC:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = newnfs_true;
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_NUMLINKS:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(vap->va_nlink);
+			retnum += NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_OWNER:
+			cp = namestr;
+			nfsv4_uidtostr(vap->va_uid, &cp, &siz, p);
+			retnum += nfsm_strtom(nd, cp, siz);
+			if (cp != namestr)
+				free(cp, M_NFSSTRING);
+			break;
+		case NFSATTRBIT_OWNERGROUP:
+			cp = namestr;
+			nfsv4_gidtostr(vap->va_gid, &cp, &siz, p);
+			retnum += nfsm_strtom(nd, cp, siz);
+			if (cp != namestr)
+				free(cp, M_NFSSTRING);
+			break;
+		case NFSATTRBIT_QUOTAHARD:
+			if (priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
+				freenum = fs->f_bfree;
+			else
+				freenum = fs->f_bavail;
+#ifdef QUOTA
+			/*
+			 * ufs_quotactl() insists that the uid argument
+			 * equal p_ruid for non-root quota access, so
+			 * we'll just make sure that's the case.
+			 */
+			savuid = p->p_cred->p_ruid;
+			p->p_cred->p_ruid = cred->cr_uid;
+			if (!VFS_QUOTACTL(mp, QCMD(Q_GETQUOTA,USRQUOTA),
+			    cred->cr_uid, (caddr_t)&dqb))
+			    freenum = min(dqb.dqb_bhardlimit, freenum);
+			p->p_cred->p_ruid = savuid;
+#endif	/* QUOTA */
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			uquad = (u_int64_t)freenum;
+			NFSQUOTABLKTOBYTE(uquad, fs->f_bsize);
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_QUOTASOFT:
+			if (priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
+				freenum = fs->f_bfree;
+			else
+				freenum = fs->f_bavail;
+#ifdef QUOTA
+			/*
+			 * ufs_quotactl() insists that the uid argument
+			 * equal p_ruid for non-root quota access, so
+			 * we'll just make sure that's the case.
+			 */
+			savuid = p->p_cred->p_ruid;
+			p->p_cred->p_ruid = cred->cr_uid;
+			if (!VFS_QUOTACTL(mp, QCMD(Q_GETQUOTA,USRQUOTA),
+			    cred->cr_uid, (caddr_t)&dqb))
+			    freenum = min(dqb.dqb_bsoftlimit, freenum);
+			p->p_cred->p_ruid = savuid;
+#endif	/* QUOTA */
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			uquad = (u_int64_t)freenum;
+			NFSQUOTABLKTOBYTE(uquad, fs->f_bsize);
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_QUOTAUSED:
+			freenum = 0;
+#ifdef QUOTA
+			/*
+			 * ufs_quotactl() insists that the uid argument
+			 * equal p_ruid for non-root quota access, so
+			 * we'll just make sure that's the case.
+			 */
+			savuid = p->p_cred->p_ruid;
+			p->p_cred->p_ruid = cred->cr_uid;
+			if (!VFS_QUOTACTL(mp, QCMD(Q_GETQUOTA,USRQUOTA),
+			    cred->cr_uid, (caddr_t)&dqb))
+			    freenum = dqb.dqb_curblocks;
+			p->p_cred->p_ruid = savuid;
+#endif	/* QUOTA */
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			uquad = (u_int64_t)freenum;
+			NFSQUOTABLKTOBYTE(uquad, fs->f_bsize);
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_RAWDEV:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_V4SPECDATA);
+			*tl++ = txdr_unsigned(NFSMAJOR(vap->va_rdev));
+			*tl = txdr_unsigned(NFSMINOR(vap->va_rdev));
+			retnum += NFSX_V4SPECDATA;
+			break;
+		case NFSATTRBIT_SPACEAVAIL:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0)) {
+				if (pnfssf != NULL)
+					uquad = (u_int64_t)pnfssf->f_bfree;
+				else
+					uquad = (u_int64_t)fs->f_bfree;
+			} else {
+				if (pnfssf != NULL)
+					uquad = (u_int64_t)pnfssf->f_bavail;
+				else
+					uquad = (u_int64_t)fs->f_bavail;
+			}
+			if (pnfssf != NULL)
+				uquad *= pnfssf->f_bsize;
+			else
+				uquad *= fs->f_bsize;
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SPACEFREE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			if (pnfssf != NULL) {
+				uquad = (u_int64_t)pnfssf->f_bfree;
+				uquad *= pnfssf->f_bsize;
+			} else {
+				uquad = (u_int64_t)fs->f_bfree;
+				uquad *= fs->f_bsize;
+			}
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SPACETOTAL:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			if (pnfssf != NULL) {
+				uquad = (u_int64_t)pnfssf->f_blocks;
+				uquad *= pnfssf->f_bsize;
+			} else {
+				uquad = (u_int64_t)fs->f_blocks;
+				uquad *= fs->f_bsize;
+			}
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SPACEUSED:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			txdr_hyper(vap->va_bytes, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_TIMEACCESS:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_V4TIME);
+			txdr_nfsv4time(&vap->va_atime, tl);
+			retnum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEACCESSSET:
+			if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_V4SETTIME);
+				*tl++ = txdr_unsigned(NFSV4SATTRTIME_TOCLIENT);
+				txdr_nfsv4time(&vap->va_atime, tl);
+				retnum += NFSX_V4SETTIME;
+			} else {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV4SATTRTIME_TOSERVER);
+				retnum += NFSX_UNSIGNED;
+			}
+			break;
+		case NFSATTRBIT_TIMEDELTA:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_V4TIME);
+			temptime.tv_sec = 0;
+			temptime.tv_nsec = 1000000000 / hz;
+			txdr_nfsv4time(&temptime, tl);
+			retnum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEMETADATA:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_V4TIME);
+			txdr_nfsv4time(&vap->va_ctime, tl);
+			retnum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEMODIFY:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_V4TIME);
+			txdr_nfsv4time(&vap->va_mtime, tl);
+			retnum += NFSX_V4TIME;
+			break;
+		case NFSATTRBIT_TIMEMODIFYSET:
+			if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_V4SETTIME);
+				*tl++ = txdr_unsigned(NFSV4SATTRTIME_TOCLIENT);
+				txdr_nfsv4time(&vap->va_mtime, tl);
+				retnum += NFSX_V4SETTIME;
+			} else {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV4SATTRTIME_TOSERVER);
+				retnum += NFSX_UNSIGNED;
+			}
+			break;
+		case NFSATTRBIT_MOUNTEDONFILEID:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER);
+			if (at_root != 0)
+				uquad = mounted_on_fileno;
+			else
+				uquad = vap->va_fileid;
+			txdr_hyper(uquad, tl);
+			retnum += NFSX_HYPER;
+			break;
+		case NFSATTRBIT_SUPPATTREXCLCREAT:
+			NFSSETSUPP_ATTRBIT(&attrbits, nd);
+			NFSCLRNOTSETABLE_ATTRBIT(&attrbits, nd);
+			NFSCLRBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEACCESSSET);
+			retnum += nfsrv_putattrbit(nd, &attrbits);
+			break;
+		case NFSATTRBIT_FSLAYOUTTYPE:
+		case NFSATTRBIT_LAYOUTTYPE:
+			if (nfsrv_devidcnt == 0)
+				siz = 1;
+			else
+				siz = 2;
+			if (siz == 2) {
+				NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+				*tl++ = txdr_unsigned(1);	/* One entry. */
+				if (nfsrv_doflexfile != 0 ||
+				    nfsrv_maxpnfsmirror > 1)
+					*tl = txdr_unsigned(NFSLAYOUT_FLEXFILE);
+				else
+					*tl = txdr_unsigned(
+					    NFSLAYOUT_NFSV4_1_FILES);
+			} else {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = 0;
+			}
+			retnum += siz * NFSX_UNSIGNED;
+			break;
+		case NFSATTRBIT_LAYOUTALIGNMENT:
+		case NFSATTRBIT_LAYOUTBLKSIZE:
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFS_SRVMAXIO);
+			retnum += NFSX_UNSIGNED;
+			break;
+		default:
+			printf("EEK! Bad V4 attribute bitpos=%d\n", bitpos);
+		}
+	    }
+	}
+	if (naclp != NULL)
+		acl_free(naclp);
+	free(fs, M_STATFS);
+	*retnump = txdr_unsigned(retnum);
+	return (retnum + prefixnum);
+}
+
+/*
+ * Put the attribute bits onto an mbuf list.
+ * Return the number of bytes of output generated.
+ */
+APPLESTATIC int
+nfsrv_putattrbit(struct nfsrv_descript *nd, nfsattrbit_t *attrbitp)
+{
+	u_int32_t *tl;
+	int cnt, i, bytesize;
+
+	for (cnt = NFSATTRBIT_MAXWORDS; cnt > 0; cnt--)
+		if (attrbitp->bits[cnt - 1])
+			break;
+	bytesize = (cnt + 1) * NFSX_UNSIGNED;
+	NFSM_BUILD(tl, u_int32_t *, bytesize);
+	*tl++ = txdr_unsigned(cnt);
+	for (i = 0; i < cnt; i++)
+		*tl++ = txdr_unsigned(attrbitp->bits[i]);
+	return (bytesize);
+}
+
+/*
+ * Convert a uid to a string.
+ * If the lookup fails, just output the digits.
+ * uid - the user id
+ * cpp - points to a buffer of size NFSV4_SMALLSTR
+ *       (malloc a larger one, as required)
+ * retlenp - pointer to length to be returned
+ */
+APPLESTATIC void
+nfsv4_uidtostr(uid_t uid, u_char **cpp, int *retlenp, NFSPROC_T *p)
+{
+	int i;
+	struct nfsusrgrp *usrp;
+	u_char *cp = *cpp;
+	uid_t tmp;
+	int cnt, hasampersand, len = NFSV4_SMALLSTR, ret;
+	struct nfsrv_lughash *hp;
+
+	cnt = 0;
+tryagain:
+	if (nfsrv_dnsnamelen > 0 && !nfs_enable_uidtostring) {
+		/*
+		 * Always map nfsrv_defaultuid to "nobody".
+		 */
+		if (uid == nfsrv_defaultuid) {
+			i = nfsrv_dnsnamelen + 7;
+			if (i > len) {
+				if (len > NFSV4_SMALLSTR)
+					free(cp, M_NFSSTRING);
+				cp = malloc(i, M_NFSSTRING, M_WAITOK);
+				*cpp = cp;
+				len = i;
+				goto tryagain;
+			}
+			*retlenp = i;
+			NFSBCOPY("nobody@", cp, 7);
+			cp += 7;
+			NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
+			return;
+		}
+		hasampersand = 0;
+		hp = NFSUSERHASH(uid);
+		mtx_lock(&hp->mtx);
+		TAILQ_FOREACH(usrp, &hp->lughead, lug_numhash) {
+			if (usrp->lug_uid == uid) {
+				if (usrp->lug_expiry < NFSD_MONOSEC)
+					break;
+				/*
+				 * If the name doesn't already have an '@'
+				 * in it, append @domainname to it.
+				 */
+				for (i = 0; i < usrp->lug_namelen; i++) {
+					if (usrp->lug_name[i] == '@') {
+						hasampersand = 1;
+						break;
+					}
+				}
+				if (hasampersand)
+					i = usrp->lug_namelen;
+				else
+					i = usrp->lug_namelen +
+					    nfsrv_dnsnamelen + 1;
+				if (i > len) {
+					mtx_unlock(&hp->mtx);
+					if (len > NFSV4_SMALLSTR)
+						free(cp, M_NFSSTRING);
+					cp = malloc(i, M_NFSSTRING, M_WAITOK);
+					*cpp = cp;
+					len = i;
+					goto tryagain;
+				}
+				*retlenp = i;
+				NFSBCOPY(usrp->lug_name, cp, usrp->lug_namelen);
+				if (!hasampersand) {
+					cp += usrp->lug_namelen;
+					*cp++ = '@';
+					NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
+				}
+				TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+				TAILQ_INSERT_TAIL(&hp->lughead, usrp,
+				    lug_numhash);
+				mtx_unlock(&hp->mtx);
+				return;
+			}
+		}
+		mtx_unlock(&hp->mtx);
+		cnt++;
+		ret = nfsrv_getuser(RPCNFSUSERD_GETUID, uid, (gid_t)0,
+		    NULL, p);
+		if (ret == 0 && cnt < 2)
+			goto tryagain;
+	}
+
+	/*
+	 * No match, just return a string of digits.
+	 */
+	tmp = uid;
+	i = 0;
+	while (tmp || i == 0) {
+		tmp /= 10;
+		i++;
+	}
+	len = (i > len) ? len : i;
+	*retlenp = len;
+	cp += (len - 1);
+	tmp = uid;
+	for (i = 0; i < len; i++) {
+		*cp-- = '0' + (tmp % 10);
+		tmp /= 10;
+	}
+	return;
+}
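+
+/*
+ * Editor's illustrative sketch (not part of the imported FreeBSD source):
+ * the caller pattern for nfsv4_uidtostr(), mirroring its use in
+ * nfsv4_fillattr() above.  The returned name is not NUL terminated; if the
+ * function had to malloc a larger buffer, the caller must free it.  The
+ * helper name is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_uid_name(uid_t uid, NFSPROC_T *p)
+{
+	u_char namestr[NFSV4_SMALLSTR], *cp = namestr;
+	int siz;
+
+	nfsv4_uidtostr(uid, &cp, &siz, p);
+	/* ... use the siz byte name at cp ... */
+	if (cp != namestr)
+		free(cp, M_NFSSTRING);
+}
+#endif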
+
+/*
+ * Get a credential for the uid with the server's group list.
+ * If no cached credential is found, just return the credential passed in.
+ */
+struct ucred *
+nfsrv_getgrpscred(struct ucred *oldcred)
+{
+	struct nfsusrgrp *usrp;
+	struct ucred *newcred;
+	int cnt, ret;
+	uid_t uid;
+	struct nfsrv_lughash *hp;
+
+	cnt = 0;
+	uid = oldcred->cr_uid;
+tryagain:
+	if (nfsrv_dnsnamelen > 0) {
+		hp = NFSUSERHASH(uid);
+		mtx_lock(&hp->mtx);
+		TAILQ_FOREACH(usrp, &hp->lughead, lug_numhash) {
+			if (usrp->lug_uid == uid) {
+				if (usrp->lug_expiry < NFSD_MONOSEC)
+					break;
+				if (usrp->lug_cred != NULL) {
+					newcred = crhold(usrp->lug_cred);
+					crfree(oldcred);
+				} else
+					newcred = oldcred;
+				TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+				TAILQ_INSERT_TAIL(&hp->lughead, usrp,
+				    lug_numhash);
+				mtx_unlock(&hp->mtx);
+				return (newcred);
+			}
+		}
+		mtx_unlock(&hp->mtx);
+		cnt++;
+		ret = nfsrv_getuser(RPCNFSUSERD_GETUID, uid, (gid_t)0,
+		    NULL, curthread);
+		if (ret == 0 && cnt < 2)
+			goto tryagain;
+	}
+	return (oldcred);
+}
+
+/*
+ * Convert a string to a uid.
+ * If no conversion is possible return NFSERR_BADOWNER, otherwise
+ * return 0.
+ * If this is called from a client side mount using AUTH_SYS and the
+ * string is made up entirely of digits, just convert the string to
+ * a number.
+ */
+APPLESTATIC int
+nfsv4_strtouid(struct nfsrv_descript *nd, u_char *str, int len, uid_t *uidp,
+    NFSPROC_T *p)
+{
+	int i;
+	char *cp, *endstr, *str0;
+	struct nfsusrgrp *usrp;
+	int cnt, ret;
+	int error = 0;
+	uid_t tuid;
+	struct nfsrv_lughash *hp, *hp2;
+
+	if (len == 0) {
+		error = NFSERR_BADOWNER;
+		goto out;
+	}
+	/* If a string of digits and an AUTH_SYS mount, just convert it. */
+	str0 = str;
+	tuid = (uid_t)strtoul(str0, &endstr, 10);
+	if ((endstr - str0) == len) {
+		/* A numeric string. */
+		if ((nd->nd_flag & ND_KERBV) == 0 &&
+		    ((nd->nd_flag & ND_NFSCL) != 0 ||
+		      nfsd_enable_stringtouid != 0))
+			*uidp = tuid;
+		else
+			error = NFSERR_BADOWNER;
+		goto out;
+	}
+	/*
+	 * Look for an '@'.
+	 */
+	cp = strchr(str0, '@');
+	if (cp != NULL)
+		i = (int)(cp++ - str0);
+	else
+		i = len;
+
+	cnt = 0;
+tryagain:
+	if (nfsrv_dnsnamelen > 0) {
+		/*
+		 * If an '@' is found and the domain name matches, search for
+		 * the name with dns stripped off.
+		 * Mixed case alphabetics will match for the domain name, but
+		 * all upper case will not.
+		 */
+		if (cnt == 0 && i < len && i > 0 &&
+		    (len - 1 - i) == nfsrv_dnsnamelen &&
+		    !nfsrv_cmpmixedcase(cp, nfsrv_dnsname, nfsrv_dnsnamelen)) {
+			len -= (nfsrv_dnsnamelen + 1);
+			*(cp - 1) = '\0';
+		}
+	
+		/*
+		 * Check for the special case of "nobody".
+		 */
+		if (len == 6 && !NFSBCMP(str, "nobody", 6)) {
+			*uidp = nfsrv_defaultuid;
+			error = 0;
+			goto out;
+		}
+	
+		hp = NFSUSERNAMEHASH(str, len);
+		mtx_lock(&hp->mtx);
+		TAILQ_FOREACH(usrp, &hp->lughead, lug_namehash) {
+			if (usrp->lug_namelen == len &&
+			    !NFSBCMP(usrp->lug_name, str, len)) {
+				if (usrp->lug_expiry < NFSD_MONOSEC)
+					break;
+				hp2 = NFSUSERHASH(usrp->lug_uid);
+				mtx_lock(&hp2->mtx);
+				TAILQ_REMOVE(&hp2->lughead, usrp, lug_numhash);
+				TAILQ_INSERT_TAIL(&hp2->lughead, usrp,
+				    lug_numhash);
+				*uidp = usrp->lug_uid;
+				mtx_unlock(&hp2->mtx);
+				mtx_unlock(&hp->mtx);
+				error = 0;
+				goto out;
+			}
+		}
+		mtx_unlock(&hp->mtx);
+		cnt++;
+		ret = nfsrv_getuser(RPCNFSUSERD_GETUSER, (uid_t)0, (gid_t)0,
+		    str, p);
+		if (ret == 0 && cnt < 2)
+			goto tryagain;
+	}
+	error = NFSERR_BADOWNER;
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
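+
+/*
+ * Editor's note (illustrative, not part of the imported FreeBSD source):
+ * with the rules above, an owner string such as "1001" is accepted as
+ * uid 1001 on an AUTH_SYS client mount (ND_NFSCL set, ND_KERBV clear) or
+ * when nfsd_enable_stringtouid is set, while a name such as
+ * "someuser@some.domain" (hypothetical) must be resolved through the
+ * name cache or an nfsuserd(8) upcall.
+ */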
+
+/*
+ * Convert a gid to a string.
+ * gid - the group id
+ * cpp - points to a buffer of size NFSV4_SMALLSTR
+ *       (malloc a larger one, as required)
+ * retlenp - pointer to length to be returned
+ */
+APPLESTATIC void
+nfsv4_gidtostr(gid_t gid, u_char **cpp, int *retlenp, NFSPROC_T *p)
+{
+	int i;
+	struct nfsusrgrp *usrp;
+	u_char *cp = *cpp;
+	gid_t tmp;
+	int cnt, hasampersand, len = NFSV4_SMALLSTR, ret;
+	struct nfsrv_lughash *hp;
+
+	cnt = 0;
+tryagain:
+	if (nfsrv_dnsnamelen > 0 && !nfs_enable_uidtostring) {
+		/*
+		 * Always map nfsrv_defaultgid to "nogroup".
+		 */
+		if (gid == nfsrv_defaultgid) {
+			i = nfsrv_dnsnamelen + 8;
+			if (i > len) {
+				if (len > NFSV4_SMALLSTR)
+					free(cp, M_NFSSTRING);
+				cp = malloc(i, M_NFSSTRING, M_WAITOK);
+				*cpp = cp;
+				len = i;
+				goto tryagain;
+			}
+			*retlenp = i;
+			NFSBCOPY("nogroup@", cp, 8);
+			cp += 8;
+			NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
+			return;
+		}
+		hasampersand = 0;
+		hp = NFSGROUPHASH(gid);
+		mtx_lock(&hp->mtx);
+		TAILQ_FOREACH(usrp, &hp->lughead, lug_numhash) {
+			if (usrp->lug_gid == gid) {
+				if (usrp->lug_expiry < NFSD_MONOSEC)
+					break;
+				/*
+				 * If the name doesn't already have an '@'
+				 * in it, append @domainname to it.
+				 */
+				for (i = 0; i < usrp->lug_namelen; i++) {
+					if (usrp->lug_name[i] == '@') {
+						hasampersand = 1;
+						break;
+					}
+				}
+				if (hasampersand)
+					i = usrp->lug_namelen;
+				else
+					i = usrp->lug_namelen +
+					    nfsrv_dnsnamelen + 1;
+				if (i > len) {
+					mtx_unlock(&hp->mtx);
+					if (len > NFSV4_SMALLSTR)
+						free(cp, M_NFSSTRING);
+					cp = malloc(i, M_NFSSTRING, M_WAITOK);
+					*cpp = cp;
+					len = i;
+					goto tryagain;
+				}
+				*retlenp = i;
+				NFSBCOPY(usrp->lug_name, cp, usrp->lug_namelen);
+				if (!hasampersand) {
+					cp += usrp->lug_namelen;
+					*cp++ = '@';
+					NFSBCOPY(nfsrv_dnsname, cp, nfsrv_dnsnamelen);
+				}
+				TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+				TAILQ_INSERT_TAIL(&hp->lughead, usrp,
+				    lug_numhash);
+				mtx_unlock(&hp->mtx);
+				return;
+			}
+		}
+		mtx_unlock(&hp->mtx);
+		cnt++;
+		ret = nfsrv_getuser(RPCNFSUSERD_GETGID, (uid_t)0, gid,
+		    NULL, p);
+		if (ret == 0 && cnt < 2)
+			goto tryagain;
+	}
+
+	/*
+	 * No match, just return a string of digits.
+	 */
+	tmp = gid;
+	i = 0;
+	while (tmp || i == 0) {
+		tmp /= 10;
+		i++;
+	}
+	len = (i > len) ? len : i;
+	*retlenp = len;
+	cp += (len - 1);
+	tmp = gid;
+	for (i = 0; i < len; i++) {
+		*cp-- = '0' + (tmp % 10);
+		tmp /= 10;
+	}
+	return;
+}
+
+/*
+ * Convert a string to a gid.
+ * If no conversion is possible return NFSERR_BADOWNER, otherwise
+ * return 0.
+ * If this is called from a client side mount using AUTH_SYS and the
+ * string is made up entirely of digits, just convert the string to
+ * a number.
+ */
+APPLESTATIC int
+nfsv4_strtogid(struct nfsrv_descript *nd, u_char *str, int len, gid_t *gidp,
+    NFSPROC_T *p)
+{
+	int i;
+	char *cp, *endstr, *str0;
+	struct nfsusrgrp *usrp;
+	int cnt, ret;
+	int error = 0;
+	gid_t tgid;
+	struct nfsrv_lughash *hp, *hp2;
+
+	if (len == 0) {
+		error = NFSERR_BADOWNER;
+		goto out;
+	}
+	/* If a string of digits and an AUTH_SYS mount, just convert it. */
+	str0 = str;
+	tgid = (gid_t)strtoul(str0, &endstr, 10);
+	if ((endstr - str0) == len) {
+		/* A numeric string. */
+		if ((nd->nd_flag & ND_KERBV) == 0 &&
+		    ((nd->nd_flag & ND_NFSCL) != 0 ||
+		      nfsd_enable_stringtouid != 0))
+			*gidp = tgid;
+		else
+			error = NFSERR_BADOWNER;
+		goto out;
+	}
+	/*
+	 * Look for an '@'.
+	 */
+	cp = strchr(str0, '@');
+	if (cp != NULL)
+		i = (int)(cp++ - str0);
+	else
+		i = len;
+
+	cnt = 0;
+tryagain:
+	if (nfsrv_dnsnamelen > 0) {
+		/*
+		 * If an '@' is found and the dns name matches, search for the
+		 * name with the dns stripped off.
+		 */
+		if (cnt == 0 && i < len && i > 0 &&
+		    (len - 1 - i) == nfsrv_dnsnamelen &&
+		    !nfsrv_cmpmixedcase(cp, nfsrv_dnsname, nfsrv_dnsnamelen)) {
+			len -= (nfsrv_dnsnamelen + 1);
+			*(cp - 1) = '\0';
+		}
+	
+		/*
+		 * Check for the special case of "nogroup".
+		 */
+		if (len == 7 && !NFSBCMP(str, "nogroup", 7)) {
+			*gidp = nfsrv_defaultgid;
+			error = 0;
+			goto out;
+		}
+	
+		hp = NFSGROUPNAMEHASH(str, len);
+		mtx_lock(&hp->mtx);
+		TAILQ_FOREACH(usrp, &hp->lughead, lug_namehash) {
+			if (usrp->lug_namelen == len &&
+			    !NFSBCMP(usrp->lug_name, str, len)) {
+				if (usrp->lug_expiry < NFSD_MONOSEC)
+					break;
+				hp2 = NFSGROUPHASH(usrp->lug_gid);
+				mtx_lock(&hp2->mtx);
+				TAILQ_REMOVE(&hp2->lughead, usrp, lug_numhash);
+				TAILQ_INSERT_TAIL(&hp2->lughead, usrp,
+				    lug_numhash);
+				*gidp = usrp->lug_gid;
+				mtx_unlock(&hp2->mtx);
+				mtx_unlock(&hp->mtx);
+				error = 0;
+				goto out;
+			}
+		}
+		mtx_unlock(&hp->mtx);
+		cnt++;
+		ret = nfsrv_getuser(RPCNFSUSERD_GETGROUP, (uid_t)0, (gid_t)0,
+		    str, p);
+		if (ret == 0 && cnt < 2)
+			goto tryagain;
+	}
+	error = NFSERR_BADOWNER;
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * Cmp len chars, allowing mixed case in the first argument to match lower
+ * case in the second, but not if the first argument is all upper case.
+ * Return 0 for a match, 1 otherwise.
+ */
+static int
+nfsrv_cmpmixedcase(u_char *cp, u_char *cp2, int len)
+{
+	int i;
+	u_char tmp;
+	int fndlower = 0;
+
+	for (i = 0; i < len; i++) {
+		if (*cp >= 'A' && *cp <= 'Z') {
+			tmp = *cp++ + ('a' - 'A');
+		} else {
+			tmp = *cp++;
+			if (tmp >= 'a' && tmp <= 'z')
+				fndlower = 1;
+		}
+		if (tmp != *cp2++)
+			return (1);
+	}
+	if (fndlower)
+		return (0);
+	else
+		return (1);
+}
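+
+/*
+ * Editor's note (illustrative, not part of the imported FreeBSD source):
+ * under this rule "Example.Org" matches "example.org" because the first
+ * string contains at least one lower case letter, whereas "EXAMPLE.ORG"
+ * does not match, even though it folds to the same bytes.
+ */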
+
+/*
+ * Set the port for the nfsuserd.
+ */
+APPLESTATIC int
+nfsrv_nfsuserdport(struct nfsuserd_args *nargs, NFSPROC_T *p)
+{
+	struct nfssockreq *rp;
+#ifdef INET
+	struct sockaddr_in *ad;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *ad6;
+	const struct in6_addr in6loopback = IN6ADDR_LOOPBACK_INIT;
+#endif
+	int error;
+
+	NFSLOCKNAMEID();
+	if (nfsrv_nfsuserd != NOTRUNNING) {
+		NFSUNLOCKNAMEID();
+		error = EPERM;
+		goto out;
+	}
+	nfsrv_nfsuserd = STARTSTOP;
+	/*
+	 * Set up the socket record and connect.
+	 * Set nr_client NULL before unlocking, just to ensure that no other
+	 * process/thread/core will use a bogus old value.  This could only
+	 * occur if the use of the nameid lock to protect nfsrv_nfsuserd is
+	 * broken.
+	 */
+	rp = &nfsrv_nfsuserdsock;
+	rp->nr_client = NULL;
+	NFSUNLOCKNAMEID();
+	rp->nr_sotype = SOCK_DGRAM;
+	rp->nr_soproto = IPPROTO_UDP;
+	rp->nr_lock = (NFSR_RESERVEDPORT | NFSR_LOCALHOST);
+	rp->nr_cred = NULL;
+	rp->nr_prog = RPCPROG_NFSUSERD;
+	error = 0;
+	switch (nargs->nuserd_family) {
+#ifdef INET
+	case AF_INET:
+		rp->nr_nam = malloc(sizeof(struct sockaddr_in), M_SONAME,
+		    M_WAITOK | M_ZERO);
+ 		ad = (struct sockaddr_in *)rp->nr_nam;
+		ad->sin_len = sizeof(struct sockaddr_in);
+ 		ad->sin_family = AF_INET;
+		ad->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+		ad->sin_port = nargs->nuserd_port;
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		rp->nr_nam = malloc(sizeof(struct sockaddr_in6), M_SONAME,
+		    M_WAITOK | M_ZERO);
+		ad6 = (struct sockaddr_in6 *)rp->nr_nam;
+		ad6->sin6_len = sizeof(struct sockaddr_in6);
+		ad6->sin6_family = AF_INET6;
+		ad6->sin6_addr = in6loopback;
+		ad6->sin6_port = nargs->nuserd_port;
+		break;
+#endif
+	default:
+		error = ENXIO;
+ 	}
+	rp->nr_vers = RPCNFSUSERD_VERS;
+	if (error == 0)
+		error = newnfs_connect(NULL, rp, NFSPROCCRED(p), p, 0);
+	if (error == 0) {
+		NFSLOCKNAMEID();
+		nfsrv_nfsuserd = RUNNING;
+		NFSUNLOCKNAMEID();
+	} else {
+		free(rp->nr_nam, M_SONAME);
+		NFSLOCKNAMEID();
+		nfsrv_nfsuserd = NOTRUNNING;
+		NFSUNLOCKNAMEID();
+	}
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * Delete the nfsuserd port.
+ */
+APPLESTATIC void
+nfsrv_nfsuserddelport(void)
+{
+
+	NFSLOCKNAMEID();
+	if (nfsrv_nfsuserd != RUNNING) {
+		NFSUNLOCKNAMEID();
+		return;
+	}
+	nfsrv_nfsuserd = STARTSTOP;
+	/* Wait for all upcalls to complete. */
+	while (nfsrv_userdupcalls > 0)
+		msleep(&nfsrv_userdupcalls, NFSNAMEIDMUTEXPTR, PVFS,
+		    "nfsupcalls", 0);
+	NFSUNLOCKNAMEID();
+	newnfs_disconnect(&nfsrv_nfsuserdsock);
+	free(nfsrv_nfsuserdsock.nr_nam, M_SONAME);
+	NFSLOCKNAMEID();
+	nfsrv_nfsuserd = NOTRUNNING;
+	NFSUNLOCKNAMEID();
+}
+
+/*
+ * Do upcalls to the nfsuserd, for cache misses of the owner/ownergroup
+ * name<-->id cache.
+ * Returns 0 upon success, non-zero otherwise.
+ */
+static int
+nfsrv_getuser(int procnum, uid_t uid, gid_t gid, char *name, NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript *nd;
+	int len;
+	struct nfsrv_descript nfsd;
+	struct ucred *cred;
+	int error;
+
+	NFSLOCKNAMEID();
+	if (nfsrv_nfsuserd != RUNNING) {
+		NFSUNLOCKNAMEID();
+		error = EPERM;
+		goto out;
+	}
+	/*
+	 * Maintain a count of upcalls in progress, so that
+	 * nfsrv_nfsuserddelport() can wait until no upcalls are in progress.
+	 */
+	nfsrv_userdupcalls++;
+	NFSUNLOCKNAMEID();
+	KASSERT(nfsrv_userdupcalls > 0,
+	    ("nfsrv_getuser: non-positive upcalls"));
+	nd = &nfsd;
+	cred = newnfs_getcred();
+	nd->nd_flag = ND_GSSINITREPLY;
+	nfsrvd_rephead(nd);
+
+	nd->nd_procnum = procnum;
+	if (procnum == RPCNFSUSERD_GETUID || procnum == RPCNFSUSERD_GETGID) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		if (procnum == RPCNFSUSERD_GETUID)
+			*tl = txdr_unsigned(uid);
+		else
+			*tl = txdr_unsigned(gid);
+	} else {
+		len = strlen(name);
+		(void) nfsm_strtom(nd, name, len);
+	}
+	error = newnfs_request(nd, NULL, NULL, &nfsrv_nfsuserdsock, NULL, NULL,
+		cred, RPCPROG_NFSUSERD, RPCNFSUSERD_VERS, NULL, 0, NULL, NULL);
+	NFSLOCKNAMEID();
+	if (--nfsrv_userdupcalls == 0 && nfsrv_nfsuserd == STARTSTOP)
+		wakeup(&nfsrv_userdupcalls);
+	NFSUNLOCKNAMEID();
+	NFSFREECRED(cred);
+	if (!error) {
+		mbuf_freem(nd->nd_mrep);
+		error = nd->nd_repstat;
+	}
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * This function is called from the nfssvc(2) system call, to update the
+ * kernel user/group name list(s) for the V4 owner and ownergroup attributes.
+ */
+APPLESTATIC int
+nfssvc_idname(struct nfsd_idargs *nidp)
+{
+	struct nfsusrgrp *nusrp, *usrp, *newusrp;
+	struct nfsrv_lughash *hp_name, *hp_idnum, *thp;
+	int i, group_locked, groupname_locked, user_locked, username_locked;
+	int error = 0;
+	u_char *cp;
+	gid_t *grps;
+	struct ucred *cr;
+	static int onethread = 0;
+	static time_t lasttime = 0;
+
+	if (nidp->nid_namelen <= 0 || nidp->nid_namelen > MAXHOSTNAMELEN) {
+		error = EINVAL;
+		goto out;
+	}
+	if (nidp->nid_flag & NFSID_INITIALIZE) {
+		cp = malloc(nidp->nid_namelen + 1, M_NFSSTRING, M_WAITOK);
+		error = copyin(CAST_USER_ADDR_T(nidp->nid_name), cp,
+		    nidp->nid_namelen);
+		if (error != 0) {
+			free(cp, M_NFSSTRING);
+			goto out;
+		}
+		if (atomic_cmpset_acq_int(&nfsrv_dnsnamelen, 0, 0) == 0) {
+			/*
+			 * Free up all the old stuff and reinitialize hash
+			 * lists.  All mutexes for both lists must be locked,
+			 * with the user/group name ones before the uid/gid
+			 * ones, to avoid a LOR.
+			 */
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_lock(&nfsusernamehash[i].mtx);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_lock(&nfsuserhash[i].mtx);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				TAILQ_FOREACH_SAFE(usrp,
+				    &nfsuserhash[i].lughead, lug_numhash, nusrp)
+					nfsrv_removeuser(usrp, 1);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_unlock(&nfsuserhash[i].mtx);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_unlock(&nfsusernamehash[i].mtx);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_lock(&nfsgroupnamehash[i].mtx);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_lock(&nfsgrouphash[i].mtx);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				TAILQ_FOREACH_SAFE(usrp,
+				    &nfsgrouphash[i].lughead, lug_numhash,
+				    nusrp)
+					nfsrv_removeuser(usrp, 0);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_unlock(&nfsgrouphash[i].mtx);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_unlock(&nfsgroupnamehash[i].mtx);
+			free(nfsrv_dnsname, M_NFSSTRING);
+			nfsrv_dnsname = NULL;
+		}
+		if (nfsuserhash == NULL) {
+			/* Allocate the hash tables. */
+			nfsuserhash = malloc(sizeof(struct nfsrv_lughash) *
+			    nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+			    M_ZERO);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_init(&nfsuserhash[i].mtx, "nfsuidhash",
+				    NULL, MTX_DEF | MTX_DUPOK);
+			nfsusernamehash = malloc(sizeof(struct nfsrv_lughash) *
+			    nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+			    M_ZERO);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_init(&nfsusernamehash[i].mtx,
+				    "nfsusrhash", NULL, MTX_DEF |
+				    MTX_DUPOK);
+			nfsgrouphash = malloc(sizeof(struct nfsrv_lughash) *
+			    nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+			    M_ZERO);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_init(&nfsgrouphash[i].mtx, "nfsgidhash",
+				    NULL, MTX_DEF | MTX_DUPOK);
+			nfsgroupnamehash = malloc(sizeof(struct nfsrv_lughash) *
+			    nfsrv_lughashsize, M_NFSUSERGROUP, M_WAITOK |
+			    M_ZERO);
+			for (i = 0; i < nfsrv_lughashsize; i++)
+			    mtx_init(&nfsgroupnamehash[i].mtx,
+			    "nfsgrphash", NULL, MTX_DEF | MTX_DUPOK);
+		}
+		/* (Re)initialize the list heads. */
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			TAILQ_INIT(&nfsuserhash[i].lughead);
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			TAILQ_INIT(&nfsusernamehash[i].lughead);
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			TAILQ_INIT(&nfsgrouphash[i].lughead);
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			TAILQ_INIT(&nfsgroupnamehash[i].lughead);
+
+		/*
+		 * Put name in "DNS" string.
+		 */
+		nfsrv_dnsname = cp;
+		nfsrv_defaultuid = nidp->nid_uid;
+		nfsrv_defaultgid = nidp->nid_gid;
+		nfsrv_usercnt = 0;
+		nfsrv_usermax = nidp->nid_usermax;
+		atomic_store_rel_int(&nfsrv_dnsnamelen, nidp->nid_namelen);
+		goto out;
+	}
+
+	/*
+	 * malloc the new one now, so any potential sleep occurs before
+	 * manipulation of the lists.
+	 */
+	newusrp = malloc(sizeof(struct nfsusrgrp) + nidp->nid_namelen,
+	    M_NFSUSERGROUP, M_WAITOK | M_ZERO);
+	error = copyin(CAST_USER_ADDR_T(nidp->nid_name), newusrp->lug_name,
+	    nidp->nid_namelen);
+	if (error == 0 && nidp->nid_ngroup > 0 &&
+	    (nidp->nid_flag & NFSID_ADDUID) != 0) {
+		grps = malloc(sizeof(gid_t) * nidp->nid_ngroup, M_TEMP,
+		    M_WAITOK);
+		error = copyin(CAST_USER_ADDR_T(nidp->nid_grps), grps,
+		    sizeof(gid_t) * nidp->nid_ngroup);
+		if (error == 0) {
+			/*
+			 * Create a credential just like svc_getcred(),
+			 * but using the group list provided.
+			 */
+			cr = crget();
+			cr->cr_uid = cr->cr_ruid = cr->cr_svuid = nidp->nid_uid;
+			crsetgroups(cr, nidp->nid_ngroup, grps);
+			cr->cr_rgid = cr->cr_svgid = cr->cr_groups[0];
+			cr->cr_prison = &prison0;
+			prison_hold(cr->cr_prison);
+#ifdef MAC
+			mac_cred_associate_nfsd(cr);
+#endif
+			newusrp->lug_cred = cr;
+		}
+		free(grps, M_TEMP);
+	}
+	if (error) {
+		free(newusrp, M_NFSUSERGROUP);
+		goto out;
+	}
+	newusrp->lug_namelen = nidp->nid_namelen;
+
+	/*
+	 * The lock order is username[0]->[nfsrv_lughashsize - 1] followed
+	 * by uid[0]->[nfsrv_lughashsize - 1], with the same for group.
+	 * The flags user_locked, username_locked, group_locked and
+	 * groupname_locked are set to indicate all of those hash lists are
+	 * locked. hp_name != NULL and hp_idnum != NULL indicate that
+	 * the corresponding single mutex is locked.
+	 */
+	user_locked = username_locked = group_locked = groupname_locked = 0;
+	hp_name = hp_idnum = NULL;
+
+	/*
+	 * Delete old entries, as required.
+	 */
+	if (nidp->nid_flag & (NFSID_DELUID | NFSID_ADDUID)) {
+		/* Must lock all username hash lists first, to avoid a LOR. */
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			mtx_lock(&nfsusernamehash[i].mtx);
+		username_locked = 1;
+		hp_idnum = NFSUSERHASH(nidp->nid_uid);
+		mtx_lock(&hp_idnum->mtx);
+		TAILQ_FOREACH_SAFE(usrp, &hp_idnum->lughead, lug_numhash,
+		    nusrp) {
+			if (usrp->lug_uid == nidp->nid_uid)
+				nfsrv_removeuser(usrp, 1);
+		}
+	} else if (nidp->nid_flag & (NFSID_DELUSERNAME | NFSID_ADDUSERNAME)) {
+		hp_name = NFSUSERNAMEHASH(newusrp->lug_name,
+		    newusrp->lug_namelen);
+		mtx_lock(&hp_name->mtx);
+		TAILQ_FOREACH_SAFE(usrp, &hp_name->lughead, lug_namehash,
+		    nusrp) {
+			if (usrp->lug_namelen == newusrp->lug_namelen &&
+			    !NFSBCMP(usrp->lug_name, newusrp->lug_name,
+			    usrp->lug_namelen)) {
+				thp = NFSUSERHASH(usrp->lug_uid);
+				mtx_lock(&thp->mtx);
+				nfsrv_removeuser(usrp, 1);
+				mtx_unlock(&thp->mtx);
+			}
+		}
+		hp_idnum = NFSUSERHASH(nidp->nid_uid);
+		mtx_lock(&hp_idnum->mtx);
+	} else if (nidp->nid_flag & (NFSID_DELGID | NFSID_ADDGID)) {
+		/* Must lock all groupname hash lists first, to avoid a LOR. */
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			mtx_lock(&nfsgroupnamehash[i].mtx);
+		groupname_locked = 1;
+		hp_idnum = NFSGROUPHASH(nidp->nid_gid);
+		mtx_lock(&hp_idnum->mtx);
+		TAILQ_FOREACH_SAFE(usrp, &hp_idnum->lughead, lug_numhash,
+		    nusrp) {
+			if (usrp->lug_gid == nidp->nid_gid)
+				nfsrv_removeuser(usrp, 0);
+		}
+	} else if (nidp->nid_flag & (NFSID_DELGROUPNAME | NFSID_ADDGROUPNAME)) {
+		hp_name = NFSGROUPNAMEHASH(newusrp->lug_name,
+		    newusrp->lug_namelen);
+		mtx_lock(&hp_name->mtx);
+		TAILQ_FOREACH_SAFE(usrp, &hp_name->lughead, lug_namehash,
+		    nusrp) {
+			if (usrp->lug_namelen == newusrp->lug_namelen &&
+			    !NFSBCMP(usrp->lug_name, newusrp->lug_name,
+			    usrp->lug_namelen)) {
+				thp = NFSGROUPHASH(usrp->lug_gid);
+				mtx_lock(&thp->mtx);
+				nfsrv_removeuser(usrp, 0);
+				mtx_unlock(&thp->mtx);
+			}
+		}
+		hp_idnum = NFSGROUPHASH(nidp->nid_gid);
+		mtx_lock(&hp_idnum->mtx);
+	}
+
+	/*
+	 * Now, we can add the new one.
+	 */
+	if (nidp->nid_usertimeout)
+		newusrp->lug_expiry = NFSD_MONOSEC + nidp->nid_usertimeout;
+	else
+		newusrp->lug_expiry = NFSD_MONOSEC + 5;
+	if (nidp->nid_flag & (NFSID_ADDUID | NFSID_ADDUSERNAME)) {
+		newusrp->lug_uid = nidp->nid_uid;
+		thp = NFSUSERHASH(newusrp->lug_uid);
+		mtx_assert(&thp->mtx, MA_OWNED);
+		TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_numhash);
+		thp = NFSUSERNAMEHASH(newusrp->lug_name, newusrp->lug_namelen);
+		mtx_assert(&thp->mtx, MA_OWNED);
+		TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_namehash);
+		atomic_add_int(&nfsrv_usercnt, 1);
+	} else if (nidp->nid_flag & (NFSID_ADDGID | NFSID_ADDGROUPNAME)) {
+		newusrp->lug_gid = nidp->nid_gid;
+		thp = NFSGROUPHASH(newusrp->lug_gid);
+		mtx_assert(&thp->mtx, MA_OWNED);
+		TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_numhash);
+		thp = NFSGROUPNAMEHASH(newusrp->lug_name, newusrp->lug_namelen);
+		mtx_assert(&thp->mtx, MA_OWNED);
+		TAILQ_INSERT_TAIL(&thp->lughead, newusrp, lug_namehash);
+		atomic_add_int(&nfsrv_usercnt, 1);
+	} else {
+		if (newusrp->lug_cred != NULL)
+			crfree(newusrp->lug_cred);
+		free(newusrp, M_NFSUSERGROUP);
+	}
+
+	/*
+	 * Once per second, allow one thread to trim the cache.
+	 */
+	if (lasttime < NFSD_MONOSEC &&
+	    atomic_cmpset_acq_int(&onethread, 0, 1) != 0) {
+		/*
+		 * First, unlock the single mutexes, so that all entries
+		 * can be locked and any LOR is avoided.
+		 */
+		if (hp_name != NULL) {
+			mtx_unlock(&hp_name->mtx);
+			hp_name = NULL;
+		}
+		if (hp_idnum != NULL) {
+			mtx_unlock(&hp_idnum->mtx);
+			hp_idnum = NULL;
+		}
+
+		if ((nidp->nid_flag & (NFSID_DELUID | NFSID_ADDUID |
+		    NFSID_DELUSERNAME | NFSID_ADDUSERNAME)) != 0) {
+			if (username_locked == 0) {
+				for (i = 0; i < nfsrv_lughashsize; i++)
+					mtx_lock(&nfsusernamehash[i].mtx);
+				username_locked = 1;
+			}
+			KASSERT(user_locked == 0,
+			    ("nfssvc_idname: user_locked"));
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_lock(&nfsuserhash[i].mtx);
+			user_locked = 1;
+			for (i = 0; i < nfsrv_lughashsize; i++) {
+				TAILQ_FOREACH_SAFE(usrp,
+				    &nfsuserhash[i].lughead, lug_numhash,
+				    nusrp)
+					if (usrp->lug_expiry < NFSD_MONOSEC)
+						nfsrv_removeuser(usrp, 1);
+			}
+			for (i = 0; i < nfsrv_lughashsize; i++) {
+				/*
+				 * Trim the cache using an approximate LRU
+				 * algorithm.  This code deletes the least
+				 * recently used entry on each hash list.
+				 */
+				if (nfsrv_usercnt <= nfsrv_usermax)
+					break;
+				usrp = TAILQ_FIRST(&nfsuserhash[i].lughead);
+				if (usrp != NULL)
+					nfsrv_removeuser(usrp, 1);
+			}
+		} else {
+			if (groupname_locked == 0) {
+				for (i = 0; i < nfsrv_lughashsize; i++)
+					mtx_lock(&nfsgroupnamehash[i].mtx);
+				groupname_locked = 1;
+			}
+			KASSERT(group_locked == 0,
+			    ("nfssvc_idname: group_locked"));
+			for (i = 0; i < nfsrv_lughashsize; i++)
+				mtx_lock(&nfsgrouphash[i].mtx);
+			group_locked = 1;
+			for (i = 0; i < nfsrv_lughashsize; i++) {
+				TAILQ_FOREACH_SAFE(usrp,
+				    &nfsgrouphash[i].lughead, lug_numhash,
+				    nusrp)
+					if (usrp->lug_expiry < NFSD_MONOSEC)
+						nfsrv_removeuser(usrp, 0);
+			}
+			for (i = 0; i < nfsrv_lughashsize; i++) {
+				/*
+				 * Trim the cache using an approximate LRU
+				 * algorithm.  This code deletes the least
+				 * recently used entry on each hash list.
+				 */
+				if (nfsrv_usercnt <= nfsrv_usermax)
+					break;
+				usrp = TAILQ_FIRST(&nfsgrouphash[i].lughead);
+				if (usrp != NULL)
+					nfsrv_removeuser(usrp, 0);
+			}
+		}
+		lasttime = NFSD_MONOSEC;
+		atomic_store_rel_int(&onethread, 0);
+	}
+
+	/* Now, unlock all locked mutexes. */
+	if (hp_idnum != NULL)
+		mtx_unlock(&hp_idnum->mtx);
+	if (hp_name != NULL)
+		mtx_unlock(&hp_name->mtx);
+	if (user_locked != 0)
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			mtx_unlock(&nfsuserhash[i].mtx);
+	if (username_locked != 0)
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			mtx_unlock(&nfsusernamehash[i].mtx);
+	if (group_locked != 0)
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			mtx_unlock(&nfsgrouphash[i].mtx);
+	if (groupname_locked != 0)
+		for (i = 0; i < nfsrv_lughashsize; i++)
+			mtx_unlock(&nfsgroupnamehash[i].mtx);
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * Remove a user/group name element.
+ */
+static void
+nfsrv_removeuser(struct nfsusrgrp *usrp, int isuser)
+{
+	struct nfsrv_lughash *hp;
+
+	if (isuser != 0) {
+		hp = NFSUSERHASH(usrp->lug_uid);
+		mtx_assert(&hp->mtx, MA_OWNED);
+		TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+		hp = NFSUSERNAMEHASH(usrp->lug_name, usrp->lug_namelen);
+		mtx_assert(&hp->mtx, MA_OWNED);
+		TAILQ_REMOVE(&hp->lughead, usrp, lug_namehash);
+	} else {
+		hp = NFSGROUPHASH(usrp->lug_gid);
+		mtx_assert(&hp->mtx, MA_OWNED);
+		TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+		hp = NFSGROUPNAMEHASH(usrp->lug_name, usrp->lug_namelen);
+		mtx_assert(&hp->mtx, MA_OWNED);
+		TAILQ_REMOVE(&hp->lughead, usrp, lug_namehash);
+	}
+	atomic_add_int(&nfsrv_usercnt, -1);
+	if (usrp->lug_cred != NULL)
+		crfree(usrp->lug_cred);
+	free(usrp, M_NFSUSERGROUP);
+}
+
+/*
+ * Free up all the allocations related to the name<-->id cache.
+ * This function should only be called when the nfsuserd daemon isn't
+ * running, since it doesn't do any locking.
+ * This function is meant to be used when the nfscommon module is unloaded.
+ */
+APPLESTATIC void
+nfsrv_cleanusergroup(void)
+{
+	struct nfsrv_lughash *hp, *hp2;
+	struct nfsusrgrp *nusrp, *usrp;
+	int i;
+
+	if (nfsuserhash == NULL)
+		return;
+
+	for (i = 0; i < nfsrv_lughashsize; i++) {
+		hp = &nfsuserhash[i];
+		TAILQ_FOREACH_SAFE(usrp, &hp->lughead, lug_numhash, nusrp) {
+			TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+			hp2 = NFSUSERNAMEHASH(usrp->lug_name,
+			    usrp->lug_namelen);
+			TAILQ_REMOVE(&hp2->lughead, usrp, lug_namehash);
+			if (usrp->lug_cred != NULL)
+				crfree(usrp->lug_cred);
+			free(usrp, M_NFSUSERGROUP);
+		}
+		hp = &nfsgrouphash[i];
+		TAILQ_FOREACH_SAFE(usrp, &hp->lughead, lug_numhash, nusrp) {
+			TAILQ_REMOVE(&hp->lughead, usrp, lug_numhash);
+			hp2 = NFSGROUPNAMEHASH(usrp->lug_name,
+			    usrp->lug_namelen);
+			TAILQ_REMOVE(&hp2->lughead, usrp, lug_namehash);
+			if (usrp->lug_cred != NULL)
+				crfree(usrp->lug_cred);
+			free(usrp, M_NFSUSERGROUP);
+		}
+		mtx_destroy(&nfsuserhash[i].mtx);
+		mtx_destroy(&nfsusernamehash[i].mtx);
+		mtx_destroy(&nfsgroupnamehash[i].mtx);
+		mtx_destroy(&nfsgrouphash[i].mtx);
+	}
+	free(nfsuserhash, M_NFSUSERGROUP);
+	free(nfsusernamehash, M_NFSUSERGROUP);
+	free(nfsgrouphash, M_NFSUSERGROUP);
+	free(nfsgroupnamehash, M_NFSUSERGROUP);
+	free(nfsrv_dnsname, M_NFSSTRING);
+}
+
+/*
+ * This function scans a byte string and checks for UTF-8 compliance.
+ * It returns 0 if it conforms and NFSERR_INVAL if not.
+ */
+APPLESTATIC int
+nfsrv_checkutf8(u_int8_t *cp, int len)
+{
+	u_int32_t val = 0x0;
+	int cnt = 0, gotd = 0, shift = 0;
+	u_int8_t byte;
+	static int utf8_shift[5] = { 7, 11, 16, 21, 26 };
+	int error = 0;
+
+	/*
+	 * Here are what the variables are used for:
+	 * val - the calculated value of a multibyte char, used to check
+	 *       that it was coded with the correct range
+	 * cnt - the number of 10xxxxxx bytes to follow
+	 * gotd - set for a char of Dxxx, so D800<->DFFF can be checked for
+	 * shift - lower order bits of range (i.e. "val >> shift" should
+	 *       not be 0; in other words, dividing by the lower bound
+	 *       of the range should yield a non-zero value)
+	 * byte - used to calculate cnt
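+	 *
+	 * For example, the 2 byte sequence 0xc3 0xa9 (U+00E9) gives
+	 * cnt = 1, val = 0x03 and shift = 7 from the first byte; the
+	 * second byte extends val to 0xe9 and, since (0xe9 >> 7) != 0,
+	 * the character is accepted.  An overlong encoding such as
+	 * 0xc0 0x80 would leave (val >> 7) == 0 and be rejected with
+	 * NFSERR_INVAL.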
+	 */
+	while (len > 0) {
+		if (cnt > 0) {
+			/* This handles the 10xxxxxx bytes */
+			if ((*cp & 0xc0) != 0x80 ||
+			    (gotd && (*cp & 0x20))) {
+				error = NFSERR_INVAL;
+				goto out;
+			}
+			gotd = 0;
+			val <<= 6;
+			val |= (*cp & 0x3f);
+			cnt--;
+			if (cnt == 0 && (val >> shift) == 0x0) {
+				error = NFSERR_INVAL;
+				goto out;
+			}
+		} else if (*cp & 0x80) {
+			/* first byte of multi byte char */
+			byte = *cp;
+			while ((byte & 0x40) && cnt < 6) {
+				cnt++;
+				byte <<= 1;
+			}
+			if (cnt == 0 || cnt == 6) {
+				error = NFSERR_INVAL;
+				goto out;
+			}
+			val = (*cp & (0x3f >> cnt));
+			shift = utf8_shift[cnt - 1];
+			if (cnt == 2 && val == 0xd)
+				/* Check for the 0xd800-0xdfff case */
+				gotd = 1;
+		}
+		cp++;
+		len--;
+	}
+	if (cnt > 0)
+		error = NFSERR_INVAL;
+
+out:
+	NFSEXITCODE(error);
+	return (error);
+}
+
+/*
+ * Parse the xdr for an NFSv4 FsLocations attribute. Return two malloc'd
+ * strings, one with the root path in it and the other with the list of
+ * locations. The list is in the same format as is found in nfr_refs.
+ * It is a "," separated list of entries, where each of them is of the
+ * form <server>:<rootpath>. For example
+ * "nfsv4-test:/sub2,nfsv4-test2:/user/mnt,nfsv4-test2:/user/mnt2"
+ * The nilp argument is set to 1 for the special case of a null fs_root
+ * and an empty server list.
+ * It returns NFSERR_BADXDR if the xdr can't be parsed, and it returns
+ * the number of xdr bytes parsed in sump.
+ */
+static int
+nfsrv_getrefstr(struct nfsrv_descript *nd, u_char **fsrootp, u_char **srvp,
+    int *sump, int *nilp)
+{
+	u_int32_t *tl;
+	u_char *cp = NULL, *cp2 = NULL, *cp3, *str;
+	int i, j, len, stringlen, cnt, slen, siz, xdrsum, error = 0, nsrv;
+	struct list {
+		SLIST_ENTRY(list) next;
+		int len;
+		u_char host[1];
+	} *lsp, *nlsp;
+	SLIST_HEAD(, list) head;
+
+	*fsrootp = NULL;
+	*srvp = NULL;
+	*nilp = 0;
+
+	/*
+	 * Get the fs_root path and check for the special case of null path
+	 * and 0 length server list.
+	 */
+	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+	len = fxdr_unsigned(int, *tl);
+	if (len < 0 || len > 10240) {
+		error = NFSERR_BADXDR;
+		goto nfsmout;
+	}
+	if (len == 0) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		if (*tl != 0) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		*nilp = 1;
+		*sump = 2 * NFSX_UNSIGNED;
+		error = 0;
+		goto nfsmout;
+	}
+	cp = malloc(len + 1, M_NFSSTRING, M_WAITOK);
+	error = nfsrv_mtostr(nd, cp, len);
+	if (!error) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		cnt = fxdr_unsigned(int, *tl);
+		if (cnt <= 0)
+			error = NFSERR_BADXDR;
+	}
+	if (error)
+		goto nfsmout;
+
+	/*
+	 * Now, loop through the location list and make up the srvlist.
+	 */
+	xdrsum = (2 * NFSX_UNSIGNED) + NFSM_RNDUP(len);
+	cp2 = cp3 = malloc(1024, M_NFSSTRING, M_WAITOK);
+	slen = 1024;
+	siz = 0;
+	for (i = 0; i < cnt; i++) {
+		SLIST_INIT(&head);
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		nsrv = fxdr_unsigned(int, *tl);
+		if (nsrv <= 0) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+
+		/*
+		 * Handle the first server by putting it in the srvstr.
+		 */
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		len = fxdr_unsigned(int, *tl);
+		if (len <= 0 || len > 1024) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		nfsrv_refstrbigenough(siz + len + 3, &cp2, &cp3, &slen);
+		if (cp3 != cp2) {
+			*cp3++ = ',';
+			siz++;
+		}
+		error = nfsrv_mtostr(nd, cp3, len);
+		if (error)
+			goto nfsmout;
+		cp3 += len;
+		*cp3++ = ':';
+		siz += (len + 1);
+		xdrsum += (2 * NFSX_UNSIGNED) + NFSM_RNDUP(len);
+		for (j = 1; j < nsrv; j++) {
+			/*
+			 * Yuck, put them in an slist and process them later.
+			 */
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			len = fxdr_unsigned(int, *tl);
+			if (len <= 0 || len > 1024) {
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			lsp = (struct list *)malloc(sizeof (struct list)
+			    + len, M_TEMP, M_WAITOK);
+			error = nfsrv_mtostr(nd, lsp->host, len);
+			if (error)
+				goto nfsmout;
+			xdrsum += NFSX_UNSIGNED + NFSM_RNDUP(len);
+			lsp->len = len;
+			SLIST_INSERT_HEAD(&head, lsp, next);
+		}
+
+		/*
+		 * Finally, we can get the path.
+		 */
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		len = fxdr_unsigned(int, *tl);
+		if (len <= 0 || len > 1024) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		nfsrv_refstrbigenough(siz + len + 1, &cp2, &cp3, &slen);
+		error = nfsrv_mtostr(nd, cp3, len);
+		if (error)
+			goto nfsmout;
+		xdrsum += NFSX_UNSIGNED + NFSM_RNDUP(len);
+		str = cp3;
+		stringlen = len;
+		cp3 += len;
+		siz += len;
+		SLIST_FOREACH_SAFE(lsp, &head, next, nlsp) {
+			nfsrv_refstrbigenough(siz + lsp->len + stringlen + 3,
+			    &cp2, &cp3, &slen);
+			*cp3++ = ',';
+			NFSBCOPY(lsp->host, cp3, lsp->len);
+			cp3 += lsp->len;
+			*cp3++ = ':';
+			NFSBCOPY(str, cp3, stringlen);
+			cp3 += stringlen;
+			*cp3 = '\0';
+			siz += (lsp->len + stringlen + 2);
+			free(lsp, M_TEMP);
+		}
+	}
+	*fsrootp = cp;
+	*srvp = cp2;
+	*sump = xdrsum;
+	NFSEXITCODE2(0, nd);
+	return (0);
+nfsmout:
+	if (cp != NULL)
+		free(cp, M_NFSSTRING);
+	if (cp2 != NULL)
+		free(cp2, M_NFSSTRING);
+	NFSEXITCODE2(error, nd);
+	return (error);
+}
+
+/*
+ * Make the malloc'd space large enough. This is a pain, but the xdr
+ * doesn't set an upper bound on the size, so...
+ */
+static void
+nfsrv_refstrbigenough(int siz, u_char **cpp, u_char **cpp2, int *slenp)
+{
+	u_char *cp;
+	int i;
+
+	if (siz <= *slenp)
+		return;
+	cp = malloc(siz + 1024, M_NFSSTRING, M_WAITOK);
+	NFSBCOPY(*cpp, cp, *slenp);
+	free(*cpp, M_NFSSTRING);
+	i = *cpp2 - *cpp;
+	*cpp = cp;
+	*cpp2 = cp + i;
+	*slenp = siz + 1024;
+}
+
+/*
+ * Initialize the reply header data structures.
+ */
+APPLESTATIC void
+nfsrvd_rephead(struct nfsrv_descript *nd)
+{
+	mbuf_t mreq;
+
+	/*
+	 * If this is a big reply, use a cluster.
+	 */
+	if ((nd->nd_flag & ND_GSSINITREPLY) == 0 &&
+	    nfs_bigreply[nd->nd_procnum]) {
+		NFSMCLGET(mreq, M_WAITOK);
+		nd->nd_mreq = mreq;
+		nd->nd_mb = mreq;
+	} else {
+		NFSMGET(mreq);
+		nd->nd_mreq = mreq;
+		nd->nd_mb = mreq;
+	}
+	nd->nd_bpos = NFSMTOD(mreq, caddr_t);
+	mbuf_setlen(mreq, 0);
+
+	if ((nd->nd_flag & ND_GSSINITREPLY) == 0)
+		NFSM_BUILD(nd->nd_errp, int *, NFSX_UNSIGNED);
+}
+
+/*
+ * Lock a socket against others.
+ * Currently used to serialize connect/disconnect attempts.
+ */
+int
+newnfs_sndlock(int *flagp)
+{
+	struct timespec ts;
+
+	NFSLOCKSOCK();
+	while (*flagp & NFSR_SNDLOCK) {
+		*flagp |= NFSR_WANTSND;
+		ts.tv_sec = 0;
+		ts.tv_nsec = 0;
+		(void) nfsmsleep((caddr_t)flagp, NFSSOCKMUTEXPTR,
+		    PZERO - 1, "nfsndlck", &ts);
+	}
+	*flagp |= NFSR_SNDLOCK;
+	NFSUNLOCKSOCK();
+	return (0);
+}
+
+/*
+ * Unlock the stream socket for others.
+ */
+void
+newnfs_sndunlock(int *flagp)
+{
+
+	NFSLOCKSOCK();
+	if ((*flagp & NFSR_SNDLOCK) == 0)
+		panic("nfs sndunlock");
+	*flagp &= ~NFSR_SNDLOCK;
+	if (*flagp & NFSR_WANTSND) {
+		*flagp &= ~NFSR_WANTSND;
+		wakeup((caddr_t)flagp);
+	}
+	NFSUNLOCKSOCK();
+}
+
+APPLESTATIC int
+nfsv4_getipaddr(struct nfsrv_descript *nd, struct sockaddr_in *sin,
+    struct sockaddr_in6 *sin6, sa_family_t *saf, int *isudp)
+{
+	struct in_addr saddr;
+	uint32_t portnum, *tl;
+	int i, j, k;
+	sa_family_t af = AF_UNSPEC;
+	char addr[64], protocol[5], *cp;
+	int cantparse = 0, error = 0;
+	uint16_t portv;
+
+	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+	i = fxdr_unsigned(int, *tl);
+	if (i >= 3 && i <= 4) {
+		error = nfsrv_mtostr(nd, protocol, i);
+		if (error)
+			goto nfsmout;
+		if (strcmp(protocol, "tcp") == 0) {
+			af = AF_INET;
+			*isudp = 0;
+		} else if (strcmp(protocol, "udp") == 0) {
+			af = AF_INET;
+			*isudp = 1;
+		} else if (strcmp(protocol, "tcp6") == 0) {
+			af = AF_INET6;
+			*isudp = 0;
+		} else if (strcmp(protocol, "udp6") == 0) {
+			af = AF_INET6;
+			*isudp = 1;
+		} else
+			cantparse = 1;
+	} else {
+		cantparse = 1;
+		if (i > 0) {
+			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
+			if (error)
+				goto nfsmout;
+		}
+	}
+	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+	i = fxdr_unsigned(int, *tl);
+	if (i < 0) {
+		error = NFSERR_BADXDR;
+		goto nfsmout;
+	} else if (cantparse == 0 && i >= 11 && i < 64) {
+		/*
+		 * The shortest address is 11 chars and the longest is < 64.
+		 */
+		error = nfsrv_mtostr(nd, addr, i);
+		if (error)
+			goto nfsmout;
+
+		/* Find the port# at the end and extract that. */
+		i = strlen(addr);
+		k = 0;
+		cp = &addr[i - 1];
+		/* Count back two '.'s from end to get port# field. */
+		for (j = 0; j < i; j++) {
+			if (*cp == '.') {
+				k++;
+				if (k == 2)
+					break;
+			}
+			cp--;
+		}
+		if (k == 2) {
+			/*
+			 * The NFSv4 port# is appended as .N.N, where N is
+			 * a decimal # in the range 0-255, just like an inet4
+			 * address. Cheat and use inet_aton(), which will
+			 * return a Class A address and then shift the high
+			 * order 8 bits over to convert it to the port#.
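+			 *
+			 * For example, with the universal address
+			 * "192.0.2.7.8.1" (an illustrative value), cp ends
+			 * up pointing at "8.1"; inet_aton() then gives
+			 * 0x08000001, so (portnum >> 16) | (portnum & 0xff)
+			 * is 0x0800 | 0x01 == 2049, the standard NFS port.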
+			 */
+			*cp++ = '\0';
+			if (inet_aton(cp, &saddr) == 1) {
+				portnum = ntohl(saddr.s_addr);
+				portv = (uint16_t)((portnum >> 16) |
+				    (portnum & 0xff));
+			} else
+				cantparse = 1;
+		} else
+			cantparse = 1;
+		if (cantparse == 0) {
+			if (af == AF_INET) {
+				if (inet_pton(af, addr, &sin->sin_addr) == 1) {
+					sin->sin_len = sizeof(*sin);
+					sin->sin_family = AF_INET;
+					sin->sin_port = htons(portv);
+					*saf = af;
+					return (0);
+				}
+			} else {
+				if (inet_pton(af, addr, &sin6->sin6_addr)
+				    == 1) {
+					sin6->sin6_len = sizeof(*sin6);
+					sin6->sin6_family = AF_INET6;
+					sin6->sin6_port = htons(portv);
+					*saf = af;
+					return (0);
+				}
+			}
+		}
+	} else {
+		if (i > 0) {
+			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
+			if (error)
+				goto nfsmout;
+		}
+	}
+	error = EPERM;
+nfsmout:
+	return (error);
+}
+
+/*
+ * Handle an NFSv4.1 Sequence request for the session.
+ * If reply != NULL, use it to return the cached reply, as required.
+ * The client gets a cached reply via this call for callbacks, whereas the
+ * server gets a cached reply via the nfsv4_seqsess_cacherep() call.
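+ *
+ * As a summary of the slot handling below: a request whose seqid equals
+ * the slot's current nfssl_seq is treated as a retransmission and is
+ * either delayed (still in progress) or answered from the cached reply;
+ * a seqid of nfssl_seq + 1 starts a new request in the slot; any other
+ * seqid fails with NFSERR_SEQMISORDERED.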
+ */
+int
+nfsv4_seqsession(uint32_t seqid, uint32_t slotid, uint32_t highslot,
+    struct nfsslot *slots, struct mbuf **reply, uint16_t maxslot)
+{
+	int error;
+
+	error = 0;
+	if (reply != NULL)
+		*reply = NULL;
+	if (slotid > maxslot)
+		return (NFSERR_BADSLOT);
+	if (seqid == slots[slotid].nfssl_seq) {
+		/* A retry. */
+		if (slots[slotid].nfssl_inprog != 0)
+			error = NFSERR_DELAY;
+		else if (slots[slotid].nfssl_reply != NULL) {
+			if (reply != NULL) {
+				*reply = slots[slotid].nfssl_reply;
+				slots[slotid].nfssl_reply = NULL;
+			}
+			slots[slotid].nfssl_inprog = 1;
+			error = NFSERR_REPLYFROMCACHE;
+		} else
+			/* No reply cached, so just do it. */
+			slots[slotid].nfssl_inprog = 1;
+	} else if ((slots[slotid].nfssl_seq + 1) == seqid) {
+		if (slots[slotid].nfssl_reply != NULL)
+			m_freem(slots[slotid].nfssl_reply);
+		slots[slotid].nfssl_reply = NULL;
+		slots[slotid].nfssl_inprog = 1;
+		slots[slotid].nfssl_seq++;
+	} else
+		error = NFSERR_SEQMISORDERED;
+	return (error);
+}
+
+/*
+ * Cache this reply for the slot.
+ * Use the "rep" argument to return the cached reply if repstat is set to
+ * NFSERR_REPLYFROMCACHE. The client never sets repstat to this value.
+ */
+void
+nfsv4_seqsess_cacherep(uint32_t slotid, struct nfsslot *slots, int repstat,
+   struct mbuf **rep)
+{
+
+	if (repstat == NFSERR_REPLYFROMCACHE) {
+		*rep = slots[slotid].nfssl_reply;
+		slots[slotid].nfssl_reply = NULL;
+	} else {
+		if (slots[slotid].nfssl_reply != NULL)
+			m_freem(slots[slotid].nfssl_reply);
+		slots[slotid].nfssl_reply = *rep;
+	}
+	slots[slotid].nfssl_inprog = 0;
+}
+
+/*
+ * Generate the xdr for an NFSv4.1 Sequence Operation.
+ */
+APPLESTATIC void
+nfsv4_setsequence(struct nfsmount *nmp, struct nfsrv_descript *nd,
+    struct nfsclsession *sep, int dont_replycache)
+{
+	uint32_t *tl, slotseq = 0;
+	int error, maxslot, slotpos;
+	uint8_t sessionid[NFSX_V4SESSIONID];
+
+	error = nfsv4_sequencelookup(nmp, sep, &slotpos, &maxslot, &slotseq,
+	    sessionid);
+
+	/* Build the Sequence arguments. */
+	NFSM_BUILD(tl, uint32_t *, NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
+	nd->nd_sequence = tl;
+	bcopy(sessionid, tl, NFSX_V4SESSIONID);
+	tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
+	nd->nd_slotseq = tl;
+	if (error == 0) {
+		nd->nd_flag |= ND_HASSLOTID;
+		nd->nd_slotid = slotpos;
+		*tl++ = txdr_unsigned(slotseq);
+		*tl++ = txdr_unsigned(slotpos);
+		*tl++ = txdr_unsigned(maxslot);
+		if (dont_replycache == 0)
+			*tl = newnfs_true;
+		else
+			*tl = newnfs_false;
+	} else {
+		/*
+		 * There are two errors and the rest of the session can
+		 * just be zeros.
+		 * NFSERR_BADSESSION: This bad session should just generate
+		 *    the same error again when the RPC is retried.
+		 * ESTALE: A forced dismount is in progress and will cause the
+		 *    RPC to fail later.
+		 */
+		*tl++ = 0;
+		*tl++ = 0;
+		*tl++ = 0;
+		*tl = 0;
+	}
+	nd->nd_flag |= ND_HASSEQUENCE;
+}
+
+int
+nfsv4_sequencelookup(struct nfsmount *nmp, struct nfsclsession *sep,
+    int *slotposp, int *maxslotp, uint32_t *slotseqp, uint8_t *sessionid)
+{
+	int i, maxslot, slotpos;
+	uint64_t bitval;
+
+	/* Find an unused slot. */
+	slotpos = -1;
+	maxslot = -1;
+	mtx_lock(&sep->nfsess_mtx);
+	do {
+		if (nmp != NULL && sep->nfsess_defunct != 0) {
+			/* Just return the bad session. */
+			bcopy(sep->nfsess_sessionid, sessionid,
+			    NFSX_V4SESSIONID);
+			mtx_unlock(&sep->nfsess_mtx);
+			return (NFSERR_BADSESSION);
+		}
+		bitval = 1;
+		for (i = 0; i < sep->nfsess_foreslots; i++) {
+			if ((bitval & sep->nfsess_slots) == 0) {
+				slotpos = i;
+				sep->nfsess_slots |= bitval;
+				sep->nfsess_slotseq[i]++;
+				*slotseqp = sep->nfsess_slotseq[i];
+				break;
+			}
+			bitval <<= 1;
+		}
+		if (slotpos == -1) {
+			/*
+			 * If a forced dismount is in progress, just return.
+			 * This RPC attempt will fail when it calls
+			 * newnfs_request().
+			 */
+			if (nmp != NULL && NFSCL_FORCEDISM(nmp->nm_mountp)) {
+				mtx_unlock(&sep->nfsess_mtx);
+				return (ESTALE);
+			}
+			/* Wake up once/sec, to check for a forced dismount. */
+			(void)mtx_sleep(&sep->nfsess_slots, &sep->nfsess_mtx,
+			    PZERO, "nfsclseq", hz);
+		}
+	} while (slotpos == -1);
+	/* Now, find the highest slot in use. (nfsc_slots is 64bits) */
+	bitval = 1;
+	for (i = 0; i < 64; i++) {
+		if ((bitval & sep->nfsess_slots) != 0)
+			maxslot = i;
+		bitval <<= 1;
+	}
+	bcopy(sep->nfsess_sessionid, sessionid, NFSX_V4SESSIONID);
+	mtx_unlock(&sep->nfsess_mtx);
+	*slotposp = slotpos;
+	*maxslotp = maxslot;
+	return (0);
+}
+
+/*
+ * Free a session slot.
+ */
+APPLESTATIC void
+nfsv4_freeslot(struct nfsclsession *sep, int slot)
+{
+	uint64_t bitval;
+
+	bitval = 1;
+	if (slot > 0)
+		bitval <<= slot;
+	mtx_lock(&sep->nfsess_mtx);
+	if ((bitval & sep->nfsess_slots) == 0)
+		printf("freeing free slot!!\n");
+	sep->nfsess_slots &= ~bitval;
+	wakeup(&sep->nfsess_slots);
+	mtx_unlock(&sep->nfsess_mtx);
+}
+
+/*
+ * Search for a matching pnfsd DS, based on the nmp arg.
+ * Return one if found, NULL otherwise.
+ */
+struct nfsdevice *
+nfsv4_findmirror(struct nfsmount *nmp)
+{
+	struct nfsdevice *ds;
+
+	mtx_assert(NFSDDSMUTEXPTR, MA_OWNED);
+	/*
+	 * Search the DS server list for a match with nmp.
+	 */
+	if (nfsrv_devidcnt == 0)
+		return (NULL);
+	TAILQ_FOREACH(ds, &nfsrv_devidhead, nfsdev_list) {
+		if (ds->nfsdev_nmp == nmp) {
+			NFSCL_DEBUG(4, "nfsv4_findmirror: fnd main ds\n");
+			break;
+		}
+	}
+	return (ds);
+}
+
diff --git a/freebsd/sys/fs/nfs/nfs_var.h b/freebsd/sys/fs/nfs/nfs_var.h
new file mode 100644
index 0000000..9162286
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfs_var.h
@@ -0,0 +1,742 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * XXX needs <nfs/rpcv2.h> and <nfs/nfs.h> because of typedefs
+ */
+
+struct uio;
+struct ucred;
+struct nfscred;
+NFSPROC_T;
+struct buf;
+struct sockaddr_in;
+struct nfs_dlmount;
+struct file;
+struct nfsmount;
+struct socket;
+struct nfsreq;
+struct nfssockreq;
+struct vattr;
+struct nameidata;
+struct nfsnode;
+struct nfsfh;
+struct sillyrename;
+struct componentname;
+struct nfsd_srvargs;
+struct nfsrv_descript;
+struct nfs_fattr;
+union nethostaddr;
+struct nfsstate;
+struct nfslock;
+struct nfsclient;
+struct nfslayout;
+struct nfsdsession;
+struct nfslockconflict;
+struct nfsd_idargs;
+struct nfsd_clid;
+struct nfsusrgrp;
+struct nfsclowner;
+struct nfsclopen;
+struct nfsclopenhead;
+struct nfsclclient;
+struct nfsclsession;
+struct nfscllockowner;
+struct nfscllock;
+struct nfscldeleg;
+struct nfscllayout;
+struct nfscldevinfo;
+struct nfsv4lock;
+struct nfsvattr;
+struct nfs_vattr;
+struct NFSSVCARGS;
+struct nfsdevice;
+struct pnfsdsfile;
+struct pnfsdsattr;
+#ifdef __FreeBSD__
+NFS_ACCESS_ARGS;
+NFS_OPEN_ARGS;
+NFS_GETATTR_ARGS;
+NFS_LOOKUP_ARGS;
+NFS_READDIR_ARGS;
+#endif
+
+/* nfs_nfsdstate.c */
+int nfsrv_setclient(struct nfsrv_descript *, struct nfsclient **,
+    nfsquad_t *, nfsquad_t *, NFSPROC_T *);
+int nfsrv_getclient(nfsquad_t, int, struct nfsclient **, struct nfsdsession *,
+    nfsquad_t, uint32_t, struct nfsrv_descript *, NFSPROC_T *);
+int nfsrv_destroyclient(nfsquad_t, NFSPROC_T *);
+int nfsrv_destroysession(struct nfsrv_descript *, uint8_t *);
+int nfsrv_bindconnsess(struct nfsrv_descript *, uint8_t *, int *);
+int nfsrv_freestateid(struct nfsrv_descript *, nfsv4stateid_t *, NFSPROC_T *);
+int nfsrv_teststateid(struct nfsrv_descript *, nfsv4stateid_t *, NFSPROC_T *);
+int nfsrv_adminrevoke(struct nfsd_clid *, NFSPROC_T *);
+void nfsrv_dumpclients(struct nfsd_dumpclients *, int);
+void nfsrv_dumplocks(vnode_t, struct nfsd_dumplocks *, int, NFSPROC_T *);
+int nfsrv_lockctrl(vnode_t, struct nfsstate **,
+    struct nfslock **, struct nfslockconflict *, nfsquad_t, nfsv4stateid_t *,
+    struct nfsexstuff *, struct nfsrv_descript *, NFSPROC_T *);
+int nfsrv_openctrl(struct nfsrv_descript *, vnode_t,
+    struct nfsstate **, nfsquad_t, nfsv4stateid_t *, nfsv4stateid_t *, 
+    u_int32_t *, struct nfsexstuff *, NFSPROC_T *, u_quad_t);
+int nfsrv_opencheck(nfsquad_t, nfsv4stateid_t *, struct nfsstate *,
+    vnode_t, struct nfsrv_descript *, NFSPROC_T *, int);
+int nfsrv_openupdate(vnode_t, struct nfsstate *, nfsquad_t,
+    nfsv4stateid_t *, struct nfsrv_descript *, NFSPROC_T *, int *);
+int nfsrv_delegupdate(struct nfsrv_descript *, nfsquad_t, nfsv4stateid_t *,
+    vnode_t, int, struct ucred *, NFSPROC_T *, int *);
+int nfsrv_releaselckown(struct nfsstate *, nfsquad_t, NFSPROC_T *);
+void nfsrv_zapclient(struct nfsclient *, NFSPROC_T *);
+int nfssvc_idname(struct nfsd_idargs *);
+void nfsrv_servertimer(void);
+int nfsrv_getclientipaddr(struct nfsrv_descript *, struct nfsclient *);
+void nfsrv_setupstable(NFSPROC_T *);
+void nfsrv_updatestable(NFSPROC_T *);
+void nfsrv_writestable(u_char *, int, int, NFSPROC_T *);
+void nfsrv_throwawayopens(NFSPROC_T *);
+int nfsrv_checkremove(vnode_t, int, NFSPROC_T *);
+void nfsd_recalldelegation(vnode_t, NFSPROC_T *);
+void nfsd_disabledelegation(vnode_t, NFSPROC_T *);
+int nfsrv_checksetattr(vnode_t, struct nfsrv_descript *,
+    nfsv4stateid_t *, struct nfsvattr *, nfsattrbit_t *, struct nfsexstuff *,
+    NFSPROC_T *);
+int nfsrv_checkgetattr(struct nfsrv_descript *, vnode_t,
+    struct nfsvattr *, nfsattrbit_t *, NFSPROC_T *);
+int nfsrv_nfsuserdport(struct nfsuserd_args *, NFSPROC_T *);
+void nfsrv_nfsuserddelport(void);
+void nfsrv_throwawayallstate(NFSPROC_T *);
+int nfsrv_checksequence(struct nfsrv_descript *, uint32_t, uint32_t *,
+    uint32_t *, int, uint32_t *, NFSPROC_T *);
+int nfsrv_checkreclaimcomplete(struct nfsrv_descript *, int);
+void nfsrv_cache_session(uint8_t *, uint32_t, int, struct mbuf **);
+void nfsrv_freeallbackchannel_xprts(void);
+int nfsrv_layoutcommit(struct nfsrv_descript *, vnode_t, int, int, uint64_t,
+    uint64_t, uint64_t, int, struct timespec *, int, nfsv4stateid_t *,
+    int, char *, int *, uint64_t *, struct ucred *, NFSPROC_T *);
+int nfsrv_layoutget(struct nfsrv_descript *, vnode_t, struct nfsexstuff *,
+    int, int *, uint64_t *, uint64_t *, uint64_t, nfsv4stateid_t *, int, int *,
+    int *, char *, struct ucred *, NFSPROC_T *);
+void nfsrv_flexmirrordel(char *, NFSPROC_T *);
+void nfsrv_recalloldlayout(NFSPROC_T *);
+int nfsrv_layoutreturn(struct nfsrv_descript *, vnode_t, int, int, uint64_t,
+    uint64_t, int, int, nfsv4stateid_t *, int, uint32_t *, int *,
+    struct ucred *, NFSPROC_T *);
+int nfsrv_getdevinfo(char *, int, uint32_t *, uint32_t *, int *, char **);
+void nfsrv_freeonedevid(struct nfsdevice *);
+void nfsrv_freealllayoutsanddevids(void);
+void nfsrv_freefilelayouts(fhandle_t *);
+int nfsrv_deldsserver(int, char *, NFSPROC_T *);
+struct nfsdevice *nfsrv_deldsnmp(int, struct nfsmount *, NFSPROC_T *);
+int nfsrv_createdevids(struct nfsd_nfsd_args *, NFSPROC_T *);
+int nfsrv_checkdsattr(struct nfsrv_descript *, vnode_t, NFSPROC_T *);
+int nfsrv_copymr(vnode_t, vnode_t, vnode_t, struct nfsdevice *,
+    struct pnfsdsfile *, struct pnfsdsfile *, int, struct ucred *, NFSPROC_T *);
+int nfsrv_mdscopymr(char *, char *, char *, char *, int *, char *, NFSPROC_T *,
+    struct vnode **, struct vnode **, struct pnfsdsfile **, struct nfsdevice **,
+    struct nfsdevice **);
+
+/* nfs_nfsdserv.c */
+int nfsrvd_access(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_getattr(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_setattr(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_lookup(struct nfsrv_descript *, int,
+    vnode_t, vnode_t *, fhandle_t *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsrvd_readlink(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_read(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_write(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_create(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_mknod(struct nfsrv_descript *, int,
+    vnode_t, vnode_t *, fhandle_t *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsrvd_remove(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_rename(struct nfsrv_descript *, int,
+    vnode_t, vnode_t, NFSPROC_T *, struct nfsexstuff *,
+    struct nfsexstuff *);
+int nfsrvd_link(struct nfsrv_descript *, int,
+    vnode_t, vnode_t, NFSPROC_T *, struct nfsexstuff *,
+    struct nfsexstuff *);
+int nfsrvd_symlink(struct nfsrv_descript *, int,
+    vnode_t, vnode_t *, fhandle_t *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsrvd_mkdir(struct nfsrv_descript *, int,
+    vnode_t, vnode_t *, fhandle_t *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsrvd_readdir(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_readdirplus(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_commit(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_statfs(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_fsinfo(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_close(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_delegpurge(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_delegreturn(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_getfh(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_lock(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_lockt(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_locku(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_openconfirm(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_opendowngrade(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_renew(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_secinfo(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_setclientid(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_setclientidcfrm(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_verify(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_open(struct nfsrv_descript *, int,
+    vnode_t, vnode_t *, fhandle_t *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsrvd_openattr(struct nfsrv_descript *, int,
+    vnode_t, vnode_t *, fhandle_t *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsrvd_releaselckown(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_pathconf(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_exchangeid(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_createsession(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_sequence(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_reclaimcomplete(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_destroyclientid(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_bindconnsess(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_destroysession(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_freestateid(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_layoutget(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_getdevinfo(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_layoutcommit(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_layoutreturn(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_teststateid(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+int nfsrvd_notsupp(struct nfsrv_descript *, int,
+    vnode_t, NFSPROC_T *, struct nfsexstuff *);
+
+/* nfs_nfsdsocket.c */
+void nfsrvd_rephead(struct nfsrv_descript *);
+void nfsrvd_dorpc(struct nfsrv_descript *, int, u_char *, int, u_int32_t,
+    NFSPROC_T *);
+
+/* nfs_nfsdcache.c */
+void nfsrvd_initcache(void);
+int nfsrvd_getcache(struct nfsrv_descript *);
+struct nfsrvcache *nfsrvd_updatecache(struct nfsrv_descript *);
+void nfsrvd_sentcache(struct nfsrvcache *, int, uint32_t);
+void nfsrvd_cleancache(void);
+void nfsrvd_refcache(struct nfsrvcache *);
+void nfsrvd_derefcache(struct nfsrvcache *);
+void nfsrvd_delcache(struct nfsrvcache *);
+void nfsrc_trimcache(uint64_t, uint32_t, int);
+
+/* nfs_commonsubs.c */
+void nfscl_reqstart(struct nfsrv_descript *, int, struct nfsmount *,
+    u_int8_t *, int, u_int32_t **, struct nfsclsession *, int, int);
+void nfsm_stateidtom(struct nfsrv_descript *, nfsv4stateid_t *, int);
+void nfscl_fillsattr(struct nfsrv_descript *, struct vattr *,
+      vnode_t, int, u_int32_t);
+void newnfs_init(void);
+int nfsaddr_match(int, union nethostaddr *, NFSSOCKADDR_T);
+int nfsaddr2_match(NFSSOCKADDR_T, NFSSOCKADDR_T);
+int nfsm_strtom(struct nfsrv_descript *, const char *, int);
+int nfsm_mbufuio(struct nfsrv_descript *, struct uio *, int);
+int nfsm_fhtom(struct nfsrv_descript *, u_int8_t *, int, int);
+int nfsm_advance(struct nfsrv_descript *, int, int);
+void *nfsm_dissct(struct nfsrv_descript *, int, int);
+void newnfs_trimleading(struct nfsrv_descript *);
+void newnfs_trimtrailing(struct nfsrv_descript *, mbuf_t,
+    caddr_t);
+void newnfs_copycred(struct nfscred *, struct ucred *);
+void newnfs_copyincred(struct ucred *, struct nfscred *);
+int nfsrv_dissectacl(struct nfsrv_descript *, NFSACL_T *, int *,
+    int *, NFSPROC_T *);
+int nfsrv_getattrbits(struct nfsrv_descript *, nfsattrbit_t *, int *,
+    int *);
+int nfsv4_loadattr(struct nfsrv_descript *, vnode_t,
+    struct nfsvattr *, struct nfsfh **, fhandle_t *, int,
+    struct nfsv3_pathconf *, struct statfs *, struct nfsstatfs *,
+    struct nfsfsinfo *, NFSACL_T *,
+    int, int *, u_int32_t *, u_int32_t *, NFSPROC_T *, struct ucred *);
+int nfsv4_lock(struct nfsv4lock *, int, int *, void *, struct mount *);
+void nfsv4_unlock(struct nfsv4lock *, int);
+void nfsv4_relref(struct nfsv4lock *);
+void nfsv4_getref(struct nfsv4lock *, int *, void *, struct mount *);
+int nfsv4_getref_nonblock(struct nfsv4lock *);
+int nfsv4_testlock(struct nfsv4lock *);
+int nfsrv_mtostr(struct nfsrv_descript *, char *, int);
+void nfsrv_cleanusergroup(void);
+int nfsrv_checkutf8(u_int8_t *, int);
+int newnfs_sndlock(int *);
+void newnfs_sndunlock(int *);
+int nfsv4_getipaddr(struct nfsrv_descript *, struct sockaddr_in *,
+    struct sockaddr_in6 *, sa_family_t *, int *);
+int nfsv4_seqsession(uint32_t, uint32_t, uint32_t, struct nfsslot *,
+    struct mbuf **, uint16_t);
+void nfsv4_seqsess_cacherep(uint32_t, struct nfsslot *, int, struct mbuf **);
+void nfsv4_setsequence(struct nfsmount *, struct nfsrv_descript *,
+    struct nfsclsession *, int);
+int nfsv4_sequencelookup(struct nfsmount *, struct nfsclsession *, int *,
+    int *, uint32_t *, uint8_t *);
+void nfsv4_freeslot(struct nfsclsession *, int);
+struct ucred *nfsrv_getgrpscred(struct ucred *);
+struct nfsdevice *nfsv4_findmirror(struct nfsmount *);
+
+/* nfs_clcomsubs.c */
+void nfsm_uiombuf(struct nfsrv_descript *, struct uio *, int);
+struct mbuf *nfsm_uiombuflist(struct uio *, int, struct mbuf **, char **);
+nfsuint64 *nfscl_getcookie(struct nfsnode *, off_t off, int);
+u_int8_t *nfscl_getmyip(struct nfsmount *, struct in6_addr *, int *);
+int nfsm_getfh(struct nfsrv_descript *, struct nfsfh **);
+int nfscl_mtofh(struct nfsrv_descript *, struct nfsfh **,
+        struct nfsvattr *, int *);
+int nfscl_postop_attr(struct nfsrv_descript *, struct nfsvattr *, int *,
+    void *);
+int nfscl_wcc_data(struct nfsrv_descript *, vnode_t,
+    struct nfsvattr *, int *, int *, void *);
+int nfsm_loadattr(struct nfsrv_descript *, struct nfsvattr *);
+int nfscl_request(struct nfsrv_descript *, vnode_t,
+         NFSPROC_T *, struct ucred *, void *);
+
+/* nfs_nfsdsubs.c */
+void nfsd_fhtovp(struct nfsrv_descript *, struct nfsrvfh *, int,
+    vnode_t *, struct nfsexstuff *,
+    mount_t *, int, NFSPROC_T *);
+int nfsd_excred(struct nfsrv_descript *, struct nfsexstuff *, struct ucred *);
+int nfsrv_mtofh(struct nfsrv_descript *, struct nfsrvfh *);
+int nfsrv_putattrbit(struct nfsrv_descript *, nfsattrbit_t *);
+void nfsrv_wcc(struct nfsrv_descript *, int, struct nfsvattr *, int,
+    struct nfsvattr *);
+int nfsv4_fillattr(struct nfsrv_descript *, struct mount *, vnode_t, NFSACL_T *,
+    struct vattr *, fhandle_t *, int, nfsattrbit_t *,
+    struct ucred *, NFSPROC_T *, int, int, int, int, uint64_t, struct statfs *);
+void nfsrv_fillattr(struct nfsrv_descript *, struct nfsvattr *);
+void nfsrv_adj(mbuf_t, int, int);
+void nfsrv_postopattr(struct nfsrv_descript *, int, struct nfsvattr *);
+int nfsd_errmap(struct nfsrv_descript *);
+void nfsv4_uidtostr(uid_t, u_char **, int *, NFSPROC_T *);
+int nfsv4_strtouid(struct nfsrv_descript *, u_char *, int, uid_t *,
+    NFSPROC_T *);
+void nfsv4_gidtostr(gid_t, u_char **, int *, NFSPROC_T *);
+int nfsv4_strtogid(struct nfsrv_descript *, u_char *, int, gid_t *,
+    NFSPROC_T *);
+int nfsrv_checkuidgid(struct nfsrv_descript *, struct nfsvattr *);
+void nfsrv_fixattr(struct nfsrv_descript *, vnode_t,
+    struct nfsvattr *, NFSACL_T *, NFSPROC_T *, nfsattrbit_t *,
+    struct nfsexstuff *);
+int nfsrv_errmoved(int);
+int nfsrv_putreferralattr(struct nfsrv_descript *, nfsattrbit_t *,
+    struct nfsreferral *, int, int *);
+int nfsrv_parsename(struct nfsrv_descript *, char *, u_long *,
+    NFSPATHLEN_T *);
+void nfsd_init(void);
+int nfsd_checkrootexp(struct nfsrv_descript *);
+void nfsd_getminorvers(struct nfsrv_descript *, u_char *, u_char **, int *,
+    u_int32_t *);
+
+/* nfs_clvfsops.c */
+void nfscl_retopts(struct nfsmount *, char *, size_t);
+
+/* nfs_commonport.c */
+int nfsrv_lookupfilename(struct nameidata *, char *, NFSPROC_T *);
+void nfsrv_object_create(vnode_t, NFSPROC_T *);
+int nfsrv_mallocmget_limit(void);
+int nfsvno_v4rootexport(struct nfsrv_descript *);
+void newnfs_portinit(void);
+struct ucred *newnfs_getcred(void);
+void newnfs_setroot(struct ucred *);
+int nfs_catnap(int, int, const char *);
+struct nfsreferral *nfsv4root_getreferral(vnode_t, vnode_t, u_int32_t);
+int nfsvno_pathconf(vnode_t, int, long *, struct ucred *, NFSPROC_T *);
+int nfsrv_atroot(vnode_t, uint64_t *);
+void newnfs_timer(void *);
+int nfs_supportsnfsv4acls(vnode_t);
+
+/* nfs_commonacl.c */
+int nfsrv_dissectace(struct nfsrv_descript *, struct acl_entry *,
+    int *, int *, NFSPROC_T *);
+int nfsrv_buildacl(struct nfsrv_descript *, NFSACL_T *, enum vtype,
+    NFSPROC_T *);
+int nfsrv_compareacl(NFSACL_T *, NFSACL_T *);
+
+/* nfs_clrpcops.c */
+int nfsrpc_null(vnode_t, struct ucred *, NFSPROC_T *);
+int nfsrpc_access(vnode_t, int, struct ucred *, NFSPROC_T *,
+    struct nfsvattr *, int *);
+int nfsrpc_accessrpc(vnode_t, u_int32_t, struct ucred *,
+    NFSPROC_T *, struct nfsvattr *, int *, u_int32_t *, void *);
+int nfsrpc_open(vnode_t, int, struct ucred *, NFSPROC_T *);
+int nfsrpc_openrpc(struct nfsmount *, vnode_t, u_int8_t *, int, u_int8_t *, int,
+    u_int32_t, struct nfsclopen *, u_int8_t *, int, struct nfscldeleg **, int,
+    u_int32_t, struct ucred *, NFSPROC_T *, int, int);
+int nfsrpc_opendowngrade(vnode_t, u_int32_t, struct nfsclopen *,
+    struct ucred *, NFSPROC_T *);
+int nfsrpc_close(vnode_t, int, NFSPROC_T *);
+int nfsrpc_closerpc(struct nfsrv_descript *, struct nfsmount *,
+    struct nfsclopen *, struct ucred *, NFSPROC_T *, int);
+int nfsrpc_openconfirm(vnode_t, u_int8_t *, int, struct nfsclopen *,
+    struct ucred *, NFSPROC_T *);
+int nfsrpc_setclient(struct nfsmount *, struct nfsclclient *, int,
+    struct ucred *, NFSPROC_T *);
+int nfsrpc_getattr(vnode_t, struct ucred *, NFSPROC_T *,
+    struct nfsvattr *, void *);
+int nfsrpc_getattrnovp(struct nfsmount *, u_int8_t *, int, int,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, u_int64_t *, uint32_t *);
+int nfsrpc_setattr(vnode_t, struct vattr *, NFSACL_T *, struct ucred *,
+    NFSPROC_T *, struct nfsvattr *, int *, void *);
+int nfsrpc_lookup(vnode_t, char *, int, struct ucred *, NFSPROC_T *,
+    struct nfsvattr *, struct nfsvattr *, struct nfsfh **, int *, int *,
+    void *);
+int nfsrpc_readlink(vnode_t, struct uio *, struct ucred *,
+    NFSPROC_T *, struct nfsvattr *, int *, void *);
+int nfsrpc_read(vnode_t, struct uio *, struct ucred *, NFSPROC_T *,
+    struct nfsvattr *, int *, void *);
+int nfsrpc_write(vnode_t, struct uio *, int *, int *,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, int *, void *, int);
+int nfsrpc_mknod(vnode_t, char *, int, struct vattr *, u_int32_t,
+    enum vtype, struct ucred *, NFSPROC_T *, struct nfsvattr *,
+    struct nfsvattr *, struct nfsfh **, int *, int *, void *);
+int nfsrpc_create(vnode_t, char *, int, struct vattr *, nfsquad_t,
+    int, struct ucred *, NFSPROC_T *, struct nfsvattr *, struct nfsvattr *,
+    struct nfsfh **, int *, int *, void *);
+int nfsrpc_remove(vnode_t, char *, int, vnode_t, struct ucred *, NFSPROC_T *,
+    struct nfsvattr *, int *, void *);
+int nfsrpc_rename(vnode_t, vnode_t, char *, int, vnode_t, vnode_t, char *, int,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, struct nfsvattr *,
+    int *, int *, void *, void *);
+int nfsrpc_link(vnode_t, vnode_t, char *, int,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, struct nfsvattr *,
+    int *, int *, void *);
+int nfsrpc_symlink(vnode_t, char *, int, char *, struct vattr *,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, struct nfsvattr *,
+    struct nfsfh **, int *, int *, void *);
+int nfsrpc_mkdir(vnode_t, char *, int, struct vattr *,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, struct nfsvattr *,
+    struct nfsfh **, int *, int *, void *);
+int nfsrpc_rmdir(vnode_t, char *, int, struct ucred *, NFSPROC_T *,
+    struct nfsvattr *, int *, void *);
+int nfsrpc_readdir(vnode_t, struct uio *, nfsuint64 *, struct ucred *,
+    NFSPROC_T *, struct nfsvattr *, int *, int *, void *);
+int nfsrpc_readdirplus(vnode_t, struct uio *, nfsuint64 *, 
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, int *, int *, void *);
+int nfsrpc_commit(vnode_t, u_quad_t, int, struct ucred *,
+    NFSPROC_T *, struct nfsvattr *, int *, void *);
+int nfsrpc_advlock(vnode_t, off_t, int, struct flock *, int,
+    struct ucred *, NFSPROC_T *, void *, int);
+int nfsrpc_lockt(struct nfsrv_descript *, vnode_t,
+    struct nfsclclient *, u_int64_t, u_int64_t, struct flock *,
+    struct ucred *, NFSPROC_T *, void *, int);
+int nfsrpc_lock(struct nfsrv_descript *, struct nfsmount *, vnode_t,
+    u_int8_t *, int, struct nfscllockowner *, int, int, u_int64_t,
+    u_int64_t, short, struct ucred *, NFSPROC_T *, int);
+int nfsrpc_statfs(vnode_t, struct nfsstatfs *, struct nfsfsinfo *,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, int *, void *);
+int nfsrpc_fsinfo(vnode_t, struct nfsfsinfo *, struct ucred *,
+    NFSPROC_T *, struct nfsvattr *, int *, void *);
+int nfsrpc_pathconf(vnode_t, struct nfsv3_pathconf *,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, int *, void *);
+int nfsrpc_renew(struct nfsclclient *, struct nfsclds *, struct ucred *,
+    NFSPROC_T *);
+int nfsrpc_rellockown(struct nfsmount *, struct nfscllockowner *, uint8_t *,
+    int, struct ucred *, NFSPROC_T *);
+int nfsrpc_getdirpath(struct nfsmount *, u_char *, struct ucred *,
+    NFSPROC_T *);
+int nfsrpc_delegreturn(struct nfscldeleg *, struct ucred *,
+    struct nfsmount *, NFSPROC_T *, int);
+int nfsrpc_getacl(vnode_t, struct ucred *, NFSPROC_T *, NFSACL_T *, void *);
+int nfsrpc_setacl(vnode_t, struct ucred *, NFSPROC_T *, NFSACL_T *, void *);
+int nfsrpc_exchangeid(struct nfsmount *, struct nfsclclient *,
+    struct nfssockreq *, uint32_t, struct nfsclds **, struct ucred *,
+    NFSPROC_T *);
+int nfsrpc_createsession(struct nfsmount *, struct nfsclsession *,
+    struct nfssockreq *, uint32_t, int, struct ucred *, NFSPROC_T *);
+int nfsrpc_destroysession(struct nfsmount *, struct nfsclclient *,
+    struct ucred *, NFSPROC_T *);
+int nfsrpc_destroyclient(struct nfsmount *, struct nfsclclient *,
+    struct ucred *, NFSPROC_T *);
+int nfsrpc_getdeviceinfo(struct nfsmount *, uint8_t *, int, uint32_t *,
+    struct nfscldevinfo **, struct ucred *, NFSPROC_T *);
+int nfsrpc_layoutcommit(struct nfsmount *, uint8_t *, int, int,
+    uint64_t, uint64_t, uint64_t, nfsv4stateid_t *, int, struct ucred *,
+    NFSPROC_T *, void *);
+int nfsrpc_layoutreturn(struct nfsmount *, uint8_t *, int, int, int, uint32_t,
+    int, uint64_t, uint64_t, nfsv4stateid_t *, struct ucred *, NFSPROC_T *,
+    uint32_t, uint32_t, char *);
+int nfsrpc_reclaimcomplete(struct nfsmount *, struct ucred *, NFSPROC_T *);
+int nfscl_doiods(vnode_t, struct uio *, int *, int *, uint32_t, int,
+    struct ucred *, NFSPROC_T *);
+int nfscl_findlayoutforio(struct nfscllayout *, uint64_t, uint32_t,
+    struct nfsclflayout **);
+void nfscl_freenfsclds(struct nfsclds *);
+
+/* nfs_clstate.c */
+int nfscl_open(vnode_t, u_int8_t *, int, u_int32_t, int,
+    struct ucred *, NFSPROC_T *, struct nfsclowner **, struct nfsclopen **,
+    int *, int *, int);
+int nfscl_getstateid(vnode_t, u_int8_t *, int, u_int32_t, int, struct ucred *,
+    NFSPROC_T *, nfsv4stateid_t *, void **);
+void nfscl_ownerrelease(struct nfsmount *, struct nfsclowner *, int, int, int);
+void nfscl_openrelease(struct nfsmount *, struct nfsclopen *, int, int);
+int nfscl_getcl(struct mount *, struct ucred *, NFSPROC_T *, int,
+    struct nfsclclient **);
+struct nfsclclient *nfscl_findcl(struct nfsmount *);
+void nfscl_clientrelease(struct nfsclclient *);
+void nfscl_freelock(struct nfscllock *, int);
+void nfscl_freelockowner(struct nfscllockowner *, int);
+int nfscl_getbytelock(vnode_t, u_int64_t, u_int64_t, short,
+    struct ucred *, NFSPROC_T *, struct nfsclclient *, int, void *, int,
+    u_int8_t *, u_int8_t *, struct nfscllockowner **, int *, int *);
+int nfscl_relbytelock(vnode_t, u_int64_t, u_int64_t,
+    struct ucred *, NFSPROC_T *, int, struct nfsclclient *,
+    void *, int, struct nfscllockowner **, int *);
+int nfscl_checkwritelocked(vnode_t, struct flock *,
+    struct ucred *, NFSPROC_T *, void *, int);
+void nfscl_lockrelease(struct nfscllockowner *, int, int);
+void nfscl_fillclid(u_int64_t, char *, u_int8_t *, u_int16_t);
+void nfscl_filllockowner(void *, u_int8_t *, int);
+void nfscl_freeopen(struct nfsclopen *, int);
+void nfscl_umount(struct nfsmount *, NFSPROC_T *);
+void nfscl_renewthread(struct nfsclclient *, NFSPROC_T *);
+void nfscl_initiate_recovery(struct nfsclclient *);
+int nfscl_hasexpired(struct nfsclclient *, u_int32_t, NFSPROC_T *);
+void nfscl_dumpstate(struct nfsmount *, int, int, int, int);
+void nfscl_dupopen(vnode_t, int);
+int nfscl_getclose(vnode_t, struct nfsclclient **);
+int nfscl_doclose(vnode_t, struct nfsclclient **, NFSPROC_T *);
+void nfsrpc_doclose(struct nfsmount *, struct nfsclopen *, NFSPROC_T *);
+int nfscl_deleg(mount_t, struct nfsclclient *, u_int8_t *, int,
+    struct ucred *, NFSPROC_T *, struct nfscldeleg **);
+void nfscl_lockinit(struct nfsv4lock *);
+void nfscl_lockexcl(struct nfsv4lock *, void *);
+void nfscl_lockunlock(struct nfsv4lock *);
+void nfscl_lockderef(struct nfsv4lock *);
+void nfscl_docb(struct nfsrv_descript *, NFSPROC_T *);
+void nfscl_releasealllocks(struct nfsclclient *, vnode_t, NFSPROC_T *, void *,
+    int);
+int nfscl_lockt(vnode_t, struct nfsclclient *, u_int64_t,
+    u_int64_t, struct flock *, NFSPROC_T *, void *, int);
+int nfscl_mustflush(vnode_t);
+int nfscl_nodeleg(vnode_t, int);
+int nfscl_removedeleg(vnode_t, NFSPROC_T *, nfsv4stateid_t *);
+int nfscl_getref(struct nfsmount *);
+void nfscl_relref(struct nfsmount *);
+int nfscl_renamedeleg(vnode_t, nfsv4stateid_t *, int *, vnode_t,
+    nfsv4stateid_t *, int *, NFSPROC_T *);
+void nfscl_reclaimnode(vnode_t);
+void nfscl_newnode(vnode_t);
+void nfscl_delegmodtime(vnode_t);
+void nfscl_deleggetmodtime(vnode_t, struct timespec *);
+int nfscl_tryclose(struct nfsclopen *, struct ucred *,
+    struct nfsmount *, NFSPROC_T *);
+void nfscl_cleanup(NFSPROC_T *);
+int nfscl_layout(struct nfsmount *, vnode_t, u_int8_t *, int, nfsv4stateid_t *,
+    int, int, struct nfsclflayouthead *, struct nfscllayout **, struct ucred *,
+    NFSPROC_T *);
+struct nfscllayout *nfscl_getlayout(struct nfsclclient *, uint8_t *, int,
+    uint64_t, struct nfsclflayout **, int *);
+void nfscl_dserr(uint32_t, uint32_t, struct nfscldevinfo *,
+    struct nfscllayout *, struct nfsclds *);
+void nfscl_cancelreqs(struct nfsclds *);
+void nfscl_rellayout(struct nfscllayout *, int);
+struct nfscldevinfo *nfscl_getdevinfo(struct nfsclclient *, uint8_t *,
+    struct nfscldevinfo *);
+void nfscl_reldevinfo(struct nfscldevinfo *);
+int nfscl_adddevinfo(struct nfsmount *, struct nfscldevinfo *, int,
+    struct nfsclflayout *);
+void nfscl_freelayout(struct nfscllayout *);
+void nfscl_freeflayout(struct nfsclflayout *);
+void nfscl_freedevinfo(struct nfscldevinfo *);
+int nfscl_layoutcommit(vnode_t, NFSPROC_T *);
+
+/* nfs_clport.c */
+int nfscl_nget(mount_t, vnode_t, struct nfsfh *,
+    struct componentname *, NFSPROC_T *, struct nfsnode **, void *, int);
+NFSPROC_T *nfscl_getparent(NFSPROC_T *);
+void nfscl_start_renewthread(struct nfsclclient *);
+void nfscl_loadsbinfo(struct nfsmount *, struct nfsstatfs *, void *);
+void nfscl_loadfsinfo (struct nfsmount *, struct nfsfsinfo *);
+void nfscl_delegreturn(struct nfscldeleg *, int, struct nfsmount *,
+    struct ucred *, NFSPROC_T *);
+void nfsrvd_cbinit(int);
+int nfscl_checksattr(struct vattr *, struct nfsvattr *);
+int nfscl_ngetreopen(mount_t, u_int8_t *, int, NFSPROC_T *,
+    struct nfsnode **);
+int nfscl_procdoesntexist(u_int8_t *);
+int nfscl_maperr(NFSPROC_T *, int, uid_t, gid_t);
+
+/* nfs_clsubs.c */
+void nfscl_init(void);
+
+/* nfs_clbio.c */
+int ncl_flush(vnode_t, int, NFSPROC_T *, int, int);
+
+/* nfs_clnode.c */
+void ncl_invalcaches(vnode_t);
+
+/* nfs_nfsdport.c */
+int nfsvno_getattr(vnode_t, struct nfsvattr *, struct nfsrv_descript *,
+    NFSPROC_T *, int, nfsattrbit_t *);
+int nfsvno_setattr(vnode_t, struct nfsvattr *, struct ucred *,
+    NFSPROC_T *, struct nfsexstuff *);
+int nfsvno_getfh(vnode_t, fhandle_t *, NFSPROC_T *);
+int nfsvno_accchk(vnode_t, accmode_t, struct ucred *,
+    struct nfsexstuff *, NFSPROC_T *, int, int, u_int32_t *);
+int nfsvno_namei(struct nfsrv_descript *, struct nameidata *,
+    vnode_t, int, struct nfsexstuff *, NFSPROC_T *, vnode_t *);
+void nfsvno_setpathbuf(struct nameidata *, char **, u_long **);
+void nfsvno_relpathbuf(struct nameidata *);
+int nfsvno_readlink(vnode_t, struct ucred *, NFSPROC_T *, mbuf_t *,
+    mbuf_t *, int *);
+int nfsvno_read(vnode_t, off_t, int, struct ucred *, NFSPROC_T *,
+    mbuf_t *, mbuf_t *);
+int nfsvno_write(vnode_t, off_t, int, int, int *, mbuf_t,
+    char *, struct ucred *, NFSPROC_T *);
+int nfsvno_createsub(struct nfsrv_descript *, struct nameidata *,
+    vnode_t *, struct nfsvattr *, int *, int32_t *, NFSDEV_T, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsvno_mknod(struct nameidata *, struct nfsvattr *, struct ucred *,
+    NFSPROC_T *);
+int nfsvno_mkdir(struct nameidata *,
+    struct nfsvattr *, uid_t, struct ucred *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsvno_symlink(struct nameidata *, struct nfsvattr *, char *, int, int,
+    uid_t, struct ucred *, NFSPROC_T *, struct nfsexstuff *);
+int nfsvno_getsymlink(struct nfsrv_descript *, struct nfsvattr *,
+    NFSPROC_T *, char **, int *);
+int nfsvno_removesub(struct nameidata *, int, struct ucred *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsvno_rmdirsub(struct nameidata *, int, struct ucred *, NFSPROC_T *,
+    struct nfsexstuff *);
+int nfsvno_rename(struct nameidata *, struct nameidata *, u_int32_t,
+    u_int32_t, struct ucred *, NFSPROC_T *);
+int nfsvno_link(struct nameidata *, vnode_t, struct ucred *,
+    NFSPROC_T *, struct nfsexstuff *);
+int nfsvno_fsync(vnode_t, u_int64_t, int, struct ucred *, NFSPROC_T *);
+int nfsvno_statfs(vnode_t, struct statfs *);
+void nfsvno_getfs(struct nfsfsinfo *, int);
+void nfsvno_open(struct nfsrv_descript *, struct nameidata *, nfsquad_t,
+    nfsv4stateid_t *, struct nfsstate *, int *, struct nfsvattr *, int32_t *,
+    int, NFSACL_T *, nfsattrbit_t *, struct ucred *, NFSPROC_T *,
+    struct nfsexstuff *, vnode_t *);
+int nfsvno_updfilerev(vnode_t, struct nfsvattr *, struct nfsrv_descript *,
+    NFSPROC_T *);
+int nfsvno_fillattr(struct nfsrv_descript *, struct mount *, vnode_t,
+    struct nfsvattr *, fhandle_t *, int, nfsattrbit_t *,
+    struct ucred *, NFSPROC_T *, int, int, int, int, uint64_t);
+int nfsrv_sattr(struct nfsrv_descript *, vnode_t, struct nfsvattr *, nfsattrbit_t *,
+    NFSACL_T *, NFSPROC_T *);
+int nfsv4_sattr(struct nfsrv_descript *, vnode_t, struct nfsvattr *, nfsattrbit_t *,
+    NFSACL_T *, NFSPROC_T *);
+int nfsvno_checkexp(mount_t, NFSSOCKADDR_T, struct nfsexstuff *,
+    struct ucred **);
+int nfsvno_fhtovp(mount_t, fhandle_t *, NFSSOCKADDR_T, int,
+    vnode_t *, struct nfsexstuff *, struct ucred **);
+vnode_t nfsvno_getvp(fhandle_t *);
+int nfsvno_advlock(vnode_t, int, u_int64_t, u_int64_t, NFSPROC_T *);
+int nfsrv_v4rootexport(void *, struct ucred *, NFSPROC_T *);
+int nfsvno_testexp(struct nfsrv_descript *, struct nfsexstuff *);
+uint32_t nfsrv_hashfh(fhandle_t *);
+uint32_t nfsrv_hashsessionid(uint8_t *);
+void nfsrv_backupstable(void);
+int nfsrv_dsgetdevandfh(struct vnode *, NFSPROC_T *, int *, fhandle_t *,
+    char *);
+int nfsrv_dsgetsockmnt(struct vnode *, int, char *, int *, int *,
+    NFSPROC_T *, struct vnode **, fhandle_t *, char *, char *,
+    struct vnode **, struct nfsmount **, struct nfsmount *, int *, int *);
+int nfsrv_dscreate(struct vnode *, struct vattr *, struct vattr *,
+    fhandle_t *, struct pnfsdsfile *, struct pnfsdsattr *, char *,
+    struct ucred *, NFSPROC_T *, struct vnode **);
+int nfsrv_updatemdsattr(struct vnode *, struct nfsvattr *, NFSPROC_T *);
+void nfsrv_killrpcs(struct nfsmount *);
+int nfsrv_setacl(struct vnode *, NFSACL_T *, struct ucred *, NFSPROC_T *);
+
+/* nfs_commonkrpc.c */
+int newnfs_nmcancelreqs(struct nfsmount *);
+void newnfs_set_sigmask(struct thread *, sigset_t *);
+void newnfs_restore_sigmask(struct thread *, sigset_t *);
+int newnfs_msleep(struct thread *, void *, struct mtx *, int, char *, int);
+int newnfs_request(struct nfsrv_descript *, struct nfsmount *,
+    struct nfsclient *, struct nfssockreq *, vnode_t, NFSPROC_T *,
+    struct ucred *, u_int32_t, u_int32_t, u_char *, int, u_int64_t *,
+    struct nfsclsession *);
+int newnfs_connect(struct nfsmount *, struct nfssockreq *,
+    struct ucred *, NFSPROC_T *, int);
+void newnfs_disconnect(struct nfssockreq *);
+int newnfs_sigintr(struct nfsmount *, NFSPROC_T *);
+
+/* nfs_nfsdkrpc.c */
+int nfsrvd_addsock(struct file *);
+int nfsrvd_nfsd(NFSPROC_T *, struct nfsd_nfsd_args *);
+void nfsrvd_init(int);
+
+/* nfs_clkrpc.c */
+int nfscbd_addsock(struct file *);
+int nfscbd_nfsd(NFSPROC_T *, struct nfsd_nfscbd_args *);
+
diff --git a/freebsd/sys/fs/nfs/nfscl.h b/freebsd/sys/fs/nfs/nfscl.h
new file mode 100644
index 0000000..52da0af
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfscl.h
@@ -0,0 +1,84 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	_NFS_NFSCL_H
+#define	_NFS_NFSCL_H
+
+/*
+ * Extra stuff for an NFSv4 nfsnode.
+ * MALLOC'd to the correct length for the name and file handle.
+ * n4_data has the file handle, followed by the file name.
+ * The macro NFS4NODENAME() returns a pointer to the start of the
+ * name.
+ */
+struct nfsv4node {
+	u_int16_t	n4_fhlen;
+	u_int16_t	n4_namelen;
+	u_int8_t	n4_data[1];
+};
+
+#define	NFS4NODENAME(n)	(&((n)->n4_data[(n)->n4_fhlen]))
+
+/*
+ * Just a macro to convert the nfscl_reqstart arguments.
+ */
+#define	NFSCL_REQSTART(n, p, v) 					\
+	nfscl_reqstart((n), (p), VFSTONFS((v)->v_mount), 		\
+	    VTONFS(v)->n_fhp->nfh_fh, VTONFS(v)->n_fhp->nfh_len, NULL,	\
+	    NULL, 0, 0)
+
+/*
+ * These two macros convert between a lease duration and renew interval.
+ * For now, just make the renew interval 1/2 the lease duration.
+ * (They should be inverse operators.)
+ */
+#define	NFSCL_RENEW(l)	(((l) < 2) ? 1 : ((l) / 2))
+#define	NFSCL_LEASE(r)	((r) * 2)
+
+/* This macro checks to see if a forced dismount is about to occur. */
+#define	NFSCL_FORCEDISM(m)	(((m)->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || \
+    (VFSTONFS(m)->nm_privflag & NFSMNTP_FORCEDISM) != 0)
+
+/*
+ * These flag bits are used for the argument to nfscl_fillsattr() to
+ * indicate special handling of the attributes.
+ */
+#define	NFSSATTR_FULL		0x1
+#define	NFSSATTR_SIZE0		0x2
+#define	NFSSATTR_SIZENEG1	0x4
+#define	NFSSATTR_SIZERDEV	0x8
+
+/* Use this macro for debug printfs. */
+#define	NFSCL_DEBUG(level, ...)	do {					\
+		if (nfscl_debuglevel >= (level))			\
+			printf(__VA_ARGS__);				\
+	} while (0)
+
+#endif	/* _NFS_NFSCL_H */
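
As a quick illustration of the nfsv4node layout described in nfscl.h above (one
allocation holding the file handle followed by the name, with NFS4NODENAME()
pointing just past the handle), here is a minimal user-space sketch. It is not
part of the import: plain malloc() stands in for the kernel allocator and
example_v4node_alloc() is a hypothetical helper.

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct nfsv4node {
		uint16_t	n4_fhlen;
		uint16_t	n4_namelen;
		uint8_t		n4_data[1];
	};

	#define	NFS4NODENAME(n)	(&((n)->n4_data[(n)->n4_fhlen]))

	/* Allocate one block sized for both variable-length parts. */
	static struct nfsv4node *
	example_v4node_alloc(const uint8_t *fh, uint16_t fhlen,
	    const char *name, uint16_t namelen)
	{
		struct nfsv4node *np;

		np = malloc(sizeof(*np) + fhlen + namelen);
		if (np == NULL)
			return (NULL);
		np->n4_fhlen = fhlen;
		np->n4_namelen = namelen;
		memcpy(np->n4_data, fh, fhlen);			/* file handle first */
		memcpy(NFS4NODENAME(np), name, namelen);	/* then the name */
		return (np);
	}
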
diff --git a/freebsd/sys/fs/nfs/nfsclstate.h b/freebsd/sys/fs/nfs/nfsclstate.h
new file mode 100644
index 0000000..2ada4bf
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsclstate.h
@@ -0,0 +1,446 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSCLSTATE_H_
+#define	_NFS_NFSCLSTATE_H_
+
+/*
+ * Definitions for NFS V4 client state handling.
+ */
+LIST_HEAD(nfsclopenhead, nfsclopen);
+LIST_HEAD(nfscllockownerhead, nfscllockowner);
+SLIST_HEAD(nfscllockownerfhhead, nfscllockownerfh);
+LIST_HEAD(nfscllockhead, nfscllock);
+LIST_HEAD(nfsclhead, nfsclclient);
+LIST_HEAD(nfsclownerhead, nfsclowner);
+TAILQ_HEAD(nfscldeleghead, nfscldeleg);
+LIST_HEAD(nfscldeleghash, nfscldeleg);
+TAILQ_HEAD(nfscllayouthead, nfscllayout);
+LIST_HEAD(nfscllayouthash, nfscllayout);
+LIST_HEAD(nfsclflayouthead, nfsclflayout);
+LIST_HEAD(nfscldevinfohead, nfscldevinfo);
+LIST_HEAD(nfsclrecalllayouthead, nfsclrecalllayout);
+#define	NFSCLDELEGHASHSIZE	256
+#define	NFSCLDELEGHASH(c, f, l)							\
+	(&((c)->nfsc_deleghash[ncl_hash((f), (l)) % NFSCLDELEGHASHSIZE]))
+#define	NFSCLLAYOUTHASHSIZE	256
+#define	NFSCLLAYOUTHASH(c, f, l)						\
+	(&((c)->nfsc_layouthash[ncl_hash((f), (l)) % NFSCLLAYOUTHASHSIZE]))
+
+/* Structure for NFSv4.1 session stuff. */
+struct nfsclsession {
+	struct mtx	nfsess_mtx;
+	struct nfsslot	nfsess_cbslots[NFSV4_CBSLOTS];
+	nfsquad_t	nfsess_clientid;
+	SVCXPRT		*nfsess_xprt;		/* For backchannel callback */
+	uint32_t	nfsess_slotseq[64];	/* Max for 64bit nm_slots */
+	uint64_t	nfsess_slots;
+	uint32_t	nfsess_sequenceid;
+	uint32_t	nfsess_maxcache;	/* Max size for cached reply. */
+	uint16_t	nfsess_foreslots;
+	uint16_t	nfsess_backslots;
+	uint8_t		nfsess_sessionid[NFSX_V4SESSIONID];
+	uint8_t		nfsess_defunct;		/* Non-zero for old sessions */
+};
+
+/*
+ * This structure holds the session, clientid and related information
+ * needed for an NFSv4.1 Meta Data Server (MDS) or Data Server (DS).
+ * It is malloc'd to the correct length.
+ */
+struct nfsclds {
+	TAILQ_ENTRY(nfsclds)	nfsclds_list;
+	struct nfsclsession	nfsclds_sess;
+	struct mtx		nfsclds_mtx;
+	struct nfssockreq	*nfsclds_sockp;
+	time_t			nfsclds_expire;
+	uint16_t		nfsclds_flags;
+	uint16_t		nfsclds_servownlen;
+	uint8_t			nfsclds_verf[NFSX_VERF];
+	uint8_t			nfsclds_serverown[0];
+};
+
+/*
+ * Flags for nfsclds_flags.
+ */
+#define	NFSCLDS_HASWRITEVERF	0x0001
+#define	NFSCLDS_MDS		0x0002
+#define	NFSCLDS_DS		0x0004
+#define	NFSCLDS_CLOSED		0x0008
+#define	NFSCLDS_SAMECONN	0x0010
+
+struct nfsclclient {
+	LIST_ENTRY(nfsclclient) nfsc_list;
+	struct nfsclownerhead	nfsc_owner;
+	struct nfscldeleghead	nfsc_deleg;
+	struct nfscldeleghash	nfsc_deleghash[NFSCLDELEGHASHSIZE];
+	struct nfscllayouthead	nfsc_layout;
+	struct nfscllayouthash	nfsc_layouthash[NFSCLLAYOUTHASHSIZE];
+	struct nfscldevinfohead	nfsc_devinfo;
+	struct nfsv4lock	nfsc_lock;
+	struct proc		*nfsc_renewthread;
+	struct nfsmount		*nfsc_nmp;
+	time_t			nfsc_expire;
+	u_int32_t		nfsc_clientidrev;
+	u_int32_t		nfsc_rev;
+	u_int32_t		nfsc_renew;
+	u_int32_t		nfsc_cbident;
+	u_int16_t		nfsc_flags;
+	u_int16_t		nfsc_idlen;
+	u_int8_t		nfsc_id[1];	/* Malloc'd to correct length */
+};
+
+/*
+ * Bits for nfsc_flags.
+ */
+#define	NFSCLFLAGS_INITED	0x0001
+#define	NFSCLFLAGS_HASCLIENTID	0x0002
+#define	NFSCLFLAGS_RECOVER	0x0004
+#define	NFSCLFLAGS_UMOUNT	0x0008
+#define	NFSCLFLAGS_HASTHREAD	0x0010
+#define	NFSCLFLAGS_AFINET6	0x0020
+#define	NFSCLFLAGS_EXPIREIT	0x0040
+#define	NFSCLFLAGS_FIRSTDELEG	0x0080
+#define	NFSCLFLAGS_GOTDELEG	0x0100
+#define	NFSCLFLAGS_RECVRINPROG	0x0200
+
+struct nfsclowner {
+	LIST_ENTRY(nfsclowner)	nfsow_list;
+	struct nfsclopenhead	nfsow_open;
+	struct nfsclclient	*nfsow_clp;
+	u_int32_t		nfsow_seqid;
+	u_int32_t		nfsow_defunct;
+	struct nfsv4lock	nfsow_rwlock;
+	u_int8_t		nfsow_owner[NFSV4CL_LOCKNAMELEN];
+};
+
+/*
+ * MALLOC'd to the correct length to accommodate the file handle.
+ */
+struct nfscldeleg {
+	TAILQ_ENTRY(nfscldeleg)	nfsdl_list;
+	LIST_ENTRY(nfscldeleg)	nfsdl_hash;
+	struct nfsclownerhead	nfsdl_owner;	/* locally issued state */
+	struct nfscllockownerhead nfsdl_lock;
+	nfsv4stateid_t		nfsdl_stateid;
+	struct acl_entry	nfsdl_ace;	/* Delegation ace */
+	struct nfsclclient	*nfsdl_clp;
+	struct nfsv4lock	nfsdl_rwlock;	/* for active I/O ops */
+	struct nfscred		nfsdl_cred;	/* Cred. used for Open */
+	time_t			nfsdl_timestamp; /* used for stale cleanup */
+	u_int64_t		nfsdl_sizelimit; /* Limit for file growth */
+	u_int64_t		nfsdl_size;	/* saved copy of file size */
+	u_int64_t		nfsdl_change;	/* and change attribute */
+	struct timespec		nfsdl_modtime;	/* local modify time */
+	u_int16_t		nfsdl_fhlen;
+	u_int8_t		nfsdl_flags;
+	u_int8_t		nfsdl_fh[1];	/* must be last */
+};
+
+/*
+ * nfsdl_flags bits.
+ */
+#define	NFSCLDL_READ		0x01
+#define	NFSCLDL_WRITE		0x02
+#define	NFSCLDL_RECALL		0x04
+#define	NFSCLDL_NEEDRECLAIM	0x08
+#define	NFSCLDL_ZAPPED		0x10
+#define	NFSCLDL_MODTIMESET	0x20
+#define	NFSCLDL_DELEGRET	0x40
+
+/*
+ * MALLOC'd to the correct length to accommodate the file handle.
+ */
+struct nfsclopen {
+	LIST_ENTRY(nfsclopen)	nfso_list;
+	struct nfscllockownerhead nfso_lock;
+	nfsv4stateid_t		nfso_stateid;
+	struct nfsclowner	*nfso_own;
+	struct nfscred		nfso_cred;	/* Cred. used for Open */
+	u_int32_t		nfso_mode;
+	u_int32_t		nfso_opencnt;
+	u_int16_t		nfso_fhlen;
+	u_int8_t		nfso_posixlock;	/* 1 for POSIX type locking */
+	u_int8_t		nfso_fh[1];	/* must be last */
+};
+
+/*
+ * Return values for nfscl_open(). NFSCLOPEN_OK must == 0.
+ */
+#define	NFSCLOPEN_OK		0
+#define	NFSCLOPEN_DOOPEN	1
+#define	NFSCLOPEN_DOOPENDOWNGRADE 2
+#define	NFSCLOPEN_SETCRED	3
+
+struct nfscllockowner {
+	LIST_ENTRY(nfscllockowner) nfsl_list;
+	struct nfscllockhead	nfsl_lock;
+	struct nfsclopen	*nfsl_open;
+	NFSPROC_T		*nfsl_inprog;
+	nfsv4stateid_t		nfsl_stateid;
+	int			nfsl_lockflags;
+	u_int32_t		nfsl_seqid;
+	struct nfsv4lock	nfsl_rwlock;
+	u_int8_t		nfsl_owner[NFSV4CL_LOCKNAMELEN];
+	u_int8_t		nfsl_openowner[NFSV4CL_LOCKNAMELEN];
+};
+
+/*
+ * Byte range entry for the above lock owner.
+ */
+struct nfscllock {
+	LIST_ENTRY(nfscllock)	nfslo_list;
+	u_int64_t		nfslo_first;
+	u_int64_t		nfslo_end;
+	short			nfslo_type;
+};
+
+/* This structure is used to collect a list of lockowners to free up. */
+struct nfscllockownerfh {
+	SLIST_ENTRY(nfscllockownerfh)	nfslfh_list;
+	struct nfscllockownerhead	nfslfh_lock;
+	int				nfslfh_len;
+	uint8_t				nfslfh_fh[NFSX_V4FHMAX];
+};
+
+/*
+ * MALLOC'd to the correct length to accommodate the file handle.
+ */
+struct nfscllayout {
+	TAILQ_ENTRY(nfscllayout)	nfsly_list;
+	LIST_ENTRY(nfscllayout)		nfsly_hash;
+	nfsv4stateid_t			nfsly_stateid;
+	struct nfsv4lock		nfsly_lock;
+	uint64_t			nfsly_filesid[2];
+	uint64_t			nfsly_lastbyte;
+	struct nfsclflayouthead		nfsly_flayread;
+	struct nfsclflayouthead		nfsly_flayrw;
+	struct nfsclrecalllayouthead	nfsly_recall;
+	time_t				nfsly_timestamp;
+	struct nfsclclient		*nfsly_clp;
+	uint16_t			nfsly_flags;
+	uint16_t			nfsly_fhlen;
+	uint8_t				nfsly_fh[1];
+};
+
+/*
+ * Flags for nfsly_flags.
+ */
+#define	NFSLY_FILES		0x0001
+#define	NFSLY_BLOCK		0x0002
+#define	NFSLY_OBJECT		0x0004
+#define	NFSLY_RECALL		0x0008
+#define	NFSLY_RECALLFILE	0x0010
+#define	NFSLY_RECALLFSID	0x0020
+#define	NFSLY_RECALLALL		0x0040
+#define	NFSLY_RETONCLOSE	0x0080
+#define	NFSLY_WRITTEN		0x0100	/* Has been used to write to a DS. */
+#define	NFSLY_FLEXFILE		0x0200
+
+/*
+ * Flex file layout mirror specific stuff for nfsclflayout.
+ */
+struct nfsffm {
+	nfsv4stateid_t		st;
+	struct nfscldevinfo	*devp;
+	char			dev[NFSX_V4DEVICEID];
+	uint32_t		eff;
+	uid_t			user;
+	gid_t			group;
+	struct nfsfh		*fh[NFSDEV_MAXVERS];
+	uint16_t		fhcnt;
+};
+
+/*
+ * MALLOC'd to the correct length to accommodate the file handle list for File
+ * layout and the list of mirrors for the Flex File Layout.
+ * These hang off of nfsly_flayread and nfsly_flayrw, sorted in increasing
+ * offset order.
+ * The nfsly_flayread list holds the ones with iomode == NFSLAYOUTIOMODE_READ,
+ * whereas the nfsly_flayrw holds the ones with iomode == NFSLAYOUTIOMODE_RW.
+ */
+struct nfsclflayout {
+	LIST_ENTRY(nfsclflayout)	nfsfl_list;
+	uint64_t			nfsfl_off;
+	uint64_t			nfsfl_end;
+	uint32_t			nfsfl_iomode;
+	uint16_t			nfsfl_flags;
+	union {
+		struct {
+			uint64_t	patoff;
+			uint32_t	util;
+			uint32_t	stripe1;
+			uint8_t		dev[NFSX_V4DEVICEID];
+			uint16_t	fhcnt;
+			struct nfscldevinfo *devp;
+		} fl;
+		struct {
+			uint64_t	stripeunit;
+			uint32_t	fflags;
+			uint32_t	statshint;
+			uint16_t	mirrorcnt;
+		} ff;
+	} nfsfl_un;
+	union {
+		struct nfsfh		*fh[0];	/* FH list for DS File layout */
+		struct nfsffm		ffm[0];	/* Mirror list for Flex File */
+	} nfsfl_un2;	/* Must be last. Malloc'd to correct array length */
+};
+#define	nfsfl_patoff		nfsfl_un.fl.patoff
+#define	nfsfl_util		nfsfl_un.fl.util
+#define	nfsfl_stripe1		nfsfl_un.fl.stripe1
+#define	nfsfl_dev		nfsfl_un.fl.dev
+#define	nfsfl_fhcnt		nfsfl_un.fl.fhcnt
+#define	nfsfl_devp		nfsfl_un.fl.devp
+#define	nfsfl_stripeunit	nfsfl_un.ff.stripeunit
+#define	nfsfl_fflags		nfsfl_un.ff.fflags
+#define	nfsfl_statshint		nfsfl_un.ff.statshint
+#define	nfsfl_mirrorcnt		nfsfl_un.ff.mirrorcnt
+#define	nfsfl_fh		nfsfl_un2.fh
+#define	nfsfl_ffm		nfsfl_un2.ffm
+
+/*
+ * Flags for nfsfl_flags.
+ */
+#define	NFSFL_RECALL	0x0001		/* File layout has been recalled */
+#define	NFSFL_FILE	0x0002		/* File layout */
+#define	NFSFL_FLEXFILE	0x0004		/* Flex File layout */
+
+/*
+ * Structure that is used to store a LAYOUTRECALL.
+ */
+struct nfsclrecalllayout {
+	LIST_ENTRY(nfsclrecalllayout)	nfsrecly_list;
+	uint64_t			nfsrecly_off;
+	uint64_t			nfsrecly_len;
+	int				nfsrecly_recalltype;
+	uint32_t			nfsrecly_iomode;
+	uint32_t			nfsrecly_stateseqid;
+	uint32_t			nfsrecly_stat;
+	uint32_t			nfsrecly_op;
+	char				nfsrecly_devid[NFSX_V4DEVICEID];
+};
+
+/*
+ * Stores the NFSv4.1 Device Info. Malloc'd to the correct length to
+ * store the list of network connections and list of indices.
+ * nfsdi_data[] is allocated the following way:
+ * - nfsdi_addrcnt * struct nfsclds
+ * - stripe indices, each stored as one byte, since there can be many
+ *   of them. (This implies a limit of 256 on nfsdi_addrcnt, since the
+ *   indices select which address.)
+ * For Flex File, the addrcnt is always one and no stripe indices exist.
+ */
+struct nfscldevinfo {
+	LIST_ENTRY(nfscldevinfo)	nfsdi_list;
+	uint8_t				nfsdi_deviceid[NFSX_V4DEVICEID];
+	struct nfsclclient		*nfsdi_clp;
+	uint32_t			nfsdi_refcnt;
+	uint32_t			nfsdi_layoutrefs;
+	union {
+		struct {
+			uint16_t	stripecnt;
+		} fl;
+		struct {
+			int		versindex;
+			uint32_t	vers;
+			uint32_t	minorvers;
+			uint32_t	rsize;
+			uint32_t	wsize;
+		} ff;
+	} nfsdi_un;
+	uint16_t			nfsdi_addrcnt;
+	uint16_t			nfsdi_flags;
+	struct nfsclds			*nfsdi_data[0];
+};
+#define	nfsdi_stripecnt	nfsdi_un.fl.stripecnt
+#define	nfsdi_versindex	nfsdi_un.ff.versindex
+#define	nfsdi_vers	nfsdi_un.ff.vers
+#define	nfsdi_minorvers	nfsdi_un.ff.minorvers
+#define	nfsdi_rsize	nfsdi_un.ff.rsize
+#define	nfsdi_wsize	nfsdi_un.ff.wsize
+
+/* Flags for nfsdi_flags. */
+#define	NFSDI_FILELAYOUT	0x0001
+#define	NFSDI_FLEXFILE		0x0002
+#define	NFSDI_TIGHTCOUPLED	0x0004
+
+/* These inline functions return values from nfsdi_data[]. */
+/*
+ * Return a pointer to the address at "pos".
+ */
+static __inline struct nfsclds **
+nfsfldi_addr(struct nfscldevinfo *ndi, int pos)
+{
+
+	if (pos >= ndi->nfsdi_addrcnt)
+		return (NULL);
+	return (&ndi->nfsdi_data[pos]);
+}
+
+/*
+ * Return the Nth ("pos") stripe index.
+ */
+static __inline int
+nfsfldi_stripeindex(struct nfscldevinfo *ndi, int pos)
+{
+	uint8_t *valp;
+
+	if (pos >= ndi->nfsdi_stripecnt)
+		return (-1);
+	valp = (uint8_t *)&ndi->nfsdi_data[ndi->nfsdi_addrcnt];
+	valp += pos;
+	return ((int)*valp);
+}
+
+/*
+ * Set the Nth ("pos") stripe index to "val".
+ */
+static __inline void
+nfsfldi_setstripeindex(struct nfscldevinfo *ndi, int pos, uint8_t val)
+{
+	uint8_t *valp;
+
+	if (pos >= ndi->nfsdi_stripecnt)
+		return;
+	valp = (uint8_t *)&ndi->nfsdi_data[ndi->nfsdi_addrcnt];
+	valp += pos;
+	*valp = val;
+}
+
+/*
+ * Macro for incrementing the seqid#.
+ */
+#define	NFSCL_INCRSEQID(s, n)	do { 					\
+	    if (((n)->nd_flag & ND_INCRSEQID))				\
+		(s)++; 							\
+	} while (0)
+
+#endif	/* _NFS_NFSCLSTATE_H_ */
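
The comment on struct nfscldevinfo in nfsclstate.h above spells out how the
trailing nfsdi_data[] region is laid out: nfsdi_addrcnt pointer slots first,
then one byte per stripe index. Below is a minimal sketch of the sizing
arithmetic that layout implies; example_devinfo_size() is hypothetical and
assumes the declarations above are in scope (normally via <fs/nfs/nfsport.h>).

	static size_t
	example_devinfo_size(uint16_t addrcnt, uint16_t stripecnt)
	{

		/* Fixed part, then addrcnt pointer slots, then one byte per index. */
		return (sizeof(struct nfscldevinfo) +
		    addrcnt * sizeof(struct nfsclds *) +
		    stripecnt * sizeof(uint8_t));
	}

The inline accessors rely on exactly this layout: nfsfldi_stripeindex() starts
its byte array at &ndi->nfsdi_data[ndi->nfsdi_addrcnt], i.e. immediately after
the pointer slots.
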
diff --git a/freebsd/sys/fs/nfs/nfsdport.h b/freebsd/sys/fs/nfs/nfsdport.h
new file mode 100644
index 0000000..f683476
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsdport.h
@@ -0,0 +1,125 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * These macros handle nfsvattr fields. They look a bit silly here, but
+ * are quite different for the Darwin port.
+ */
+#define	NFSVNO_ATTRINIT(n)		(VATTR_NULL(&((n)->na_vattr)))
+#define	NFSVNO_SETATTRVAL(n, f, v)	((n)->na_##f = (v))
+#define	NFSVNO_SETACTIVE(n, f)
+#define	NFSVNO_UNSET(n, f)		((n)->na_##f = VNOVAL)
+#define	NFSVNO_NOTSETMODE(n)		((n)->na_mode == ((mode_t)VNOVAL))
+#define	NFSVNO_ISSETMODE(n)		((n)->na_mode != ((mode_t)VNOVAL))
+#define	NFSVNO_NOTSETUID(n)		((n)->na_uid == ((uid_t)VNOVAL))
+#define	NFSVNO_ISSETUID(n)		((n)->na_uid != ((uid_t)VNOVAL))
+#define	NFSVNO_NOTSETGID(n)		((n)->na_gid == ((gid_t)VNOVAL))
+#define	NFSVNO_ISSETGID(n)		((n)->na_gid != ((gid_t)VNOVAL))
+#define	NFSVNO_NOTSETSIZE(n)		((n)->na_size == VNOVAL)
+#define	NFSVNO_ISSETSIZE(n)		((n)->na_size != VNOVAL)
+#define	NFSVNO_NOTSETATIME(n)		((n)->na_atime.tv_sec == VNOVAL)
+#define	NFSVNO_ISSETATIME(n)		((n)->na_atime.tv_sec != VNOVAL)
+#define	NFSVNO_NOTSETMTIME(n)		((n)->na_mtime.tv_sec == VNOVAL)
+#define	NFSVNO_ISSETMTIME(n)		((n)->na_mtime.tv_sec != VNOVAL)
+
+/*
+ * This structure acts as a "catch-all" for information that
+ * needs to be returned by nfsd_fhtovp().
+ */
+struct nfsexstuff {
+	int	nes_exflag;			/* export flags */
+	int	nes_numsecflavor;		/* # of security flavors */
+	int	nes_secflavors[MAXSECFLAVORS];	/* and the flavors */
+};
+
+/*
+ * These are NO-OPS for BSD until Isilon upstreams EXITCODE support.
+ * EXITCODE is an in-memory ring buffer that holds the routines failing status.
+ * This is a valuable tool to use when debugging and analyzing issues.
+ * In addition to recording a routine's failing status, it offers
+ * logging of routines for call stack tracing.
+ * EXITCODE should be used only in routines that return a true errno value, as
+ * that value will be formatted to a displayable errno string.  Routines that 
+ * return regular int status that are not true errno should not set EXITCODE.
+ * If you want to log routine tracing, you can add EXITCODE(0) to any routine.
+ * NFS extended the EXITCODE with EXITCODE2 to record either the routine's
+ * exit errno status or the nd_repstat.
+ */
+#define	NFSEXITCODE(error)
+#define	NFSEXITCODE2(error, nd)
+
+#define	NFSVNO_EXINIT(e)		((e)->nes_exflag = 0)
+#define	NFSVNO_EXPORTED(e)		((e)->nes_exflag & MNT_EXPORTED)
+#define	NFSVNO_EXRDONLY(e)		((e)->nes_exflag & MNT_EXRDONLY)
+#define	NFSVNO_EXPORTANON(e)		((e)->nes_exflag & MNT_EXPORTANON)
+#define	NFSVNO_EXSTRICTACCESS(e)	((e)->nes_exflag & MNT_EXSTRICTACCESS)
+#define	NFSVNO_EXV4ONLY(e)		((e)->nes_exflag & MNT_EXV4ONLY)
+
+#define	NFSVNO_SETEXRDONLY(e)	((e)->nes_exflag = (MNT_EXPORTED|MNT_EXRDONLY))
+
+#define	NFSVNO_CMPFH(f1, f2)						\
+    ((f1)->fh_fsid.val[0] == (f2)->fh_fsid.val[0] &&			\
+     (f1)->fh_fsid.val[1] == (f2)->fh_fsid.val[1] &&			\
+     bcmp(&(f1)->fh_fid, &(f2)->fh_fid, sizeof(struct fid)) == 0)
+
+#define	NFSLOCKHASH(f) 							\
+	(&nfslockhash[nfsrv_hashfh(f) % nfsrv_lockhashsize])
+
+#define	NFSFPVNODE(f)	((struct vnode *)((f)->f_data))
+#define	NFSFPCRED(f)	((f)->f_cred)
+#define	NFSFPFLAG(f)	((f)->f_flag)
+
+#define	NFSNAMEICNDSET(n, c, o, f)	do {				\
+	(n)->cn_cred = (c);						\
+	(n)->cn_nameiop = (o);						\
+	(n)->cn_flags = (f);						\
+    } while (0)
+
+/*
+ * A little bit of Darwin vfs kpi.
+ */
+#define	vnode_mount(v)	((v)->v_mount)
+#define	vfs_statfs(m)	(&((m)->mnt_stat))
+
+#define	NFSPATHLEN_T	size_t
+
+/*
+ * These are set to the minimum and maximum size of a server file
+ * handle.
+ */
+#define	NFSRV_MINFH	(sizeof (fhandle_t))
+#define	NFSRV_MAXFH	(sizeof (fhandle_t))
+
+/* Use this macro for debug printfs. */
+#define	NFSD_DEBUG(level, ...)	do {					\
+		if (nfsd_debuglevel >= (level))				\
+			printf(__VA_ARGS__);				\
+	} while (0)
+
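The NFSVNO_* attribute macros in nfsdport.h above encode the usual FreeBSD
convention that a vattr field equal to VNOVAL means "not supplied". A small
sketch of how they are typically combined; example_apply_mode() is hypothetical
and assumes <fs/nfs/nfsport.h> (which defines struct nfsvattr and the na_*
aliases) is included.

	static void
	example_apply_mode(struct nfsvattr *nvap, mode_t mode)
	{

		NFSVNO_ATTRINIT(nvap);			/* VATTR_NULL(): all fields VNOVAL */
		NFSVNO_SETATTRVAL(nvap, mode, mode);	/* nvap->na_mode = mode */

		if (NFSVNO_ISSETMODE(nvap)) {
			/* The mode was supplied; uid, gid, size, ... remain unset. */
		}
	}
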
diff --git a/freebsd/sys/fs/nfs/nfskpiport.h b/freebsd/sys/fs/nfs/nfskpiport.h
new file mode 100644
index 0000000..f428d9d
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfskpiport.h
@@ -0,0 +1,75 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSKPIPORT_H_
+#define	_NFS_NFSKPIPORT_H_
+/*
+ * These definitions are needed since the generic code is now using Darwin8
+ * KPI stuff. (I know, seems a bit silly, but I want the code to build on
+ * Darwin8 and hopefully subsequent releases from Apple.)
+ */
+typedef	struct mount *		mount_t;
+#define	vfs_statfs(m)		(&((m)->mnt_stat))
+#define	vfs_flags(m)		((m)->mnt_flag)
+
+typedef struct vnode *		vnode_t;
+#define	vnode_mount(v)		((v)->v_mount)
+#define	vnode_vtype(v)		((v)->v_type)
+
+typedef struct mbuf *		mbuf_t;
+#define	mbuf_freem(m)		m_freem(m)
+#define	mbuf_data(m)		mtod((m), void *)
+#define	mbuf_len(m)		((m)->m_len)
+#define	mbuf_next(m)		((m)->m_next)
+#define	mbuf_setlen(m, l)	((m)->m_len = (l))
+#define	mbuf_setnext(m, p)	((m)->m_next = (p))
+#define	mbuf_pkthdr_len(m)	((m)->m_pkthdr.len)
+#define	mbuf_pkthdr_setlen(m, l) ((m)->m_pkthdr.len = (l))
+#define	mbuf_pkthdr_setrcvif(m, p) ((m)->m_pkthdr.rcvif = (p))
+
+/*
+ * This stuff is needed by Darwin for handling the uio structure.
+ */
+#define	CAST_USER_ADDR_T(a)	(a)
+#define	CAST_DOWN(c, a)		((c) (a))
+#define	uio_uio_resid(p)	((p)->uio_resid)
+#define	uio_uio_resid_add(p, v)	((p)->uio_resid += (v))
+#define	uio_uio_resid_set(p, v)	((p)->uio_resid = (v))
+#define	uio_iov_base(p)		((p)->uio_iov->iov_base)
+#define	uio_iov_base_add(p, v)	do {					\
+	char *pp;							\
+	pp = (char *)(p)->uio_iov->iov_base;				\
+	pp += (v);							\
+	(p)->uio_iov->iov_base = (void *)pp;				\
+    } while (0)
+#define	uio_iov_len(p)		((p)->uio_iov->iov_len)
+#define	uio_iov_len_add(p, v)	((p)->uio_iov->iov_len += (v))
+
+#endif	/* _NFS_NFSKPIPORT_H_ */
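
The uio_* wrappers above map the Darwin-style accessors onto the stock FreeBSD
struct uio. The common pattern is to advance all three pieces of bookkeeping
together after copying data out of the current iovec; a hedged sketch follows
(example_advance() is hypothetical).

	static void
	example_advance(struct uio *uiop, int n)
	{

		uio_iov_base_add(uiop, n);	/* iov_base += n */
		uio_iov_len_add(uiop, -n);	/* iov_len  -= n */
		uio_uio_resid_add(uiop, -n);	/* uio_resid -= n */
	}
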
diff --git a/freebsd/sys/fs/nfs/nfsm_subs.h b/freebsd/sys/fs/nfs/nfsm_subs.h
new file mode 100644
index 0000000..72ebbdb
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsm_subs.h
@@ -0,0 +1,146 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSM_SUBS_H_
+#define	_NFS_NFSM_SUBS_H_
+
+
+/*
+ * These macros do strange and peculiar things to mbuf chains for
+ * the assistance of the nfs code. To attempt to use them for any
+ * other purpose will be dangerous. (they make weird assumptions)
+ */
+
+#ifndef APPLE
+/*
+ * First define what the actual subs. return
+ */
+#define	NFSM_DATAP(m, s)	(m)->m_data += (s)
+
+/*
+ * Now for the macros that do the simple stuff and call the functions
+ * for the hard stuff.
+ * They use fields in struct nfsrv_descript to handle the mbuf queues.
+ * Replace most of the macro with an inline function, to minimize
+ * the machine code. The inline functions in lower case can be called
+ * directly, bypassing the macro.
+ */
+static __inline void *
+nfsm_build(struct nfsrv_descript *nd, int siz)
+{
+	void *retp;
+	struct mbuf *mb2;
+
+	if (siz > M_TRAILINGSPACE(nd->nd_mb)) {
+		NFSMCLGET(mb2, M_NOWAIT);
+		if (siz > MLEN)
+			panic("build > MLEN");
+		mbuf_setlen(mb2, 0);
+		nd->nd_bpos = NFSMTOD(mb2, caddr_t);
+		nd->nd_mb->m_next = mb2;
+		nd->nd_mb = mb2;
+	}
+	retp = (void *)(nd->nd_bpos);
+	nd->nd_mb->m_len += siz;
+	nd->nd_bpos += siz;
+	return (retp);
+}
+
+#define	NFSM_BUILD(a, c, s)	((a) = (c)nfsm_build(nd, (s)))
+
+static __inline void *
+nfsm_dissect(struct nfsrv_descript *nd, int siz)
+{
+	int tt1; 
+	void *retp;
+
+	tt1 = NFSMTOD(nd->nd_md, caddr_t) + nd->nd_md->m_len - nd->nd_dpos; 
+	if (tt1 >= siz) { 
+		retp = (void *)nd->nd_dpos; 
+		nd->nd_dpos += siz; 
+	} else { 
+		retp = nfsm_dissct(nd, siz, M_WAITOK); 
+	}
+	return (retp);
+}
+
+static __inline void *
+nfsm_dissect_nonblock(struct nfsrv_descript *nd, int siz)
+{
+	int tt1; 
+	void *retp;
+
+	tt1 = NFSMTOD(nd->nd_md, caddr_t) + nd->nd_md->m_len - nd->nd_dpos; 
+	if (tt1 >= siz) { 
+		retp = (void *)nd->nd_dpos; 
+		nd->nd_dpos += siz; 
+	} else { 
+		retp = nfsm_dissct(nd, siz, M_NOWAIT); 
+	}
+	return (retp);
+}
+
+#define	NFSM_DISSECT(a, c, s) 						\
+	do {								\
+		(a) = (c)nfsm_dissect(nd, (s));	 			\
+		if ((a) == NULL) { 					\
+			error = EBADRPC; 				\
+			goto nfsmout; 					\
+		}							\
+	} while (0)
+
+#define	NFSM_DISSECT_NONBLOCK(a, c, s) 					\
+	do {								\
+		(a) = (c)nfsm_dissect_nonblock(nd, (s));		\
+		if ((a) == NULL) { 					\
+			error = EBADRPC; 				\
+			goto nfsmout; 					\
+		}							\
+	} while (0)
+#endif	/* !APPLE */
+
+#define	NFSM_STRSIZ(s, m)  						\
+	do {								\
+		tl = (u_int32_t *)nfsm_dissect(nd, NFSX_UNSIGNED);	\
+		if (!tl || ((s) = fxdr_unsigned(int32_t, *tl)) > (m)) { \
+			error = EBADRPC; 				\
+			goto nfsmout; 					\
+		}							\
+	} while (0)
+
+#define	NFSM_RNDUP(a)	(((a)+3)&(~0x3))
+
+#endif	/* _NFS_NFSM_SUBS_H_ */
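
NFSM_BUILD() and NFSM_DISSECT() above are the workhorses for appending to and
parsing the mbuf chain held in an nfsrv_descript, and NFSM_DISSECT() assumes
the caller provides a local "error" variable and an "nfsmout" label. A minimal
sketch of the parsing side (example_getcount() is hypothetical; fxdr_unsigned()
and NFSX_UNSIGNED come from xdr_subs.h and nfsproto.h):

	static int
	example_getcount(struct nfsrv_descript *nd, uint32_t *cntp)
	{
		u_int32_t *tl;
		int error = 0;

		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);	/* EBADRPC + goto nfsmout on failure */
		*cntp = fxdr_unsigned(uint32_t, *tl);
	nfsmout:
		return (error);
	}

The build side is symmetric: NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); then
*tl = txdr_unsigned(cnt);.
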
diff --git a/freebsd/sys/fs/nfs/nfsport.h b/freebsd/sys/fs/nfs/nfsport.h
new file mode 100644
index 0000000..8abb6a5
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsport.h
@@ -0,0 +1,1086 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSPORT_H_
+#define	_NFS_NFSPORT_H_
+
+/*
+ * In general, I'm not fond of #includes in .h files, but this seems
+ * to be the cleanest way to handle #include files for the ports.
+ */
+#ifdef _KERNEL
+#include <sys/unistd.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/dirent.h>
+#include <sys/domain.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/lockf.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mount.h>
+#include <sys/mutex.h>
+#include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/reboot.h>
+#include <sys/resourcevar.h>
+#include <sys/signalvar.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/stat.h>
+#include <sys/syslog.h>
+#include <sys/sysproto.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/vnode.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/acl.h>
+#include <sys/module.h>
+#include <sys/sysent.h>
+#include <sys/syscall.h>
+#include <sys/priv.h>
+#include <sys/kthread.h>
+#include <sys/syscallsubr.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/radix.h>
+#include <net/route.h>
+#include <net/if_dl.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/ip_var.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_fsm.h>
+#include <netinet/tcp_seq.h>
+#include <netinet/tcp_timer.h>
+#include <netinet/tcp_var.h>
+#include <machine/in_cksum.h>
+#include <crypto/des/des.h>
+#include <sys/md5.h>
+#include <rpc/rpc.h>
+#include <rpc/rpcsec_gss.h>
+
+/*
+ * For Darwin, these functions should be "static" when built in a kext.
+ * (This is always defined as nil otherwise.)
+ */
+#define	APPLESTATIC
+#include <ufs/ufs/dir.h>
+#include <ufs/ufs/quota.h>
+#include <ufs/ufs/inode.h>
+#include <ufs/ufs/extattr.h>
+#include <ufs/ufs/ufsmount.h>
+#include <vm/uma.h>
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <nfs/nfssvc.h>
+#include "opt_nfs.h"
+#include "opt_ufs.h"
+
+/*
+ * These types must be defined before the nfs includes.
+ */
+#define	NFSSOCKADDR_T	struct sockaddr *
+#define	NFSPROC_T	struct thread
+#define	NFSDEV_T	dev_t
+#define	NFSSVCARGS	nfssvc_args
+#define	NFSACL_T	struct acl
+
+/*
+ * These should be defined as the types used for the corresponding VOP's
+ * argument type.
+ */
+#define	NFS_ACCESS_ARGS		struct vop_access_args
+#define	NFS_OPEN_ARGS		struct vop_open_args
+#define	NFS_GETATTR_ARGS	struct vop_getattr_args
+#define	NFS_LOOKUP_ARGS		struct vop_lookup_args
+#define	NFS_READDIR_ARGS	struct vop_readdir_args
+
+/*
+ * Allocate mbufs. Must succeed and never set the mbuf ptr to NULL.
+ */
+#define	NFSMGET(m)	do { 					\
+		MGET((m), M_WAITOK, MT_DATA); 			\
+		while ((m) == NULL ) { 				\
+			(void) nfs_catnap(PZERO, 0, "nfsmget");	\
+			MGET((m), M_WAITOK, MT_DATA); 		\
+		} 						\
+	} while (0)
+#define	NFSMGETHDR(m)	do { 					\
+		MGETHDR((m), M_WAITOK, MT_DATA);		\
+		while ((m) == NULL ) { 				\
+			(void) nfs_catnap(PZERO, 0, "nfsmget");	\
+			MGETHDR((m), M_WAITOK, MT_DATA); 	\
+		} 						\
+	} while (0)
+#define	NFSMCLGET(m, w)	do { 					\
+		MGET((m), M_WAITOK, MT_DATA); 			\
+		while ((m) == NULL ) { 				\
+			(void) nfs_catnap(PZERO, 0, "nfsmget");	\
+			MGET((m), M_WAITOK, MT_DATA); 		\
+		} 						\
+		MCLGET((m), (w));				\
+	} while (0)
+#define	NFSMCLGETHDR(m, w) do { 				\
+		MGETHDR((m), M_WAITOK, MT_DATA);		\
+		while ((m) == NULL ) { 				\
+			(void) nfs_catnap(PZERO, 0, "nfsmget");	\
+			MGETHDR((m), M_WAITOK, MT_DATA); 	\
+		} 						\
+	} while (0)
+#define	NFSMTOD	mtod
+
+/*
+ * Client side constant for size of a lockowner name.
+ */
+#define	NFSV4CL_LOCKNAMELEN	12
+
+/*
+ * Type for a mutex lock.
+ */
+#define	NFSMUTEX_T		struct mtx
+
+#endif	/* _KERNEL */
+
+/*
+ * NFSv4 Operation numbers.
+ */
+#define	NFSV4OP_ACCESS		3
+#define	NFSV4OP_CLOSE		4
+#define	NFSV4OP_COMMIT		5
+#define	NFSV4OP_CREATE		6
+#define	NFSV4OP_DELEGPURGE	7
+#define	NFSV4OP_DELEGRETURN	8
+#define	NFSV4OP_GETATTR		9
+#define	NFSV4OP_GETFH		10
+#define	NFSV4OP_LINK		11
+#define	NFSV4OP_LOCK		12
+#define	NFSV4OP_LOCKT		13
+#define	NFSV4OP_LOCKU		14
+#define	NFSV4OP_LOOKUP		15
+#define	NFSV4OP_LOOKUPP		16
+#define	NFSV4OP_NVERIFY		17
+#define	NFSV4OP_OPEN		18
+#define	NFSV4OP_OPENATTR	19
+#define	NFSV4OP_OPENCONFIRM	20
+#define	NFSV4OP_OPENDOWNGRADE	21
+#define	NFSV4OP_PUTFH		22
+#define	NFSV4OP_PUTPUBFH	23
+#define	NFSV4OP_PUTROOTFH	24
+#define	NFSV4OP_READ		25
+#define	NFSV4OP_READDIR		26
+#define	NFSV4OP_READLINK	27
+#define	NFSV4OP_REMOVE		28
+#define	NFSV4OP_RENAME		29
+#define	NFSV4OP_RENEW		30
+#define	NFSV4OP_RESTOREFH	31
+#define	NFSV4OP_SAVEFH		32
+#define	NFSV4OP_SECINFO		33
+#define	NFSV4OP_SETATTR		34
+#define	NFSV4OP_SETCLIENTID	35
+#define	NFSV4OP_SETCLIENTIDCFRM	36
+#define	NFSV4OP_VERIFY		37
+#define	NFSV4OP_WRITE		38
+#define	NFSV4OP_RELEASELCKOWN	39
+
+/*
+ * Must be one greater than the last Operation#.
+ */
+#define	NFSV4OP_NOPS		40
+
+/*
+ * Additional Ops for NFSv4.1.
+ */
+#define	NFSV4OP_BACKCHANNELCTL	40
+#define	NFSV4OP_BINDCONNTOSESS	41
+#define	NFSV4OP_EXCHANGEID	42
+#define	NFSV4OP_CREATESESSION	43
+#define	NFSV4OP_DESTROYSESSION	44
+#define	NFSV4OP_FREESTATEID	45
+#define	NFSV4OP_GETDIRDELEG	46
+#define	NFSV4OP_GETDEVINFO	47
+#define	NFSV4OP_GETDEVLIST	48
+#define	NFSV4OP_LAYOUTCOMMIT	49
+#define	NFSV4OP_LAYOUTGET	50
+#define	NFSV4OP_LAYOUTRETURN	51
+#define	NFSV4OP_SECINFONONAME	52
+#define	NFSV4OP_SEQUENCE	53
+#define	NFSV4OP_SETSSV		54
+#define	NFSV4OP_TESTSTATEID	55
+#define	NFSV4OP_WANTDELEG	56
+#define	NFSV4OP_DESTROYCLIENTID	57
+#define	NFSV4OP_RECLAIMCOMPL	58
+
+/*
+ * Must be one more than last op#.
+ * NFSv4.2 isn't implemented yet, but define the op# limit for it.
+ */
+#define	NFSV41_NOPS		59
+#define	NFSV42_NOPS		72
+
+/* Quirky case of the illegal op code */
+#define	NFSV4OP_OPILLEGAL	10044
+
+/*
+ * Fake NFSV4OP_xxx used for nfsstat. Start at NFSV42_NOPS.
+ */
+#define	NFSV4OP_SYMLINK		(NFSV42_NOPS)
+#define	NFSV4OP_MKDIR		(NFSV42_NOPS + 1)
+#define	NFSV4OP_RMDIR		(NFSV42_NOPS + 2)
+#define	NFSV4OP_READDIRPLUS	(NFSV42_NOPS + 3)
+#define	NFSV4OP_MKNOD		(NFSV42_NOPS + 4)
+#define	NFSV4OP_FSSTAT		(NFSV42_NOPS + 5)
+#define	NFSV4OP_FSINFO		(NFSV42_NOPS + 6)
+#define	NFSV4OP_PATHCONF	(NFSV42_NOPS + 7)
+#define	NFSV4OP_V3CREATE	(NFSV42_NOPS + 8)
+
+/*
+ * This is the count of the fake operations listed above.
+ */
+#define	NFSV4OP_FAKENOPS	9
+
+/*
+ * and the Callback OPs
+ */
+#define	NFSV4OP_CBGETATTR	3
+#define	NFSV4OP_CBRECALL	4
+
+/*
+ * Must be one greater than the last Callback Operation# for NFSv4.0.
+ */
+#define	NFSV4OP_CBNOPS		5
+
+/*
+ * Additional Callback Ops for NFSv4.1 only.
+ */
+#define	NFSV4OP_CBLAYOUTRECALL	5
+#define	NFSV4OP_CBNOTIFY	6
+#define	NFSV4OP_CBPUSHDELEG	7
+#define	NFSV4OP_CBRECALLANY	8
+#define	NFSV4OP_CBRECALLOBJAVAIL 9
+#define	NFSV4OP_CBRECALLSLOT	10
+#define	NFSV4OP_CBSEQUENCE	11
+#define	NFSV4OP_CBWANTCANCELLED	12
+#define	NFSV4OP_CBNOTIFYLOCK	13
+#define	NFSV4OP_CBNOTIFYDEVID	14
+
+#define	NFSV41_CBNOPS		15
+#define	NFSV42_CBNOPS		16
+
+/*
+ * The lower numbers up to 21 are used by NFSv2 and v3. These define higher
+ * numbers used by NFSv4.
+ * NFS_V3NPROCS is one greater than the last V3 op and NFS_NPROCS is
+ * one greater than the last number.
+ */
+#ifndef	NFS_V3NPROCS
+#define	NFS_V3NPROCS		22
+
+#define	NFSPROC_LOOKUPP		22
+#define	NFSPROC_SETCLIENTID	23
+#define	NFSPROC_SETCLIENTIDCFRM	24
+#define	NFSPROC_LOCK		25
+#define	NFSPROC_LOCKU		26
+#define	NFSPROC_OPEN		27
+#define	NFSPROC_CLOSE		28
+#define	NFSPROC_OPENCONFIRM	29
+#define	NFSPROC_LOCKT		30
+#define	NFSPROC_OPENDOWNGRADE	31
+#define	NFSPROC_RENEW		32
+#define	NFSPROC_PUTROOTFH	33
+#define	NFSPROC_RELEASELCKOWN	34
+#define	NFSPROC_DELEGRETURN	35
+#define	NFSPROC_RETDELEGREMOVE	36
+#define	NFSPROC_RETDELEGRENAME1	37
+#define	NFSPROC_RETDELEGRENAME2	38
+#define	NFSPROC_GETACL		39
+#define	NFSPROC_SETACL		40
+
+/*
+ * Must be defined as one higher than the last Proc# above.
+ */
+#define	NFSV4_NPROCS		41
+
+/* Additional procedures for NFSv4.1. */
+#define	NFSPROC_EXCHANGEID	41
+#define	NFSPROC_CREATESESSION	42
+#define	NFSPROC_DESTROYSESSION	43
+#define	NFSPROC_DESTROYCLIENT	44
+#define	NFSPROC_FREESTATEID	45
+#define	NFSPROC_LAYOUTGET	46
+#define	NFSPROC_GETDEVICEINFO	47
+#define	NFSPROC_LAYOUTCOMMIT	48
+#define	NFSPROC_LAYOUTRETURN	49
+#define	NFSPROC_RECLAIMCOMPL	50
+#define	NFSPROC_WRITEDS		51
+#define	NFSPROC_READDS		52
+#define	NFSPROC_COMMITDS	53
+#define	NFSPROC_OPENLAYGET	54
+#define	NFSPROC_CREATELAYGET	55
+
+/*
+ * Must be defined as one higher than the last NFSv4.1 Proc# above.
+ */
+#define	NFSV41_NPROCS		56
+
+#endif	/* NFS_V3NPROCS */
+
+/*
+ * New stats structure.
+ * The vers field will be set to NFSSTATS_V1 by the caller.
+ */
+#define	NFSSTATS_V1	1
+struct nfsstatsv1 {
+	int		vers;	/* Set to version requested by caller. */
+	uint64_t	attrcache_hits;
+	uint64_t	attrcache_misses;
+	uint64_t	lookupcache_hits;
+	uint64_t	lookupcache_misses;
+	uint64_t	direofcache_hits;
+	uint64_t	direofcache_misses;
+	uint64_t	accesscache_hits;
+	uint64_t	accesscache_misses;
+	uint64_t	biocache_reads;
+	uint64_t	read_bios;
+	uint64_t	read_physios;
+	uint64_t	biocache_writes;
+	uint64_t	write_bios;
+	uint64_t	write_physios;
+	uint64_t	biocache_readlinks;
+	uint64_t	readlink_bios;
+	uint64_t	biocache_readdirs;
+	uint64_t	readdir_bios;
+	uint64_t	rpccnt[NFSV41_NPROCS + 13];
+	uint64_t	rpcretries;
+	uint64_t	srvrpccnt[NFSV42_NOPS + NFSV4OP_FAKENOPS];
+	uint64_t	srvrpc_errs;
+	uint64_t	srv_errs;
+	uint64_t	rpcrequests;
+	uint64_t	rpctimeouts;
+	uint64_t	rpcunexpected;
+	uint64_t	rpcinvalid;
+	uint64_t	srvcache_inproghits;
+	uint64_t	srvcache_idemdonehits;
+	uint64_t	srvcache_nonidemdonehits;
+	uint64_t	srvcache_misses;
+	uint64_t	srvcache_tcppeak;
+	int		srvcache_size;	/* Updated by atomic_xx_int(). */
+	uint64_t	srvclients;
+	uint64_t	srvopenowners;
+	uint64_t	srvopens;
+	uint64_t	srvlockowners;
+	uint64_t	srvlocks;
+	uint64_t	srvdelegates;
+	uint64_t	cbrpccnt[NFSV42_CBNOPS];
+	uint64_t	clopenowners;
+	uint64_t	clopens;
+	uint64_t	cllockowners;
+	uint64_t	cllocks;
+	uint64_t	cldelegates;
+	uint64_t	cllocalopenowners;
+	uint64_t	cllocalopens;
+	uint64_t	cllocallockowners;
+	uint64_t	cllocallocks;
+	uint64_t	srvstartcnt;
+	uint64_t	srvdonecnt;
+	uint64_t	srvbytes[NFSV42_NOPS + NFSV4OP_FAKENOPS];
+	uint64_t	srvops[NFSV42_NOPS + NFSV4OP_FAKENOPS];
+	struct bintime	srvduration[NFSV42_NOPS + NFSV4OP_FAKENOPS];
+	struct bintime	busyfrom;
+	struct bintime	busytime;
+};
+
+/*
+ * Old stats structure.
+ */
+struct ext_nfsstats {
+	int	attrcache_hits;
+	int	attrcache_misses;
+	int	lookupcache_hits;
+	int	lookupcache_misses;
+	int	direofcache_hits;
+	int	direofcache_misses;
+	int	accesscache_hits;
+	int	accesscache_misses;
+	int	biocache_reads;
+	int	read_bios;
+	int	read_physios;
+	int	biocache_writes;
+	int	write_bios;
+	int	write_physios;
+	int	biocache_readlinks;
+	int	readlink_bios;
+	int	biocache_readdirs;
+	int	readdir_bios;
+	int	rpccnt[NFSV4_NPROCS];
+	int	rpcretries;
+	int	srvrpccnt[NFSV4OP_NOPS + NFSV4OP_FAKENOPS];
+	int	srvrpc_errs;
+	int	srv_errs;
+	int	rpcrequests;
+	int	rpctimeouts;
+	int	rpcunexpected;
+	int	rpcinvalid;
+	int	srvcache_inproghits;
+	int	srvcache_idemdonehits;
+	int	srvcache_nonidemdonehits;
+	int	srvcache_misses;
+	int	srvcache_tcppeak;
+	int	srvcache_size;
+	int	srvclients;
+	int	srvopenowners;
+	int	srvopens;
+	int	srvlockowners;
+	int	srvlocks;
+	int	srvdelegates;
+	int	cbrpccnt[NFSV4OP_CBNOPS];
+	int	clopenowners;
+	int	clopens;
+	int	cllockowners;
+	int	cllocks;
+	int	cldelegates;
+	int	cllocalopenowners;
+	int	cllocalopens;
+	int	cllocallockowners;
+	int	cllocallocks;
+};
+
+#ifdef _KERNEL
+/*
+ * Define NFS_NPROCS as NFSV4_NPROCS for the experimental kernel code.
+ */
+#ifndef	NFS_NPROCS
+#define	NFS_NPROCS		NFSV4_NPROCS
+#endif
+
+#include <fs/nfs/nfskpiport.h>
+#include <fs/nfs/nfsdport.h>
+#include <fs/nfs/rpcv2.h>
+#include <fs/nfs/nfsproto.h>
+#include <fs/nfs/nfs.h>
+#include <fs/nfs/nfsclstate.h>
+#include <fs/nfs/nfs_var.h>
+#include <fs/nfs/nfsm_subs.h>
+#include <fs/nfs/nfsrvcache.h>
+#include <fs/nfs/nfsrvstate.h>
+#include <fs/nfs/xdr_subs.h>
+#include <fs/nfs/nfscl.h>
+#include <nfsclient/nfsargs.h>
+#include <fs/nfsclient/nfsmount.h>
+
+/*
+ * Just to keep nfs_var.h happy.
+ */
+struct nfs_vattr {
+	int	junk;
+};
+
+struct nfsvattr {
+	struct vattr	na_vattr;
+	nfsattrbit_t	na_suppattr;
+	u_int64_t	na_mntonfileno;
+	u_int64_t	na_filesid[2];
+};
+
+#define	na_type		na_vattr.va_type
+#define	na_mode		na_vattr.va_mode
+#define	na_nlink	na_vattr.va_nlink
+#define	na_uid		na_vattr.va_uid
+#define	na_gid		na_vattr.va_gid
+#define	na_fsid		na_vattr.va_fsid
+#define	na_fileid	na_vattr.va_fileid
+#define	na_size		na_vattr.va_size
+#define	na_blocksize	na_vattr.va_blocksize
+#define	na_atime	na_vattr.va_atime
+#define	na_mtime	na_vattr.va_mtime
+#define	na_ctime	na_vattr.va_ctime
+#define	na_gen		na_vattr.va_gen
+#define	na_flags	na_vattr.va_flags
+#define	na_rdev		na_vattr.va_rdev
+#define	na_bytes	na_vattr.va_bytes
+#define	na_filerev	na_vattr.va_filerev
+#define	na_vaflags	na_vattr.va_vaflags
+
+#include <fs/nfsclient/nfsnode.h>
+
+/*
+ * This is the header structure used for the lists, etc. (It has the
+ * above record in it.)
+ */
+struct nfsrv_stablefirst {
+	LIST_HEAD(, nfsrv_stable) nsf_head;	/* Head of nfsrv_stable list */
+	time_t		nsf_eograce;	/* Time grace period ends */
+	time_t		*nsf_bootvals;	/* Previous boottime values */
+	struct file	*nsf_fp;	/* File table pointer */
+	u_char		nsf_flags;	/* NFSNSF_ flags */
+	struct nfsf_rec	nsf_rec;	/* and above first record */
+};
+#define	nsf_lease	nsf_rec.lease
+#define	nsf_numboots	nsf_rec.numboots
+
+/* NFSNSF_xxx flags */
+#define	NFSNSF_UPDATEDONE	0x01
+#define	NFSNSF_GRACEOVER	0x02
+#define	NFSNSF_NEEDLOCK		0x04
+#define	NFSNSF_EXPIREDCLIENT	0x08
+#define	NFSNSF_NOOPENS		0x10
+#define	NFSNSF_OK		0x20
+
+/*
+ * Maximum number of boot times allowed in record. Although there is
+ * really no need for a fixed upper bound, this serves as a sanity check
+ * for a corrupted file.
+ */
+#define	NFSNSF_MAXNUMBOOTS	10000
+
+/*
+ * This structure defines the other records in the file. The
+ * nst_client array is actually the size of the client string name.
+ */
+struct nfst_rec {
+	u_int16_t	len;
+	u_char		flag;
+	u_char		client[1];
+};
+/* and the values for flag */
+#define	NFSNST_NEWSTATE	0x1
+#define	NFSNST_REVOKE		0x2
+#define	NFSNST_GOTSTATE		0x4
+#define	NFSNST_RECLAIMED	0x8
+
+/*
+ * This structure is linked onto nfsrv_stablefirst for the duration of
+ * reclaim.
+ */
+struct nfsrv_stable {
+	LIST_ENTRY(nfsrv_stable) nst_list;
+	struct nfsclient	*nst_clp;
+	struct nfst_rec		nst_rec;
+};
+#define	nst_timestamp	nst_rec.timestamp
+#define	nst_len		nst_rec.len
+#define	nst_flag	nst_rec.flag
+#define	nst_client	nst_rec.client
+
+/*
+ * At some point the server will run out of kernel storage for
+ * state structures. For FreeBSD 5.2, this results in a "kmem_map is
+ * full" panic. It happens at well over 1000000 opens plus
+ * locks on a PIII-800 with 256 Mbytes, so that is where I've set
+ * the limit. If your server panics due to too many opens/locks,
+ * decrease the size of NFSRV_V4STATELIMIT. If you find the server
+ * returning NFS4ERR_RESOURCE a lot and have lots of memory, try
+ * increasing it.
+ */
+#define	NFSRV_V4STATELIMIT	500000	/* Max # of Opens + Locks */
+
+/*
+ * The type required differs with BSDen (just the second arg).
+ */
+void nfsrvd_rcv(struct socket *, void *, int);
+
+/*
+ * Macros for handling socket addresses. (Hopefully this makes the code
+ * more portable, since I've noticed some 'BSD don't have sockaddrs in
+ * mbufs any more.)
+ */
+#define	NFSSOCKADDR(a, t)	((t)(a))
+#define	NFSSOCKADDRSIZE(a, s)		((a)->sa_len = (s))
+
+/*
+ * These should be defined as a process or thread structure, as required
+ * for signal handling, etc.
+ */
+#define	NFSNEWCRED(c)		(crdup(c))
+#define	NFSPROCCRED(p)		((p)->td_ucred)
+#define	NFSFREECRED(c)		(crfree(c))
+#define	NFSUIOPROC(u, p)	((u)->uio_td = NULL)
+#define	NFSPROCP(p)		((p)->td_proc)
+
+/*
+ * Define these so that cn_hash and its length are ignored.
+ */
+#define	NFSCNHASHZERO(c)
+#define	NFSCNHASH(c, v)
+#define	NCHNAMLEN	9999999
+
+/*
+ * This macro is defined to initialize and set the timer routine.
+ */
+#define	NFS_TIMERINIT \
+	newnfs_timer(NULL)
+
+/*
+ * Handle SMP stuff:
+ */
+#define	NFSSTATESPINLOCK	extern struct mtx nfs_state_mutex
+#define	NFSLOCKSTATE()		mtx_lock(&nfs_state_mutex)
+#define	NFSUNLOCKSTATE()	mtx_unlock(&nfs_state_mutex)
+#define	NFSSTATEMUTEXPTR	(&nfs_state_mutex)
+#define	NFSREQSPINLOCK		extern struct mtx nfs_req_mutex
+#define	NFSLOCKREQ()		mtx_lock(&nfs_req_mutex)
+#define	NFSUNLOCKREQ()		mtx_unlock(&nfs_req_mutex)
+#define	NFSSOCKMUTEX		extern struct mtx nfs_slock_mutex
+#define	NFSSOCKMUTEXPTR		(&nfs_slock_mutex)
+#define	NFSLOCKSOCK()		mtx_lock(&nfs_slock_mutex)
+#define	NFSUNLOCKSOCK()		mtx_unlock(&nfs_slock_mutex)
+#define	NFSNAMEIDMUTEX		extern struct mtx nfs_nameid_mutex
+#define	NFSNAMEIDMUTEXPTR	(&nfs_nameid_mutex)
+#define	NFSLOCKNAMEID()		mtx_lock(&nfs_nameid_mutex)
+#define	NFSUNLOCKNAMEID()	mtx_unlock(&nfs_nameid_mutex)
+#define	NFSNAMEIDREQUIRED()	mtx_assert(&nfs_nameid_mutex, MA_OWNED)
+#define	NFSCLSTATEMUTEX		extern struct mtx nfs_clstate_mutex
+#define	NFSCLSTATEMUTEXPTR	(&nfs_clstate_mutex)
+#define	NFSLOCKCLSTATE()	mtx_lock(&nfs_clstate_mutex)
+#define	NFSUNLOCKCLSTATE()	mtx_unlock(&nfs_clstate_mutex)
+#define	NFSDLOCKMUTEX		extern struct mtx newnfsd_mtx
+#define	NFSDLOCKMUTEXPTR	(&newnfsd_mtx)
+#define	NFSD_LOCK()		mtx_lock(&newnfsd_mtx)
+#define	NFSD_UNLOCK()		mtx_unlock(&newnfsd_mtx)
+#define	NFSD_LOCK_ASSERT()	mtx_assert(&newnfsd_mtx, MA_OWNED)
+#define	NFSD_UNLOCK_ASSERT()	mtx_assert(&newnfsd_mtx, MA_NOTOWNED)
+#define	NFSV4ROOTLOCKMUTEX	extern struct mtx nfs_v4root_mutex
+#define	NFSV4ROOTLOCKMUTEXPTR	(&nfs_v4root_mutex)
+#define	NFSLOCKV4ROOTMUTEX()	mtx_lock(&nfs_v4root_mutex)
+#define	NFSUNLOCKV4ROOTMUTEX()	mtx_unlock(&nfs_v4root_mutex)
+#define	NFSLOCKNODE(n)		mtx_lock(&((n)->n_mtx))
+#define	NFSUNLOCKNODE(n)	mtx_unlock(&((n)->n_mtx))
+#define	NFSASSERTNODE(n)	mtx_assert(&((n)->n_mtx), MA_OWNED)
+#define	NFSLOCKMNT(m)		mtx_lock(&((m)->nm_mtx))
+#define	NFSUNLOCKMNT(m)		mtx_unlock(&((m)->nm_mtx))
+#define	NFSLOCKIOD()		mtx_lock(&ncl_iod_mutex)
+#define	NFSUNLOCKIOD()		mtx_unlock(&ncl_iod_mutex)
+#define	NFSASSERTIOD()		mtx_assert(&ncl_iod_mutex, MA_OWNED)
+#define	NFSLOCKREQUEST(r)	mtx_lock(&((r)->r_mtx))
+#define	NFSUNLOCKREQUEST(r)	mtx_unlock(&((r)->r_mtx))
+#define	NFSPROCLISTLOCK()	sx_slock(&allproc_lock)
+#define	NFSPROCLISTUNLOCK()	sx_sunlock(&allproc_lock)
+#define	NFSLOCKSOCKREQ(r)	mtx_lock(&((r)->nr_mtx))
+#define	NFSUNLOCKSOCKREQ(r)	mtx_unlock(&((r)->nr_mtx))
+#define	NFSLOCKDS(d)		mtx_lock(&((d)->nfsclds_mtx))
+#define	NFSUNLOCKDS(d)		mtx_unlock(&((d)->nfsclds_mtx))
+#define	NFSSESSIONMUTEXPTR(s)	(&((s)->mtx))
+#define	NFSLOCKSESSION(s)	mtx_lock(&((s)->mtx))
+#define	NFSUNLOCKSESSION(s)	mtx_unlock(&((s)->mtx))
+#define	NFSLAYOUTMUTEXPTR(l)	(&((l)->mtx))
+#define	NFSLOCKLAYOUT(l)	mtx_lock(&((l)->mtx))
+#define	NFSUNLOCKLAYOUT(l)	mtx_unlock(&((l)->mtx))
+#define	NFSDDSMUTEXPTR		(&nfsrv_dslock_mtx)
+#define	NFSDDSLOCK()		mtx_lock(&nfsrv_dslock_mtx)
+#define	NFSDDSUNLOCK()		mtx_unlock(&nfsrv_dslock_mtx)
+#define	NFSDDONTLISTMUTEXPTR	(&nfsrv_dontlistlock_mtx)
+#define	NFSDDONTLISTLOCK()	mtx_lock(&nfsrv_dontlistlock_mtx)
+#define	NFSDDONTLISTUNLOCK()	mtx_unlock(&nfsrv_dontlistlock_mtx)
+#define	NFSDRECALLMUTEXPTR	(&nfsrv_recalllock_mtx)
+#define	NFSDRECALLLOCK()	mtx_lock(&nfsrv_recalllock_mtx)
+#define	NFSDRECALLUNLOCK()	mtx_unlock(&nfsrv_recalllock_mtx)
+
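The locking macros above follow one pattern: the *MUTEX/*SPINLOCK names expand
to an extern declaration of the backing mtx, and the LOCK/UNLOCK pairs wrap
mtx_lock()/mtx_unlock(). A hedged sketch of how a .c file typically uses them
(example_touch_clstate() is illustrative only):

	NFSCLSTATEMUTEX;		/* extern struct mtx nfs_clstate_mutex */

	static void
	example_touch_clstate(struct nfsclclient *clp)
	{

		NFSLOCKCLSTATE();
		clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
		NFSUNLOCKCLSTATE();
	}
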
+/*
+ * Use these macros to initialize/free a mutex.
+ */
+#define	NFSINITSOCKMUTEX(m)	mtx_init((m), "nfssock", NULL, MTX_DEF)
+#define	NFSFREEMUTEX(m)		mtx_destroy((m))
+
+int nfsmsleep(void *, void *, int, const char *, struct timespec *);
+
+/*
+ * And weird vm stuff in the nfs server.
+ */
+#define	PDIRUNLOCK	0x0
+#define	MAX_COMMIT_COUNT	(1024 * 1024)
+
+/*
+ * Define these to handle the type of va_rdev.
+ */
+#define	NFSMAKEDEV(m, n)	makedev((m), (n))
+#define	NFSMAJOR(d)		major(d)
+#define	NFSMINOR(d)		minor(d)
+
+/*
+ * The vnode tag for nfsv4root.
+ */
+#define	VT_NFSV4ROOT		"nfsv4root"
+
+/*
+ * Define whatever it takes to do a vn_rdwr().
+ */
+#define	NFSD_RDWR(r, v, b, l, o, s, i, c, a, p) \
+	vn_rdwr((r), (v), (b), (l), (o), (s), (i), (c), NULL, (a), (p))
+
+/*
+ * Macros for handling memory for different BSDen.
+ * NFSBCOPY(src, dst, len) - copies len bytes, non-overlapping
+ * NFSOVBCOPY(src, dst, len) - ditto, but data areas might overlap
+ * NFSBCMP(cp1, cp2, len) - compare len bytes, return 0 if same
+ * NFSBZERO(cp, len) - set len bytes to 0x0
+ */
+#define	NFSBCOPY(s, d, l)	bcopy((s), (d), (l))
+#define	NFSOVBCOPY(s, d, l)	ovbcopy((s), (d), (l))
+#define	NFSBCMP(s, d, l)	bcmp((s), (d), (l))
+#define	NFSBZERO(s, l)		bzero((s), (l))
+
+/*
+ * Some queue.h files don't have these defined in them.
+ */
+#define	LIST_END(head)		NULL
+#define	SLIST_END(head)		NULL
+#define	TAILQ_END(head)		NULL
+
+/*
+ * This must be defined to be a global variable that increments once
+ * per second, but never stops or goes backwards, even when a "date"
+ * command changes the TOD clock. It is used for delta times for
+ * leases, etc.
+ */
+#define	NFSD_MONOSEC		time_uptime
+
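Because NFSD_MONOSEC maps to time_uptime, which only moves forward, lease
bookkeeping is plain integer arithmetic against it. A tiny illustrative sketch
(not from the import):

	static time_t
	example_lease_expiry(u_int32_t lease)
	{

		return (NFSD_MONOSEC + lease);	/* unaffected by "date" changes */
	}

A lease has then run out once NFSD_MONOSEC exceeds the stored expiry value.
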
+/*
+ * Declare the malloc types.
+ */
+MALLOC_DECLARE(M_NEWNFSRVCACHE);
+MALLOC_DECLARE(M_NEWNFSDCLIENT);
+MALLOC_DECLARE(M_NEWNFSDSTATE);
+MALLOC_DECLARE(M_NEWNFSDLOCK);
+MALLOC_DECLARE(M_NEWNFSDLOCKFILE);
+MALLOC_DECLARE(M_NEWNFSSTRING);
+MALLOC_DECLARE(M_NEWNFSUSERGROUP);
+MALLOC_DECLARE(M_NEWNFSDREQ);
+MALLOC_DECLARE(M_NEWNFSFH);
+MALLOC_DECLARE(M_NEWNFSCLOWNER);
+MALLOC_DECLARE(M_NEWNFSCLOPEN);
+MALLOC_DECLARE(M_NEWNFSCLDELEG);
+MALLOC_DECLARE(M_NEWNFSCLCLIENT);
+MALLOC_DECLARE(M_NEWNFSCLLOCKOWNER);
+MALLOC_DECLARE(M_NEWNFSCLLOCK);
+MALLOC_DECLARE(M_NEWNFSDIROFF);
+MALLOC_DECLARE(M_NEWNFSV4NODE);
+MALLOC_DECLARE(M_NEWNFSDIRECTIO);
+MALLOC_DECLARE(M_NEWNFSMNT);
+MALLOC_DECLARE(M_NEWNFSDROLLBACK);
+MALLOC_DECLARE(M_NEWNFSLAYOUT);
+MALLOC_DECLARE(M_NEWNFSFLAYOUT);
+MALLOC_DECLARE(M_NEWNFSDEVINFO);
+MALLOC_DECLARE(M_NEWNFSSOCKREQ);
+MALLOC_DECLARE(M_NEWNFSCLDS);
+MALLOC_DECLARE(M_NEWNFSLAYRECALL);
+MALLOC_DECLARE(M_NEWNFSDSESSION);
+#define	M_NFSRVCACHE	M_NEWNFSRVCACHE
+#define	M_NFSDCLIENT	M_NEWNFSDCLIENT
+#define	M_NFSDSTATE	M_NEWNFSDSTATE
+#define	M_NFSDLOCK	M_NEWNFSDLOCK
+#define	M_NFSDLOCKFILE	M_NEWNFSDLOCKFILE
+#define	M_NFSSTRING	M_NEWNFSSTRING
+#define	M_NFSUSERGROUP	M_NEWNFSUSERGROUP
+#define	M_NFSDREQ	M_NEWNFSDREQ
+#define	M_NFSFH		M_NEWNFSFH
+#define	M_NFSCLOWNER	M_NEWNFSCLOWNER
+#define	M_NFSCLOPEN	M_NEWNFSCLOPEN
+#define	M_NFSCLDELEG	M_NEWNFSCLDELEG
+#define	M_NFSCLCLIENT	M_NEWNFSCLCLIENT
+#define	M_NFSCLLOCKOWNER M_NEWNFSCLLOCKOWNER
+#define	M_NFSCLLOCK	M_NEWNFSCLLOCK
+#define	M_NFSDIROFF	M_NEWNFSDIROFF
+#define	M_NFSV4NODE	M_NEWNFSV4NODE
+#define	M_NFSDIRECTIO	M_NEWNFSDIRECTIO
+#define	M_NFSDROLLBACK	M_NEWNFSDROLLBACK
+#define	M_NFSLAYOUT	M_NEWNFSLAYOUT
+#define	M_NFSFLAYOUT	M_NEWNFSFLAYOUT
+#define	M_NFSDEVINFO	M_NEWNFSDEVINFO
+#define	M_NFSSOCKREQ	M_NEWNFSSOCKREQ
+#define	M_NFSCLDS	M_NEWNFSCLDS
+#define	M_NFSLAYRECALL	M_NEWNFSLAYRECALL
+#define	M_NFSDSESSION	M_NEWNFSDSESSION
+
+#define	NFSINT_SIGMASK(set) 						\
+	(SIGISMEMBER(set, SIGINT) || SIGISMEMBER(set, SIGTERM) ||	\
+	 SIGISMEMBER(set, SIGHUP) || SIGISMEMBER(set, SIGKILL) ||	\
+	 SIGISMEMBER(set, SIGQUIT))
+
+/*
+ * Convert a quota block count to byte count.
+ */
+#define	NFSQUOTABLKTOBYTE(q, b)	(q) *= (b)
+
+/*
+ * Define this as the largest file size supported. (It should probably
+ * be available via a VFS_xxx Op, but it isn't.
+ */
+#define	NFSRV_MAXFILESIZE	((u_int64_t)0x800000000000)
+
+/*
+ * Set this macro to index() or strchr(), whichever is supported.
+ */
+#define	STRCHR(s, c)		strchr((s), (c))
+
+/*
+ * Set the n_time in the client write rpc, as required.
+ */
+#define	NFSWRITERPC_SETTIME(w, n, a, v4)				\
+	do {								\
+		if (w) {						\
+			NFSLOCKNODE(n);					\
+			(n)->n_mtime = (a)->na_mtime;			\
+			if (v4)						\
+				(n)->n_change = (a)->na_filerev;	\
+			NFSUNLOCKNODE(n);				\
+		}							\
+	} while (0)
+
+/*
+ * Fake value, just to make the client work.
+ */
+#define	NFS_LATTR_NOSHRINK	1
+
+/*
+ * Prototypes for functions where the arguments vary for different ports.
+ */
+int nfscl_loadattrcache(struct vnode **, struct nfsvattr *, void *, void *,
+    int, int);
+int newnfs_realign(struct mbuf **, int);
+bool ncl_pager_setsize(struct vnode *vp, u_quad_t *nsizep);
+
+/*
+ * If the port runs on an SMP box that can enforce Atomic ops with low
+ * overheads, define these as atomic increments/decrements. If not,
+ * don't worry about it, since these are used for stats that can be
+ * "out by one" without disastrous consequences.
+ */
+#define	NFSINCRGLOBAL(a)	((a)++)
+
+/*
+ * Assorted funky stuff to make things work under Darwin8.
+ */
+/*
+ * These macros check for a field in vattr being set.
+ */
+#define	NFSATTRISSET(t, v, a)	((v)->a != (t)VNOVAL)
+#define	NFSATTRISSETTIME(v, a)	((v)->a.tv_sec != VNOVAL)
+
+/*
+ * Manipulate mount flags.
+ */
+#define	NFSSTA_HASWRITEVERF	0x00040000  /* Has write verifier */
+#define	NFSSTA_GOTFSINFO	0x00100000  /* Got the fsinfo */
+#define	NFSSTA_OPENMODE		0x00200000  /* Must use correct open mode */
+#define	NFSSTA_FLEXFILE		0x00800000  /* Use Flex File Layout */
+#define	NFSSTA_NOLAYOUTCOMMIT	0x04000000  /* Don't do LayoutCommit */
+#define	NFSSTA_SESSPERSIST	0x08000000  /* Has a persistent session */
+#define	NFSSTA_TIMEO		0x10000000  /* Experiencing a timeout */
+#define	NFSSTA_LOCKTIMEO	0x20000000  /* Experiencing a lockd timeout */
+#define	NFSSTA_HASSETFSID	0x40000000  /* Has set the fsid */
+#define	NFSSTA_PNFS		0x80000000  /* pNFS is enabled */
+
+#define	NFSHASNFSV3(n)		((n)->nm_flag & NFSMNT_NFSV3)
+#define	NFSHASNFSV4(n)		((n)->nm_flag & NFSMNT_NFSV4)
+#define	NFSHASNFSV4N(n)		((n)->nm_minorvers > 0)
+#define	NFSHASNFSV3OR4(n)	((n)->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4))
+#define	NFSHASGOTFSINFO(n)	((n)->nm_state & NFSSTA_GOTFSINFO)
+#define	NFSHASHASSETFSID(n)	((n)->nm_state & NFSSTA_HASSETFSID)
+#define	NFSHASSTRICT3530(n)	((n)->nm_flag & NFSMNT_STRICT3530)
+#define	NFSHASWRITEVERF(n)	((n)->nm_state & NFSSTA_HASWRITEVERF)
+#define	NFSHASINT(n)		((n)->nm_flag & NFSMNT_INT)
+#define	NFSHASSOFT(n)		((n)->nm_flag & NFSMNT_SOFT)
+#define	NFSHASINTORSOFT(n)	((n)->nm_flag & (NFSMNT_INT | NFSMNT_SOFT))
+#define	NFSHASDUMBTIMR(n)	((n)->nm_flag & NFSMNT_DUMBTIMR)
+#define	NFSHASNOCONN(n)		((n)->nm_flag & NFSMNT_MNTD)
+#define	NFSHASKERB(n)		((n)->nm_flag & NFSMNT_KERB)
+#define	NFSHASALLGSSNAME(n)	((n)->nm_flag & NFSMNT_ALLGSSNAME)
+#define	NFSHASINTEGRITY(n)	((n)->nm_flag & NFSMNT_INTEGRITY)
+#define	NFSHASPRIVACY(n)	((n)->nm_flag & NFSMNT_PRIVACY)
+#define	NFSSETWRITEVERF(n)	((n)->nm_state |= NFSSTA_HASWRITEVERF)
+#define	NFSSETHASSETFSID(n)	((n)->nm_state |= NFSSTA_HASSETFSID)
+#define	NFSHASPNFSOPT(n)	((n)->nm_flag & NFSMNT_PNFS)
+#define	NFSHASNOLAYOUTCOMMIT(n)	((n)->nm_state & NFSSTA_NOLAYOUTCOMMIT)
+#define	NFSHASSESSPERSIST(n)	((n)->nm_state & NFSSTA_SESSPERSIST)
+#define	NFSHASPNFS(n)		((n)->nm_state & NFSSTA_PNFS)
+#define	NFSHASFLEXFILE(n)	((n)->nm_state & NFSSTA_FLEXFILE)
+#define	NFSHASOPENMODE(n)	((n)->nm_state & NFSSTA_OPENMODE)
+#define	NFSHASONEOPENOWN(n)	(((n)->nm_flag & NFSMNT_ONEOPENOWN) != 0 &&	\
+				    (n)->nm_minorvers > 0)
+
+/*
+ * Gets the stats field out of the mount structure.
+ */
+#define	vfs_statfs(m)	(&((m)->mnt_stat))
+
+/*
+ * Set boottime.
+ */
+#define	NFSSETBOOTTIME(b)	(getboottime(&b))
+
+/*
+ * The size of directory blocks in the buffer cache.
+ * MUST BE in the range of PAGE_SIZE <= NFS_DIRBLKSIZ <= MAXBSIZE!!
+ */
+#define	NFS_DIRBLKSIZ	(16 * DIRBLKSIZ) /* Must be a multiple of DIRBLKSIZ */
+
+/*
+ * Define these macros to access mnt_flag fields.
+ */
+#define	NFSMNT_RDONLY(m)	((m)->mnt_flag & MNT_RDONLY)
+#endif	/* _KERNEL */
+
+/*
+ * Define a structure similar to ufs_args for use in exporting the V4 root.
+ */
+struct nfsex_args {
+	char	*fspec;
+	struct export_args	export;
+};
+
+/*
+ * These export flags should be defined, but there are no bits left.
+ * Maybe a separate mnt_exflag field could be added or the mnt_flag
+ * field increased to 64 bits?
+ */
+#ifndef	MNT_EXSTRICTACCESS
+#define	MNT_EXSTRICTACCESS	0x0
+#endif
+#ifndef MNT_EXV4ONLY
+#define	MNT_EXV4ONLY		0x0
+#endif
+
+#ifdef _KERNEL
+/*
+ * Define this to invalidate the attribute cache for the nfs node.
+ */
+#define	NFSINVALATTRCACHE(n)	((n)->n_attrstamp = 0)
+
+/* Used for FreeBSD only */
+void nfsd_mntinit(void);
+
+/*
+ * Define these for vnode lock/unlock ops.
+ *
+ * These are good abstractions to macro out, so that they can be extended
+ * later for debugging, stats, etc.
+ */
+#define	NFSVOPLOCK(v, f)	vn_lock((v), (f))
+#define	NFSVOPUNLOCK(v, f)	VOP_UNLOCK((v), (f))
+#define	NFSVOPISLOCKED(v)	VOP_ISLOCKED((v))
+
+/*
+ * Define ncl_hash().
+ */
+#define	ncl_hash(f, l)	(fnv_32_buf((f), (l), FNV1_32_INIT))
+
+int newnfs_iosize(struct nfsmount *);
+
+int newnfs_vncmpf(struct vnode *, void *);
+
+#ifndef NFS_MINDIRATTRTIMO
+#define	NFS_MINDIRATTRTIMO 3		/* VDIR attrib cache timeout in sec */
+#endif
+#ifndef NFS_MAXDIRATTRTIMO
+#define	NFS_MAXDIRATTRTIMO 60
+#endif
+
+/*
+ * NFS outstanding request list element
+ */
+struct nfsreq {
+	TAILQ_ENTRY(nfsreq) r_chain;
+	u_int32_t	r_flags;	/* flags on request, see below */
+	struct nfsmount *r_nmp;		/* Client mnt ptr */
+	struct mtx	r_mtx;		/* Mutex lock for this structure */
+};
+
+#ifndef NFS_MAXBSIZE
+#define	NFS_MAXBSIZE	(maxbcachebuf)
+#endif
+
+/*
+ * This macro checks to see if issuing of delegations is allowed for this
+ * vnode.
+ */
+#ifdef VV_DISABLEDELEG
+#define	NFSVNO_DELEGOK(v)						\
+	((v) == NULL || ((v)->v_vflag & VV_DISABLEDELEG) == 0)
+#else
+#define	NFSVNO_DELEGOK(v)	(1)
+#endif
+
+/*
+ * Name used by getnewvnode() to describe filesystem, "nfs".
+ * For performance reasons it is useful to have the same string
+ * used in both places that call getnewvnode().
+ */
+extern const char nfs_vnode_tag[];
+
+/*
+ * Check for the errors that indicate a DS should be disabled.
+ * ENXIO indicates that the krpc cannot do an RPC on the DS.
+ * EIO is returned by the RPC as an indication of I/O problems on the
+ * server.
+ * Are there other fatal errors?
+ */
+#define	nfsds_failerr(e)	((e) == ENXIO || (e) == EIO)
+
+/*
+ * Get a pointer to the MDS session, which is always the first element
+ * in the list.
+ * This macro can only be safely used when the NFSLOCKMNT() lock is held.
+ * The inline function can be used when the lock isn't held.
+ */
+#define	NFSMNT_MDSSESSION(m)	(&(TAILQ_FIRST(&((m)->nm_sess))->nfsclds_sess))
+
+static __inline struct nfsclsession *
+nfsmnt_mdssession(struct nfsmount *nmp)
+{
+	struct nfsclsession *tsep;
+
+	tsep = NULL;
+	mtx_lock(&nmp->nm_mtx);
+	if (TAILQ_FIRST(&nmp->nm_sess) != NULL)
+		tsep = NFSMNT_MDSSESSION(nmp);
+	mtx_unlock(&nmp->nm_mtx);
+	return (tsep);
+}
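
A minimal caller sketch (the error handling is hypothetical, not from the
import): code that does not already hold the mount lock uses the inline and
must check for NULL before dereferencing the session:

    struct nfsclsession *sep;

    sep = nfsmnt_mdssession(nmp);    /* nmp: struct nfsmount * */
    if (sep == NULL)
        return (ENXIO);              /* hypothetical: no session yet */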
+
+#endif	/* _KERNEL */
+
+#endif	/* _NFS_NFSPORT_H */
diff --git a/freebsd/sys/fs/nfs/nfsproto.h b/freebsd/sys/fs/nfs/nfsproto.h
new file mode 100644
index 0000000..2e9f6d7
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsproto.h
@@ -0,0 +1,1426 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSPROTO_H_
+#define	_NFS_NFSPROTO_H_
+
+/*
+ * nfs definitions as per the Version 2, 3 and 4 specs
+ */
+
+/*
+ * Constants as defined in the NFS Version 2, 3 and 4 specs:
+ * "NFS: Network File System Protocol Specification" (RFC 1094) and the
+ * "NFS: Network File System Version 3 Protocol Specification" (RFC 1813).
+ */
+
+#define	NFS_PORT	2049
+#define	NFS_PROG	100003
+#define	NFS_CALLBCKPROG	0x40000000	/* V4 only */
+#define	NFS_VER2	2
+#define	NFS_VER3	3
+#define	NFS_VER4	4
+#define	NFS_V2MAXDATA	8192
+#define	NFS_MAXDGRAMDATA 16384
+#define	NFS_MAXPATHLEN	1024
+#define	NFS_MAXNAMLEN	255
+/*
+ * Calculating the maximum XDR overhead for an NFS RPC isn't easy.
+ * NFS_MAXPKTHDR is antiquated and assumes AUTH_SYS over UDP.
+ * NFS_MAXXDR should be sufficient for all NFS versions over TCP.
+ * It includes:
+ * - Maximum RPC message header. It can include two 400-byte authenticators plus
+ *   a machine name of unlimited length, although it is usually relatively
+ *   small.
+ * - XDR overheads for the NFSv4 compound. This can include Owner and
+ *   Owner_group strings, which are usually fairly small, but are allowed
+ *   to be up to 1024 bytes each.
+ * 4096 is overkill, but should always be sufficient.
+ */
+#define	NFS_MAXPKTHDR	404
+#define	NFS_MAXXDR	4096
+#define	NFS_MAXPACKET	(NFS_SRVMAXIO + NFS_MAXXDR)
+#define	NFS_MINPACKET	20
+#define	NFS_FABLKSIZE	512	/* Size in bytes of a block wrt fa_blocks */
+#define	NFSV4_MINORVERSION	0	/* V4 Minor version */
+#define	NFSV41_MINORVERSION	1	/* V4 Minor version */
+#define	NFSV4_CBVERS		1	/* V4 CB Version */
+#define	NFSV41_CBVERS		4	/* V4.1 CB Version */
+#define	NFSV4_SMALLSTR	50		/* Strings small enough for stack */
+
+/*
+ * This value isn't a fixed value in the RFCs.
+ * It is the maximum data size supported by NFSv3 or NFSv4 over TCP for
+ * the server.  It should be set to the I/O size preferred by ZFS or
+ * MAXBSIZE, whichever is greater.
+ * ZFS currently prefers 128K.
+ * It used to be called NFS_MAXDATA, but has been renamed to clarify that
+ * it refers to server side only and doesn't conflict with the NFS_MAXDATA
+ * defined in rpcsvc/nfs_prot.h for userland.
+ */
+#define	NFS_SRVMAXIO	(128 * 1024)
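
With the defaults above, the worst-case packet bound is just the sum of the
two definitions:

    NFS_MAXPACKET = NFS_SRVMAXIO + NFS_MAXXDR
                  = 128 * 1024 + 4096
                  = 135168 bytes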
+
+/* Stat numbers for rpc returns (version 2, 3 and 4) */
+/*
+ * These numbers are hard-wired in the RFCs, so they can't be changed.
+ * The code currently assumes that the ones < 10000 are the same as
+ * sys/errno.h and that sys/errno.h will never go as high as 10000.
+ * If the value in sys/errno.h of any entry listed below is changed,
+ * the NFS code must be modified to do the mapping between them.
+ * (You can ignore NFSERR_WFLUSH, since it is never actually used.)
+ */
+#define	NFSERR_OK		0
+#define	NFSERR_PERM		1
+#define	NFSERR_NOENT		2
+#define	NFSERR_IO		5
+#define	NFSERR_NXIO		6
+#define	NFSERR_ACCES		13
+#define	NFSERR_EXIST		17
+#define	NFSERR_XDEV		18	/* Version 3, 4 only */
+#define	NFSERR_NODEV		19
+#define	NFSERR_NOTDIR		20
+#define	NFSERR_ISDIR		21
+#define	NFSERR_INVAL		22	/* Version 3, 4 only */
+#define	NFSERR_FBIG		27
+#define	NFSERR_NOSPC		28
+#define	NFSERR_ROFS		30
+#define	NFSERR_MLINK		31	/* Version 3, 4 only */
+#define	NFSERR_NAMETOL		63
+#define	NFSERR_NOTEMPTY		66
+#define	NFSERR_DQUOT		69
+#define	NFSERR_STALE		70
+#define	NFSERR_REMOTE		71	/* Version 3 only */
+#define	NFSERR_WFLUSH		99	/* Version 2 only */
+#define	NFSERR_BADHANDLE	10001	/* These are Version 3, 4 only */
+#define	NFSERR_NOT_SYNC		10002	/* Version 3 Only */
+#define	NFSERR_BAD_COOKIE	10003
+#define	NFSERR_NOTSUPP		10004
+#define	NFSERR_TOOSMALL		10005
+#define	NFSERR_SERVERFAULT	10006
+#define	NFSERR_BADTYPE		10007
+#define	NFSERR_DELAY		10008	/* Called NFSERR_JUKEBOX for V3 */
+#define	NFSERR_SAME		10009	/* These are Version 4 only */
+#define	NFSERR_DENIED		10010
+#define	NFSERR_EXPIRED		10011
+#define	NFSERR_LOCKED		10012
+#define	NFSERR_GRACE		10013
+#define	NFSERR_FHEXPIRED	10014
+#define	NFSERR_SHAREDENIED	10015
+#define	NFSERR_WRONGSEC		10016
+#define	NFSERR_CLIDINUSE	10017
+#define	NFSERR_RESOURCE		10018
+#define	NFSERR_MOVED		10019
+#define	NFSERR_NOFILEHANDLE	10020
+#define	NFSERR_MINORVERMISMATCH	10021
+#define	NFSERR_STALECLIENTID	10022
+#define	NFSERR_STALESTATEID	10023
+#define	NFSERR_OLDSTATEID	10024
+#define	NFSERR_BADSTATEID	10025
+#define	NFSERR_BADSEQID		10026
+#define	NFSERR_NOTSAME		10027
+#define	NFSERR_LOCKRANGE	10028
+#define	NFSERR_SYMLINK		10029
+#define	NFSERR_RESTOREFH	10030
+#define	NFSERR_LEASEMOVED	10031
+#define	NFSERR_ATTRNOTSUPP	10032
+#define	NFSERR_NOGRACE		10033
+#define	NFSERR_RECLAIMBAD	10034
+#define	NFSERR_RECLAIMCONFLICT	10035
+#define	NFSERR_BADXDR		10036
+#define	NFSERR_LOCKSHELD	10037
+#define	NFSERR_OPENMODE		10038
+#define	NFSERR_BADOWNER		10039
+#define	NFSERR_BADCHAR		10040
+#define	NFSERR_BADNAME		10041
+#define	NFSERR_BADRANGE		10042
+#define	NFSERR_LOCKNOTSUPP	10043
+#define	NFSERR_OPILLEGAL	10044
+#define	NFSERR_DEADLOCK		10045
+#define	NFSERR_FILEOPEN		10046
+#define	NFSERR_ADMINREVOKED	10047
+#define	NFSERR_CBPATHDOWN	10048
+
+/* NFSv4.1 specific errors. */
+#define	NFSERR_BADIOMODE	10049
+#define	NFSERR_BADLAYOUT	10050
+#define	NFSERR_BADSESSIONDIGEST	10051
+#define	NFSERR_BADSESSION	10052
+#define	NFSERR_BADSLOT		10053
+#define	NFSERR_COMPLETEALREADY	10054
+#define	NFSERR_NOTBNDTOSESS	10055
+#define	NFSERR_DELEGALREADYWANT	10056
+#define	NFSERR_BACKCHANBUSY	10057
+#define	NFSERR_LAYOUTTRYLATER	10058
+#define	NFSERR_LAYOUTUNAVAIL	10059
+#define	NFSERR_NOMATCHLAYOUT	10060
+#define	NFSERR_RECALLCONFLICT	10061
+#define	NFSERR_UNKNLAYOUTTYPE	10062
+#define	NFSERR_SEQMISORDERED	10063
+#define	NFSERR_SEQUENCEPOS	10064
+#define	NFSERR_REQTOOBIG	10065
+#define	NFSERR_REPTOOBIG	10066
+#define	NFSERR_REPTOOBIGTOCACHE	10067
+#define	NFSERR_RETRYUNCACHEDREP	10068
+#define	NFSERR_UNSAFECOMPOUND	10069
+#define	NFSERR_TOOMANYOPS	10070
+#define	NFSERR_OPNOTINSESS	10071
+#define	NFSERR_HASHALGUNSUPP	10072
+#define	NFSERR_CLIENTIDBUSY	10074
+#define	NFSERR_PNFSIOHOLE	10075
+#define	NFSERR_SEQFALSERETRY	10076
+#define	NFSERR_BADHIGHSLOT	10077
+#define	NFSERR_DEADSESSION	10078
+#define	NFSERR_ENCRALGUNSUPP	10079
+#define	NFSERR_PNFSNOLAYOUT	10080
+#define	NFSERR_NOTONLYOP	10081
+#define	NFSERR_WRONGCRED	10082
+#define	NFSERR_WRONGTYPE	10083
+#define	NFSERR_DIRDELEGUNAVAIL	10084
+#define	NFSERR_REJECTDELEG	10085
+#define	NFSERR_RETURNCONFLICT	10086
+#define	NFSERR_DELEGREVOKED	10087
+
+#define	NFSERR_STALEWRITEVERF	30001	/* Fake return for nfs_commit() */
+#define	NFSERR_DONTREPLY	30003	/* Don't process request */
+#define	NFSERR_RETVOID		30004	/* Return void, not error */
+#define	NFSERR_REPLYFROMCACHE	30005	/* Reply from recent request cache */
+#define	NFSERR_STALEDONTRECOVER	30006	/* Don't initiate recovery */
+
+#define	NFSERR_RPCERR		0x40000000 /* Mark an RPC layer error */
+#define	NFSERR_AUTHERR		0x80000000 /* Mark an authentication error */
+
+#define	NFSERR_RPCMISMATCH	(NFSERR_RPCERR | RPC_MISMATCH)
+#define	NFSERR_PROGUNAVAIL	(NFSERR_RPCERR | RPC_PROGUNAVAIL)
+#define	NFSERR_PROGMISMATCH	(NFSERR_RPCERR | RPC_PROGMISMATCH)
+#define	NFSERR_PROGNOTV4	(NFSERR_RPCERR | 0xffff)
+#define	NFSERR_PROCUNAVAIL	(NFSERR_RPCERR | RPC_PROCUNAVAIL)
+#define	NFSERR_GARBAGE		(NFSERR_RPCERR | RPC_GARBAGE)
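
A hedged illustration of the errno-reuse rule described above (the helper
name and the fallback value are hypothetical, not part of the import): stat
values below 10000 can be handed back as errno values directly, and only the
protocol-only codes need an explicit mapping.

    /* Hypothetical helper; illustrates the < 10000 pass-through only. */
    static int
    nfserr_to_errno(int nfserr)
    {
        if (nfserr > 0 && nfserr < 10000)
            return (nfserr);    /* e.g. NFSERR_ACCES == EACCES == 13 */
        return (EIO);           /* placeholder for the 10000+ codes */
    }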
+
+/* Sizes in bytes of various nfs rpc components */
+#define	NFSX_UNSIGNED	4
+#define	NFSX_HYPER	(2 * NFSX_UNSIGNED)
+
+/* specific to NFS Version 2 */
+#define	NFSX_V2FH	32
+#define	NFSX_V2FATTR	68
+#define	NFSX_V2SATTR	32
+#define	NFSX_V2COOKIE	4
+#define	NFSX_V2STATFS	20
+
+/* specific to NFS Version 3 */
+#define	NFSX_V3FHMAX		64	/* max. allowed by protocol */
+#define	NFSX_V3FATTR		84
+#define	NFSX_V3SATTR		60	/* max. all fields filled in */
+#define	NFSX_V3SRVSATTR		(sizeof (struct nfsv3_sattr))
+#define	NFSX_V3POSTOPATTR	(NFSX_V3FATTR + NFSX_UNSIGNED)
+#define	NFSX_V3WCCDATA		(NFSX_V3POSTOPATTR + 8 * NFSX_UNSIGNED)
+#define	NFSX_V3STATFS		52
+#define	NFSX_V3FSINFO		48
+#define	NFSX_V3PATHCONF		24
+
+/* specific to NFS Version 4 */
+#define	NFSX_V4FHMAX		128
+#define	NFSX_V4FSID		(2 * NFSX_HYPER)
+#define	NFSX_V4SPECDATA		(2 * NFSX_UNSIGNED)
+#define	NFSX_V4TIME		(NFSX_HYPER + NFSX_UNSIGNED)
+#define	NFSX_V4SETTIME		(NFSX_UNSIGNED + NFSX_V4TIME)
+#define	NFSX_V4SESSIONID	16
+#define	NFSX_V4DEVICEID		16
+#define	NFSX_V4PNFSFH		(sizeof(fhandle_t) + 1)
+#define	NFSX_V4FILELAYOUT	(4 * NFSX_UNSIGNED + NFSX_V4DEVICEID +	\
+				 NFSX_HYPER + NFSM_RNDUP(NFSX_V4PNFSFH))
+#define	NFSX_V4FLEXLAYOUT(m)	(NFSX_HYPER + 3 * NFSX_UNSIGNED +		\
+    ((m) * (NFSX_V4DEVICEID + NFSX_STATEID + NFSM_RNDUP(NFSX_V4PNFSFH) +	\
+    8 * NFSX_UNSIGNED)))
+
+/* sizes common to multiple NFS versions */
+#define	NFSX_FHMAX		(NFSX_V4FHMAX)
+#define	NFSX_MYFH		(sizeof (fhandle_t)) /* size this server uses */
+#define	NFSX_VERF 		8
+#define	NFSX_STATEIDOTHER	12
+#define	NFSX_STATEID		(NFSX_UNSIGNED + NFSX_STATEIDOTHER)
+#define	NFSX_GSSH		12
+
+/* variants for multiple versions */
+#define	NFSX_STATFS(v3)		((v3) ? NFSX_V3STATFS : NFSX_V2STATFS)
+
+/*
+ * Beware.  NFSPROC_NULL and friends are defined in
+ * <rpcsvc/nfs_prot.h> as well and the numbers are different.
+ */
+#ifndef	NFSPROC_NULL
+/* nfs rpc procedure numbers (before version mapping) */
+#define	NFSPROC_NULL		0
+#define	NFSPROC_GETATTR		1
+#define	NFSPROC_SETATTR		2
+#define	NFSPROC_LOOKUP		3
+#define	NFSPROC_ACCESS		4
+#define	NFSPROC_READLINK	5
+#define	NFSPROC_READ		6
+#define	NFSPROC_WRITE		7
+#define	NFSPROC_CREATE		8
+#define	NFSPROC_MKDIR		9
+#define	NFSPROC_SYMLINK		10
+#define	NFSPROC_MKNOD		11
+#define	NFSPROC_REMOVE		12
+#define	NFSPROC_RMDIR		13
+#define	NFSPROC_RENAME		14
+#define	NFSPROC_LINK		15
+#define	NFSPROC_READDIR		16
+#define	NFSPROC_READDIRPLUS	17
+#define	NFSPROC_FSSTAT		18
+#define	NFSPROC_FSINFO		19
+#define	NFSPROC_PATHCONF	20
+#define	NFSPROC_COMMIT		21
+#endif	/* NFSPROC_NULL */
+
+/*
+ * The lower numbers, up to 21, are used by NFSv2 and v3. These define the
+ * higher numbers used by NFSv4.
+ * NFS_V3NPROCS is one greater than the last V3 op and NFS_NPROCS is
+ * one greater than the last number.
+ */
+#ifndef	NFS_V3NPROCS
+#define	NFS_V3NPROCS		22
+
+#define	NFSPROC_LOOKUPP		22
+#define	NFSPROC_SETCLIENTID	23
+#define	NFSPROC_SETCLIENTIDCFRM	24
+#define	NFSPROC_LOCK		25
+#define	NFSPROC_LOCKU		26
+#define	NFSPROC_OPEN		27
+#define	NFSPROC_CLOSE		28
+#define	NFSPROC_OPENCONFIRM	29
+#define	NFSPROC_LOCKT		30
+#define	NFSPROC_OPENDOWNGRADE	31
+#define	NFSPROC_RENEW		32
+#define	NFSPROC_PUTROOTFH	33
+#define	NFSPROC_RELEASELCKOWN	34
+#define	NFSPROC_DELEGRETURN	35
+#define	NFSPROC_RETDELEGREMOVE	36
+#define	NFSPROC_RETDELEGRENAME1	37
+#define	NFSPROC_RETDELEGRENAME2	38
+#define	NFSPROC_GETACL		39
+#define	NFSPROC_SETACL		40
+
+/*
+ * Must be defined as one higher than the last Proc# above.
+ */
+#define	NFSV4_NPROCS		41
+
+/* Additional procedures for NFSv4.1. */
+#define	NFSPROC_EXCHANGEID	41
+#define	NFSPROC_CREATESESSION	42
+#define	NFSPROC_DESTROYSESSION	43
+#define	NFSPROC_DESTROYCLIENT	44
+#define	NFSPROC_FREESTATEID	45
+#define	NFSPROC_LAYOUTGET	46
+#define	NFSPROC_GETDEVICEINFO	47
+#define	NFSPROC_LAYOUTCOMMIT	48
+#define	NFSPROC_LAYOUTRETURN	49
+#define	NFSPROC_RECLAIMCOMPL	50
+#define	NFSPROC_WRITEDS		51
+#define	NFSPROC_READDS		52
+#define	NFSPROC_COMMITDS	53
+#define	NFSPROC_OPENLAYGET	54
+#define	NFSPROC_CREATELAYGET	55
+
+/*
+ * Must be defined as one higher than the last NFSv4.1 Proc# above.
+ */
+#define	NFSV41_NPROCS		56
+
+#endif	/* NFS_V3NPROCS */
+
+/*
+ * Define NFS_NPROCS as NFSV4_NPROCS for the experimental kernel code.
+ */
+#ifndef	NFS_NPROCS
+#define	NFS_NPROCS		NFSV4_NPROCS
+#endif
+
+/*
+ * NFSPROC_NOOP is a fake op# that can't be the same as any V2/3/4 Procedure
+ * or Operation#. Since the NFS V4 Op #s go higher, use NFSV42_NOPS, which
+ * is one greater than the highest Op#.
+ */
+#define	NFSPROC_NOOP		NFSV42_NOPS
+
+/* Actual Version 2 procedure numbers */
+#define	NFSV2PROC_NULL		0
+#define	NFSV2PROC_GETATTR	1
+#define	NFSV2PROC_SETATTR	2
+#define	NFSV2PROC_NOOP		3
+#define	NFSV2PROC_ROOT		NFSV2PROC_NOOP	/* Obsolete */
+#define	NFSV2PROC_LOOKUP	4
+#define	NFSV2PROC_READLINK	5
+#define	NFSV2PROC_READ		6
+#define	NFSV2PROC_WRITECACHE	NFSV2PROC_NOOP	/* Obsolete */
+#define	NFSV2PROC_WRITE		8
+#define	NFSV2PROC_CREATE	9
+#define	NFSV2PROC_REMOVE	10
+#define	NFSV2PROC_RENAME	11
+#define	NFSV2PROC_LINK		12
+#define	NFSV2PROC_SYMLINK	13
+#define	NFSV2PROC_MKDIR		14
+#define	NFSV2PROC_RMDIR		15
+#define	NFSV2PROC_READDIR	16
+#define	NFSV2PROC_STATFS	17
+
+/*
+ * V4 Procedure numbers
+ */
+#define	NFSV4PROC_COMPOUND	1
+#define	NFSV4PROC_CBNULL	0
+#define	NFSV4PROC_CBCOMPOUND	1
+
+/*
+ * Constants used by the Version 3 and 4 protocols for various RPCs
+ */
+#define	NFSV3SATTRTIME_DONTCHANGE	0
+#define	NFSV3SATTRTIME_TOSERVER		1
+#define	NFSV3SATTRTIME_TOCLIENT		2
+
+#define	NFSV4SATTRTIME_TOSERVER		0
+#define	NFSV4SATTRTIME_TOCLIENT		1
+
+#define	NFSV4LOCKT_READ			1
+#define	NFSV4LOCKT_WRITE		2
+#define	NFSV4LOCKT_READW		3
+#define	NFSV4LOCKT_WRITEW		4
+#define	NFSV4LOCKT_RELEASE		5
+
+#define	NFSV4OPEN_NOCREATE		0
+#define	NFSV4OPEN_CREATE		1
+#define	NFSV4OPEN_CLAIMNULL		0
+#define	NFSV4OPEN_CLAIMPREVIOUS		1
+#define	NFSV4OPEN_CLAIMDELEGATECUR	2
+#define	NFSV4OPEN_CLAIMDELEGATEPREV	3
+#define	NFSV4OPEN_CLAIMFH		4
+#define	NFSV4OPEN_CLAIMDELEGATECURFH	5
+#define	NFSV4OPEN_CLAIMDELEGATEPREVFH	6
+#define	NFSV4OPEN_DELEGATENONE		0
+#define	NFSV4OPEN_DELEGATEREAD		1
+#define	NFSV4OPEN_DELEGATEWRITE		2
+#define	NFSV4OPEN_DELEGATENONEEXT	3
+#define	NFSV4OPEN_LIMITSIZE		1
+#define	NFSV4OPEN_LIMITBLOCKS		2
+
+/*
+ * NFS V4 ACE stuff
+ */
+#define	NFSV4ACE_ALLOWEDTYPE		0x00000000
+#define	NFSV4ACE_DENIEDTYPE		0x00000001
+#define	NFSV4ACE_AUDITTYPE		0x00000002
+#define	NFSV4ACE_ALARMTYPE		0x00000003
+
+#define	NFSV4ACE_SUPALLOWED		0x00000001
+#define	NFSV4ACE_SUPDENIED		0x00000002
+#define	NFSV4ACE_SUPAUDIT		0x00000004
+#define	NFSV4ACE_SUPALARM		0x00000008
+
+#define	NFSV4ACE_SUPTYPES	(NFSV4ACE_SUPALLOWED | NFSV4ACE_SUPDENIED)
+
+#define	NFSV4ACE_FILEINHERIT		0x00000001
+#define	NFSV4ACE_DIRECTORYINHERIT	0x00000002
+#define	NFSV4ACE_NOPROPAGATEINHERIT	0x00000004
+#define	NFSV4ACE_INHERITONLY		0x00000008
+#define	NFSV4ACE_SUCCESSFULACCESS	0x00000010
+#define	NFSV4ACE_FAILEDACCESS		0x00000020
+#define	NFSV4ACE_IDENTIFIERGROUP	0x00000040
+
+#define	NFSV4ACE_READDATA		0x00000001
+#define	NFSV4ACE_LISTDIRECTORY		0x00000001
+#define	NFSV4ACE_WRITEDATA		0x00000002
+#define	NFSV4ACE_ADDFILE		0x00000002
+#define	NFSV4ACE_APPENDDATA		0x00000004
+#define	NFSV4ACE_ADDSUBDIRECTORY	0x00000004
+#define	NFSV4ACE_READNAMEDATTR		0x00000008
+#define	NFSV4ACE_WRITENAMEDATTR		0x00000010
+#define	NFSV4ACE_EXECUTE		0x00000020
+#define	NFSV4ACE_SEARCH			0x00000020
+#define	NFSV4ACE_DELETECHILD		0x00000040
+#define	NFSV4ACE_READATTRIBUTES		0x00000080
+#define	NFSV4ACE_WRITEATTRIBUTES	0x00000100
+#define	NFSV4ACE_DELETE			0x00010000
+#define	NFSV4ACE_READACL		0x00020000
+#define	NFSV4ACE_WRITEACL		0x00040000
+#define	NFSV4ACE_WRITEOWNER		0x00080000
+#define	NFSV4ACE_SYNCHRONIZE		0x00100000
+
+/*
+ * Here are the mappings between mode bits and acl mask bits for
+ * directories and other files.
+ * (Named attributes have not been included, since named attributes are
+ *  not yet supported.)
+ * The mailing list seems to indicate that NFSV4ACE_EXECUTE refers to
+ * searching a directory, although I can't find a statement of that in
+ * the RFC.
+ */
+#define	NFSV4ACE_ALLFILESMASK	(NFSV4ACE_READATTRIBUTES | NFSV4ACE_READACL)
+#define	NFSV4ACE_OWNERMASK	(NFSV4ACE_WRITEATTRIBUTES | NFSV4ACE_WRITEACL)
+#define	NFSV4ACE_DIRREADMASK	NFSV4ACE_LISTDIRECTORY
+#define	NFSV4ACE_DIREXECUTEMASK	NFSV4ACE_EXECUTE
+#define	NFSV4ACE_DIRWRITEMASK	(NFSV4ACE_ADDFILE | 			\
+		NFSV4ACE_ADDSUBDIRECTORY | NFSV4ACE_DELETECHILD)
+#define	NFSV4ACE_READMASK	NFSV4ACE_READDATA
+#define	NFSV4ACE_WRITEMASK	(NFSV4ACE_WRITEDATA | NFSV4ACE_APPENDDATA)
+#define	NFSV4ACE_EXECUTEMASK	NFSV4ACE_EXECUTE
+#define	NFSV4ACE_ALLFILEBITS	(NFSV4ACE_READMASK | NFSV4ACE_WRITEMASK | \
+	NFSV4ACE_EXECUTEMASK | NFSV4ACE_SYNCHRONIZE)
+#define	NFSV4ACE_ALLDIRBITS	(NFSV4ACE_DIRREADMASK | 		\
+	NFSV4ACE_DIRWRITEMASK | NFSV4ACE_DIREXECUTEMASK)
+#define	NFSV4ACE_AUDITMASK	0x0
+
+/*
+ * These GENERIC masks are not used and are no longer believed to be useful.
+ */
+#define	NFSV4ACE_GENERICREAD		0x00120081
+#define	NFSV4ACE_GENERICWRITE		0x00160106
+#define	NFSV4ACE_GENERICEXECUTE		0x001200a0
+
+#define	NFSSTATEID_PUTALLZERO		0
+#define	NFSSTATEID_PUTALLONE		1
+#define	NFSSTATEID_PUTSTATEID		2
+#define	NFSSTATEID_PUTSEQIDZERO		3
+
+/*
+ * Bits for share access and deny.
+ */
+#define	NFSV4OPEN_ACCESSREAD		0x00000001
+#define	NFSV4OPEN_ACCESSWRITE		0x00000002
+#define	NFSV4OPEN_ACCESSBOTH		0x00000003
+#define	NFSV4OPEN_WANTDELEGMASK		0x0000ff00
+#define	NFSV4OPEN_WANTREADDELEG		0x00000100
+#define	NFSV4OPEN_WANTWRITEDELEG	0x00000200
+#define	NFSV4OPEN_WANTANYDELEG		0x00000300
+#define	NFSV4OPEN_WANTNODELEG		0x00000400
+#define	NFSV4OPEN_WANTCANCEL		0x00000500
+#define	NFSV4OPEN_WANTSIGNALDELEG	0x00010000
+#define	NFSV4OPEN_WANTPUSHDELEG		0x00020000
+
+#define	NFSV4OPEN_DENYNONE		0x00000000
+#define	NFSV4OPEN_DENYREAD		0x00000001
+#define	NFSV4OPEN_DENYWRITE		0x00000002
+#define	NFSV4OPEN_DENYBOTH		0x00000003
+
+/*
+ * Delegate_none_ext reply values.
+ */
+#define	NFSV4OPEN_NOTWANTED		0
+#define	NFSV4OPEN_CONTENTION		1
+#define	NFSV4OPEN_RESOURCE		2
+#define	NFSV4OPEN_NOTSUPPFTYPE		3
+#define	NFSV4OPEN_NOTSUPPWRITEFTYPE	4
+#define	NFSV4OPEN_NOTSUPPUPGRADE	5
+#define	NFSV4OPEN_NOTSUPPDOWNGRADE	6
+#define	NFSV4OPEN_CANCELLED		7
+#define	NFSV4OPEN_ISDIR			8
+
+/*
+ * Open result flags
+ * (The first four are in the spec. The rest are used internally.)
+ */
+#define	NFSV4OPEN_RESULTCONFIRM		0x00000002
+#define	NFSV4OPEN_LOCKTYPEPOSIX		0x00000004
+#define	NFSV4OPEN_PRESERVEUNLINKED	0x00000008
+#define	NFSV4OPEN_MAYNOTIFYLOCK		0x00000020
+#define	NFSV4OPEN_RFLAGS 						\
+    (NFSV4OPEN_RESULTCONFIRM | NFSV4OPEN_LOCKTYPEPOSIX |		\
+    NFSV4OPEN_PRESERVEUNLINKED | NFSV4OPEN_MAYNOTIFYLOCK)
+#define	NFSV4OPEN_RECALL		0x00010000
+#define	NFSV4OPEN_READDELEGATE		0x00020000
+#define	NFSV4OPEN_WRITEDELEGATE		0x00040000
+#define	NFSV4OPEN_WDRESOURCE		0x00080000
+#define	NFSV4OPEN_WDCONTENTION		0x00100000
+#define	NFSV4OPEN_WDNOTWANTED		0x00200000
+#define	NFSV4OPEN_WDSUPPFTYPE		0x00400000
+
+/*
+ * NFS V4 File Handle types
+ */
+#define	NFSV4FHTYPE_PERSISTENT		0x0
+#define	NFSV4FHTYPE_NOEXPIREWITHOPEN	0x1
+#define	NFSV4FHTYPE_VOLATILEANY		0x2
+#define	NFSV4FHTYPE_VOLATILEMIGRATE	0x4
+#define	NFSV4FHTYPE_VOLATILERENAME	0x8
+
+/*
+ * Maximum size of V4 opaque strings.
+ */
+#define	NFSV4_OPAQUELIMIT	1024
+
+/*
+ * These are the same for V3 and V4.
+ */
+#define	NFSACCESS_READ			0x01
+#define	NFSACCESS_LOOKUP		0x02
+#define	NFSACCESS_MODIFY		0x04
+#define	NFSACCESS_EXTEND		0x08
+#define	NFSACCESS_DELETE		0x10
+#define	NFSACCESS_EXECUTE		0x20
+
+#define	NFSWRITE_UNSTABLE		0
+#define	NFSWRITE_DATASYNC		1
+#define	NFSWRITE_FILESYNC		2
+
+#define	NFSCREATE_UNCHECKED		0
+#define	NFSCREATE_GUARDED		1
+#define	NFSCREATE_EXCLUSIVE		2
+#define	NFSCREATE_EXCLUSIVE41		3
+
+#define	NFSV3FSINFO_LINK		0x01
+#define	NFSV3FSINFO_SYMLINK		0x02
+#define	NFSV3FSINFO_HOMOGENEOUS		0x08
+#define	NFSV3FSINFO_CANSETTIME		0x10
+
+/* Flags for Exchange ID */
+#define	NFSV4EXCH_SUPPMOVEDREFER	0x00000001
+#define	NFSV4EXCH_SUPPMOVEDMIGR	0x00000002
+#define	NFSV4EXCH_BINDPRINCSTATEID	0x00000100
+#define	NFSV4EXCH_USENONPNFS		0x00010000
+#define	NFSV4EXCH_USEPNFSMDS		0x00020000
+#define	NFSV4EXCH_USEPNFSDS		0x00040000
+#define	NFSV4EXCH_MASKPNFS		0x00070000
+#define	NFSV4EXCH_UPDCONFIRMEDRECA	0x40000000
+#define	NFSV4EXCH_CONFIRMEDR		0x80000000
+
+/* State Protects */
+#define	NFSV4EXCH_SP4NONE		0
+#define	NFSV4EXCH_SP4MACHCRED		1
+#define	NFSV4EXCH_SP4SSV		2
+
+/* Flags for Create Session */
+#define	NFSV4CRSESS_PERSIST		0x00000001
+#define	NFSV4CRSESS_CONNBACKCHAN	0x00000002
+#define	NFSV4CRSESS_CONNRDMA		0x00000004
+
+/* Flags for Sequence */
+#define	NFSV4SEQ_CBPATHDOWN		0x00000001
+#define	NFSV4SEQ_CBGSSCONTEXPIRING	0x00000002
+#define	NFSV4SEQ_CBGSSCONTEXPIRED	0x00000004
+#define	NFSV4SEQ_EXPIREDALLSTATEREVOKED	0x00000008
+#define	NFSV4SEQ_EXPIREDSOMESTATEREVOKED 0x00000010
+#define	NFSV4SEQ_ADMINSTATEREVOKED	0x00000020
+#define	NFSV4SEQ_RECALLABLESTATEREVOKED	0x00000040
+#define	NFSV4SEQ_LEASEMOVED		0x00000080
+#define	NFSV4SEQ_RESTARTRECLAIMNEEDED	0x00000100
+#define	NFSV4SEQ_CBPATHDOWNSESSION	0x00000200
+#define	NFSV4SEQ_BACKCHANNELFAULT	0x00000400
+#define	NFSV4SEQ_DEVIDCHANGED		0x00000800
+#define	NFSV4SEQ_DEVIDDELETED		0x00001000
+
+/* Flags for Layout. */
+#define	NFSLAYOUTRETURN_FILE		1
+#define	NFSLAYOUTRETURN_FSID		2
+#define	NFSLAYOUTRETURN_ALL		3
+
+#define	NFSLAYOUT_NFSV4_1_FILES		0x1
+#define	NFSLAYOUT_OSD2_OBJECTS		0x2
+#define	NFSLAYOUT_BLOCK_VOLUME		0x3
+#define	NFSLAYOUT_FLEXFILE		0x4
+
+#define	NFSLAYOUTIOMODE_READ		1
+#define	NFSLAYOUTIOMODE_RW		2
+#define	NFSLAYOUTIOMODE_ANY		3
+
+/* Flags for Get Device Info. */
+#define	NFSDEVICEIDNOTIFY_CHANGEBIT	0x1
+#define	NFSDEVICEIDNOTIFY_DELETEBIT	0x2
+
+/* Flags for File Layout. */
+#define	NFSFLAYUTIL_DENSE		0x1
+#define	NFSFLAYUTIL_COMMIT_THRU_MDS	0x2
+#define	NFSFLAYUTIL_STRIPE_MASK		0xffffffc0
+
+/* Flags for Flex File Layout. */
+#define	NFSFLEXFLAG_NO_LAYOUTCOMMIT	0x00000001
+#define	NFSFLEXFLAG_NOIO_MDS		0x00000002
+#define	NFSFLEXFLAG_NO_READIO		0x00000004
+#define	NFSFLEXFLAG_WRITE_ONEMIRROR	0x00000008
+
+/* Enum values for Bind Connection to Session. */
+#define	NFSCDFC4_FORE		0x1
+#define	NFSCDFC4_BACK		0x2
+#define	NFSCDFC4_FORE_OR_BOTH	0x3
+#define	NFSCDFC4_BACK_OR_BOTH	0x7
+#define	NFSCDFS4_FORE		0x1
+#define	NFSCDFS4_BACK		0x2
+#define	NFSCDFS4_BOTH		0x3
+
+#if defined(_KERNEL) || defined(KERNEL)
+/* Conversion macros */
+#define	vtonfsv2_mode(t,m) 						\
+		txdr_unsigned(((t) == VFIFO) ? MAKEIMODE(VCHR, (m)) : 	\
+				MAKEIMODE((t), (m)))
+#define	vtonfsv34_mode(m)	txdr_unsigned((m) & 07777)
+#define	nfstov_mode(a)		(fxdr_unsigned(u_int16_t, (a))&07777)
+#define	vtonfsv2_type(a)  (((u_int32_t)(a)) >= 9 ? txdr_unsigned(NFNON) : \
+		txdr_unsigned(newnfsv2_type[((u_int32_t)(a))]))
+#define	vtonfsv34_type(a)  (((u_int32_t)(a)) >= 9 ? txdr_unsigned(NFNON) : \
+		txdr_unsigned(nfsv34_type[((u_int32_t)(a))]))
+#define	nfsv2tov_type(a)	newnv2tov_type[fxdr_unsigned(u_int32_t,(a))&0x7]
+#define	nfsv34tov_type(a)	nv34tov_type[fxdr_unsigned(u_int32_t,(a))&0x7]
+#define	vtonfs_dtype(a)	(((u_int32_t)(a)) >= 9 ? IFTODT(VTTOIF(VNON)) : \
+			 IFTODT(VTTOIF(a)))
+
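A small worked example of the mode conversions (purely illustrative):
vtonfsv34_mode() keeps only the permission bits and XDR-encodes them, and
nfstov_mode() reverses that:

    u_int32_t wire = vtonfsv34_mode(0100644);  /* encodes 0644 */
    mode_t m = nfstov_mode(wire);              /* m == 0644 */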
+/* File types */
+typedef enum { NFNON=0, NFREG=1, NFDIR=2, NFBLK=3, NFCHR=4, NFLNK=5,
+	NFSOCK=6, NFFIFO=7, NFATTRDIR=8, NFNAMEDATTR=9 } nfstype;
+
+/* Structs for common parts of the rpc's */
+
+struct nfsv2_time {
+	u_int32_t nfsv2_sec;
+	u_int32_t nfsv2_usec;
+};
+typedef struct nfsv2_time	nfstime2;
+
+struct nfsv3_time {
+	u_int32_t nfsv3_sec;
+	u_int32_t nfsv3_nsec;
+};
+typedef struct nfsv3_time	nfstime3;
+
+struct nfsv4_time {
+	u_int32_t nfsv4_highsec;
+	u_int32_t nfsv4_sec;
+	u_int32_t nfsv4_nsec;
+};
+typedef struct nfsv4_time	nfstime4;
+
+/*
+ * Quads are defined as arrays of 2 longs to ensure dense packing for the
+ * protocol and to facilitate xdr conversion.
+ */
+struct nfs_uquad {
+	u_int32_t nfsuquad[2];
+};
+typedef	struct nfs_uquad	nfsuint64;
+
+/*
+ * Used to convert between two u_longs and a u_quad_t.
+ */
+union nfs_quadconvert {
+	u_int32_t lval[2];
+	u_quad_t  qval;
+};
+typedef union nfs_quadconvert	nfsquad_t;
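
A minimal sketch of how the union is used (variable names illustrative):

    nfsquad_t clientid;

    clientid.lval[0] = w0;    /* the two 32-bit words, e.g. from XDR */
    clientid.lval[1] = w1;
    /* clientid.qval is the same storage viewed as one 64-bit value. */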
+
+/*
+ * NFS Version 3 special file number.
+ */
+struct nfsv3_spec {
+	u_int32_t specdata1;
+	u_int32_t specdata2;
+};
+typedef	struct nfsv3_spec	nfsv3spec;
+
+/*
+ * File attributes and settable attributes. These structures cover both
+ * NFS version 2 and the version 3 protocol. Note that the union is only
+ * used so that one pointer can refer to both variants. These structures
+ * go out on the wire and must be densely packed, so no quad data types
+ * are used. (all fields are longs or u_longs or structures of same)
+ * NB: You can't do sizeof(struct nfs_fattr), you must use the
+ *     NFSX_FATTR(v3) macro.
+ */
+struct nfs_fattr {
+	u_int32_t fa_type;
+	u_int32_t fa_mode;
+	u_int32_t fa_nlink;
+	u_int32_t fa_uid;
+	u_int32_t fa_gid;
+	union {
+		struct {
+			u_int32_t nfsv2fa_size;
+			u_int32_t nfsv2fa_blocksize;
+			u_int32_t nfsv2fa_rdev;
+			u_int32_t nfsv2fa_blocks;
+			u_int32_t nfsv2fa_fsid;
+			u_int32_t nfsv2fa_fileid;
+			nfstime2  nfsv2fa_atime;
+			nfstime2  nfsv2fa_mtime;
+			nfstime2  nfsv2fa_ctime;
+		} fa_nfsv2;
+		struct {
+			nfsuint64 nfsv3fa_size;
+			nfsuint64 nfsv3fa_used;
+			nfsv3spec nfsv3fa_rdev;
+			nfsuint64 nfsv3fa_fsid;
+			nfsuint64 nfsv3fa_fileid;
+			nfstime3  nfsv3fa_atime;
+			nfstime3  nfsv3fa_mtime;
+			nfstime3  nfsv3fa_ctime;
+		} fa_nfsv3;
+	} fa_un;
+};
+
+/* and some ugly defines for accessing union components */
+#define	fa2_size		fa_un.fa_nfsv2.nfsv2fa_size
+#define	fa2_blocksize		fa_un.fa_nfsv2.nfsv2fa_blocksize
+#define	fa2_rdev		fa_un.fa_nfsv2.nfsv2fa_rdev
+#define	fa2_blocks		fa_un.fa_nfsv2.nfsv2fa_blocks
+#define	fa2_fsid		fa_un.fa_nfsv2.nfsv2fa_fsid
+#define	fa2_fileid		fa_un.fa_nfsv2.nfsv2fa_fileid
+#define	fa2_atime		fa_un.fa_nfsv2.nfsv2fa_atime
+#define	fa2_mtime		fa_un.fa_nfsv2.nfsv2fa_mtime
+#define	fa2_ctime		fa_un.fa_nfsv2.nfsv2fa_ctime
+#define	fa3_size		fa_un.fa_nfsv3.nfsv3fa_size
+#define	fa3_used		fa_un.fa_nfsv3.nfsv3fa_used
+#define	fa3_rdev		fa_un.fa_nfsv3.nfsv3fa_rdev
+#define	fa3_fsid		fa_un.fa_nfsv3.nfsv3fa_fsid
+#define	fa3_fileid		fa_un.fa_nfsv3.nfsv3fa_fileid
+#define	fa3_atime		fa_un.fa_nfsv3.nfsv3fa_atime
+#define	fa3_mtime		fa_un.fa_nfsv3.nfsv3fa_mtime
+#define	fa3_ctime		fa_un.fa_nfsv3.nfsv3fa_ctime
+
+#define	NFS_LINK_MAX	UINT32_MAX
+
+struct nfsv2_sattr {
+	u_int32_t sa_mode;
+	u_int32_t sa_uid;
+	u_int32_t sa_gid;
+	u_int32_t sa_size;
+	nfstime2  sa_atime;
+	nfstime2  sa_mtime;
+};
+
+/*
+ * NFS Version 3 sattr structure for the new node creation case.
+ */
+struct nfsv3_sattr {
+	u_int32_t sa_modetrue;
+	u_int32_t sa_mode;
+	u_int32_t sa_uidfalse;
+	u_int32_t sa_gidfalse;
+	u_int32_t sa_sizefalse;
+	u_int32_t sa_atimetype;
+	nfstime3  sa_atime;
+	u_int32_t sa_mtimetype;
+	nfstime3  sa_mtime;
+};
+#endif	/* _KERNEL */
+
+/*
+ * The attribute bits used for V4.
+ * NFSATTRBIT_xxx defines the attribute# (and its bit position)
+ * NFSATTRBM_xxx is a 32bit mask with the correct bit set within the
+ *	appropriate 32bit word.
+ * NFSATTRBIT_MAX is one greater than the largest NFSATTRBIT_xxx
+ */
+#define	NFSATTRBIT_SUPPORTEDATTRS	0
+#define	NFSATTRBIT_TYPE			1
+#define	NFSATTRBIT_FHEXPIRETYPE		2
+#define	NFSATTRBIT_CHANGE		3
+#define	NFSATTRBIT_SIZE			4
+#define	NFSATTRBIT_LINKSUPPORT		5
+#define	NFSATTRBIT_SYMLINKSUPPORT	6
+#define	NFSATTRBIT_NAMEDATTR		7
+#define	NFSATTRBIT_FSID			8
+#define	NFSATTRBIT_UNIQUEHANDLES	9
+#define	NFSATTRBIT_LEASETIME		10
+#define	NFSATTRBIT_RDATTRERROR		11
+#define	NFSATTRBIT_ACL			12
+#define	NFSATTRBIT_ACLSUPPORT		13
+#define	NFSATTRBIT_ARCHIVE		14
+#define	NFSATTRBIT_CANSETTIME		15
+#define	NFSATTRBIT_CASEINSENSITIVE	16
+#define	NFSATTRBIT_CASEPRESERVING	17
+#define	NFSATTRBIT_CHOWNRESTRICTED	18
+#define	NFSATTRBIT_FILEHANDLE		19
+#define	NFSATTRBIT_FILEID		20
+#define	NFSATTRBIT_FILESAVAIL		21
+#define	NFSATTRBIT_FILESFREE		22
+#define	NFSATTRBIT_FILESTOTAL		23
+#define	NFSATTRBIT_FSLOCATIONS		24
+#define	NFSATTRBIT_HIDDEN		25
+#define	NFSATTRBIT_HOMOGENEOUS		26
+#define	NFSATTRBIT_MAXFILESIZE		27
+#define	NFSATTRBIT_MAXLINK		28
+#define	NFSATTRBIT_MAXNAME		29
+#define	NFSATTRBIT_MAXREAD		30
+#define	NFSATTRBIT_MAXWRITE		31
+#define	NFSATTRBIT_MIMETYPE		32
+#define	NFSATTRBIT_MODE			33
+#define	NFSATTRBIT_NOTRUNC		34
+#define	NFSATTRBIT_NUMLINKS		35
+#define	NFSATTRBIT_OWNER		36
+#define	NFSATTRBIT_OWNERGROUP		37
+#define	NFSATTRBIT_QUOTAHARD		38
+#define	NFSATTRBIT_QUOTASOFT		39
+#define	NFSATTRBIT_QUOTAUSED		40
+#define	NFSATTRBIT_RAWDEV		41
+#define	NFSATTRBIT_SPACEAVAIL		42
+#define	NFSATTRBIT_SPACEFREE		43
+#define	NFSATTRBIT_SPACETOTAL		44
+#define	NFSATTRBIT_SPACEUSED		45
+#define	NFSATTRBIT_SYSTEM		46
+#define	NFSATTRBIT_TIMEACCESS		47
+#define	NFSATTRBIT_TIMEACCESSSET	48
+#define	NFSATTRBIT_TIMEBACKUP		49
+#define	NFSATTRBIT_TIMECREATE		50
+#define	NFSATTRBIT_TIMEDELTA		51
+#define	NFSATTRBIT_TIMEMETADATA		52
+#define	NFSATTRBIT_TIMEMODIFY		53
+#define	NFSATTRBIT_TIMEMODIFYSET	54
+#define	NFSATTRBIT_MOUNTEDONFILEID	55
+#define	NFSATTRBIT_DIRNOTIFDELAY	56
+#define	NFSATTRBIT_DIRENTNOTIFDELAY	57
+#define	NFSATTRBIT_DACL			58
+#define	NFSATTRBIT_SACL			59
+#define	NFSATTRBIT_CHANGEPOLICY		60
+#define	NFSATTRBIT_FSSTATUS		61
+#define	NFSATTRBIT_FSLAYOUTTYPE		62
+#define	NFSATTRBIT_LAYOUTHINT		63
+#define	NFSATTRBIT_LAYOUTTYPE		64
+#define	NFSATTRBIT_LAYOUTBLKSIZE	65
+#define	NFSATTRBIT_LAYOUTALIGNMENT	66
+#define	NFSATTRBIT_FSLOCATIONSINFO	67
+#define	NFSATTRBIT_MDSTHRESHOLD		68
+#define	NFSATTRBIT_RETENTIONGET		69
+#define	NFSATTRBIT_RETENTIONSET		70
+#define	NFSATTRBIT_RETENTEVTGET		71
+#define	NFSATTRBIT_RETENTEVTSET		72
+#define	NFSATTRBIT_RETENTIONHOLD	73
+#define	NFSATTRBIT_MODESETMASKED	74
+#define	NFSATTRBIT_SUPPATTREXCLCREAT	75
+#define	NFSATTRBIT_FSCHARSETCAP		76
+
+#define	NFSATTRBM_SUPPORTEDATTRS	0x00000001
+#define	NFSATTRBM_TYPE			0x00000002
+#define	NFSATTRBM_FHEXPIRETYPE		0x00000004
+#define	NFSATTRBM_CHANGE		0x00000008
+#define	NFSATTRBM_SIZE			0x00000010
+#define	NFSATTRBM_LINKSUPPORT		0x00000020
+#define	NFSATTRBM_SYMLINKSUPPORT	0x00000040
+#define	NFSATTRBM_NAMEDATTR		0x00000080
+#define	NFSATTRBM_FSID			0x00000100
+#define	NFSATTRBM_UNIQUEHANDLES		0x00000200
+#define	NFSATTRBM_LEASETIME		0x00000400
+#define	NFSATTRBM_RDATTRERROR		0x00000800
+#define	NFSATTRBM_ACL			0x00001000
+#define	NFSATTRBM_ACLSUPPORT		0x00002000
+#define	NFSATTRBM_ARCHIVE		0x00004000
+#define	NFSATTRBM_CANSETTIME		0x00008000
+#define	NFSATTRBM_CASEINSENSITIVE	0x00010000
+#define	NFSATTRBM_CASEPRESERVING	0x00020000
+#define	NFSATTRBM_CHOWNRESTRICTED	0x00040000
+#define	NFSATTRBM_FILEHANDLE		0x00080000
+#define	NFSATTRBM_FILEID		0x00100000
+#define	NFSATTRBM_FILESAVAIL		0x00200000
+#define	NFSATTRBM_FILESFREE		0x00400000
+#define	NFSATTRBM_FILESTOTAL		0x00800000
+#define	NFSATTRBM_FSLOCATIONS		0x01000000
+#define	NFSATTRBM_HIDDEN		0x02000000
+#define	NFSATTRBM_HOMOGENEOUS		0x04000000
+#define	NFSATTRBM_MAXFILESIZE		0x08000000
+#define	NFSATTRBM_MAXLINK		0x10000000
+#define	NFSATTRBM_MAXNAME		0x20000000
+#define	NFSATTRBM_MAXREAD		0x40000000
+#define	NFSATTRBM_MAXWRITE		0x80000000
+#define	NFSATTRBM_MIMETYPE		0x00000001
+#define	NFSATTRBM_MODE			0x00000002
+#define	NFSATTRBM_NOTRUNC		0x00000004
+#define	NFSATTRBM_NUMLINKS		0x00000008
+#define	NFSATTRBM_OWNER			0x00000010
+#define	NFSATTRBM_OWNERGROUP		0x00000020
+#define	NFSATTRBM_QUOTAHARD		0x00000040
+#define	NFSATTRBM_QUOTASOFT		0x00000080
+#define	NFSATTRBM_QUOTAUSED		0x00000100
+#define	NFSATTRBM_RAWDEV		0x00000200
+#define	NFSATTRBM_SPACEAVAIL		0x00000400
+#define	NFSATTRBM_SPACEFREE		0x00000800
+#define	NFSATTRBM_SPACETOTAL		0x00001000
+#define	NFSATTRBM_SPACEUSED		0x00002000
+#define	NFSATTRBM_SYSTEM		0x00004000
+#define	NFSATTRBM_TIMEACCESS		0x00008000
+#define	NFSATTRBM_TIMEACCESSSET		0x00010000
+#define	NFSATTRBM_TIMEBACKUP		0x00020000
+#define	NFSATTRBM_TIMECREATE		0x00040000
+#define	NFSATTRBM_TIMEDELTA		0x00080000
+#define	NFSATTRBM_TIMEMETADATA		0x00100000
+#define	NFSATTRBM_TIMEMODIFY		0x00200000
+#define	NFSATTRBM_TIMEMODIFYSET		0x00400000
+#define	NFSATTRBM_MOUNTEDONFILEID	0x00800000
+#define	NFSATTRBM_DIRNOTIFDELAY		0x01000000
+#define	NFSATTRBM_DIRENTNOTIFDELAY	0x02000000
+#define	NFSATTRBM_DACL			0x04000000
+#define	NFSATTRBM_SACL			0x08000000
+#define	NFSATTRBM_CHANGEPOLICY		0x10000000
+#define	NFSATTRBM_FSSTATUS		0x20000000
+#define	NFSATTRBM_FSLAYOUTTYPE		0x40000000
+#define	NFSATTRBM_LAYOUTHINT		0x80000000
+#define	NFSATTRBM_LAYOUTTYPE		0x00000001
+#define	NFSATTRBM_LAYOUTBLKSIZE		0x00000002
+#define	NFSATTRBM_LAYOUTALIGNMENT	0x00000004
+#define	NFSATTRBM_FSLOCATIONSINFO	0x00000008
+#define	NFSATTRBM_MDSTHRESHOLD		0x00000010
+#define	NFSATTRBM_RETENTIONGET		0x00000020
+#define	NFSATTRBM_RETENTIONSET		0x00000040
+#define	NFSATTRBM_RETENTEVTGET		0x00000080
+#define	NFSATTRBM_RETENTEVTSET		0x00000100
+#define	NFSATTRBM_RETENTIONHOLD		0x00000200
+#define	NFSATTRBM_MODESETMASKED		0x00000400
+#define	NFSATTRBM_SUPPATTREXCLCREAT	0x00000800
+#define	NFSATTRBM_FSCHARSETCAP		0x00001000
+
+#define	NFSATTRBIT_MAX			77
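
A hedged sketch of the bit/word relationship described above (the two helper
macros are illustrative, not defined by the imported headers): attribute
number N lives in 32-bit word N / 32 with mask 1 << (N % 32). For example,
NFSATTRBIT_MODE is 33, i.e. word 1, bit 1, which matches
NFSATTRBM_MODE == 0x00000002.

    /* Illustrative only -- not part of the import. */
    #define NFSATTR_WORD(bit)   ((bit) / 32)
    #define NFSATTR_MASK(bit)   (1U << ((bit) % 32))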
+
+/*
+ * Sets of attributes that are supported, by words in the bitmap.
+ */
+/*
+ * NFSATTRBIT_SUPPORTED - SUPP0 - bits 0<->31
+ *			  SUPP1 - bits 32<->63
+ *			  SUPP2 - bits 64<->95
+ */
+#define	NFSATTRBIT_SUPP0						\
+ 	(NFSATTRBM_SUPPORTEDATTRS |					\
+ 	NFSATTRBM_TYPE |						\
+ 	NFSATTRBM_FHEXPIRETYPE |					\
+ 	NFSATTRBM_CHANGE |						\
+ 	NFSATTRBM_SIZE |						\
+ 	NFSATTRBM_LINKSUPPORT |						\
+ 	NFSATTRBM_SYMLINKSUPPORT |					\
+ 	NFSATTRBM_NAMEDATTR |						\
+ 	NFSATTRBM_FSID |						\
+ 	NFSATTRBM_UNIQUEHANDLES |					\
+ 	NFSATTRBM_LEASETIME |						\
+ 	NFSATTRBM_RDATTRERROR |						\
+ 	NFSATTRBM_ACL |							\
+ 	NFSATTRBM_ACLSUPPORT |						\
+ 	NFSATTRBM_CANSETTIME |						\
+ 	NFSATTRBM_CASEINSENSITIVE |					\
+ 	NFSATTRBM_CASEPRESERVING |					\
+ 	NFSATTRBM_CHOWNRESTRICTED |					\
+ 	NFSATTRBM_FILEHANDLE |						\
+ 	NFSATTRBM_FILEID |						\
+ 	NFSATTRBM_FILESAVAIL |						\
+ 	NFSATTRBM_FILESFREE |						\
+ 	NFSATTRBM_FILESTOTAL |						\
+	NFSATTRBM_FSLOCATIONS |						\
+ 	NFSATTRBM_HOMOGENEOUS |						\
+ 	NFSATTRBM_MAXFILESIZE |						\
+ 	NFSATTRBM_MAXLINK |						\
+ 	NFSATTRBM_MAXNAME |						\
+ 	NFSATTRBM_MAXREAD |						\
+ 	NFSATTRBM_MAXWRITE)
+
+/*
+ * NFSATTRBIT_S1 - subset of SUPP1 - OR of the following bits:
+ */
+#define	NFSATTRBIT_S1							\
+ 	(NFSATTRBM_MODE |						\
+ 	NFSATTRBM_NOTRUNC |						\
+ 	NFSATTRBM_NUMLINKS |						\
+ 	NFSATTRBM_OWNER |						\
+ 	NFSATTRBM_OWNERGROUP |						\
+ 	NFSATTRBM_RAWDEV |						\
+ 	NFSATTRBM_SPACEAVAIL |						\
+ 	NFSATTRBM_SPACEFREE |						\
+ 	NFSATTRBM_SPACETOTAL |						\
+ 	NFSATTRBM_SPACEUSED |						\
+ 	NFSATTRBM_TIMEACCESS |						\
+ 	NFSATTRBM_TIMEDELTA |						\
+ 	NFSATTRBM_TIMEMETADATA |					\
+ 	NFSATTRBM_TIMEMODIFY |						\
+ 	NFSATTRBM_MOUNTEDONFILEID |					\
+	NFSATTRBM_QUOTAHARD |                        			\
+    	NFSATTRBM_QUOTASOFT |                        			\
+    	NFSATTRBM_QUOTAUSED |						\
+	NFSATTRBM_FSLAYOUTTYPE)
+
+
+#ifdef QUOTA
+/*
+ * If QUOTA is defined, OR in NFSATTRBM_QUOTAHARD, NFSATTRBM_QUOTASOFT and
+ * NFSATTRBM_QUOTAUSED.
+ */
+#define	NFSATTRBIT_SUPP1	(NFSATTRBIT_S1 |			\
+				NFSATTRBM_QUOTAHARD |			\
+				NFSATTRBM_QUOTASOFT |			\
+				NFSATTRBM_QUOTAUSED)
+#else
+#define	NFSATTRBIT_SUPP1	NFSATTRBIT_S1
+#endif
+
+#define	NFSATTRBIT_SUPP2						\
+	(NFSATTRBM_LAYOUTTYPE |						\
+	NFSATTRBM_LAYOUTBLKSIZE |					\
+	NFSATTRBM_LAYOUTALIGNMENT |					\
+	NFSATTRBM_SUPPATTREXCLCREAT)
+
+/*
+ * These are the set-only attributes.
+ */
+#define	NFSATTRBIT_SUPPSETONLY1	 (NFSATTRBM_TIMEACCESSSET |		\
+				 NFSATTRBM_TIMEMODIFYSET)
+#define	NFSATTRBIT_SUPPSETONLY2	(NFSATTRBM_MODESETMASKED)
+
+/*
+ * NFSATTRBIT_SETABLE - SETABLE0 - bits 0<->31
+ *			SETABLE1 - bits 32<->63
+ *			SETABLE2 - bits 64<->95
+ */
+#define	NFSATTRBIT_SETABLE0						\
+	(NFSATTRBM_SIZE |						\
+	NFSATTRBM_ACL)
+#define	NFSATTRBIT_SETABLE1						\
+ 	(NFSATTRBM_MODE |						\
+ 	NFSATTRBM_OWNER |						\
+ 	NFSATTRBM_OWNERGROUP |						\
+ 	NFSATTRBM_TIMEACCESSSET |					\
+ 	NFSATTRBM_TIMEMODIFYSET)
+#define	NFSATTRBIT_SETABLE2						\
+	(NFSATTRBM_MODESETMASKED)
+
+/*
+ * NFSATTRBIT_NFSV41 - Attributes only supported by NFSv4.1.
+ */
+#define	NFSATTRBIT_NFSV41_1						\
+	(NFSATTRBM_FSLAYOUTTYPE)
+#define	NFSATTRBIT_NFSV41_2						\
+	(NFSATTRBM_LAYOUTTYPE |						\
+	NFSATTRBM_LAYOUTBLKSIZE |					\
+	NFSATTRBM_LAYOUTALIGNMENT |					\
+	NFSATTRBM_MODESETMASKED |					\
+	NFSATTRBM_SUPPATTREXCLCREAT)
+
+/*
+ * Set of attributes that the getattr vnode op needs.
+ * OR of the following bits.
+ * NFSATTRBIT_GETATTR0 - bits 0<->31
+ */
+#define	NFSATTRBIT_GETATTR0						\
+ 	(NFSATTRBM_SUPPORTEDATTRS |					\
+ 	NFSATTRBM_TYPE |						\
+ 	NFSATTRBM_CHANGE |						\
+ 	NFSATTRBM_SIZE |						\
+ 	NFSATTRBM_FSID |						\
+ 	NFSATTRBM_FILEID |						\
+ 	NFSATTRBM_MAXREAD)
+
+/*
+ * NFSATTRBIT_GETATTR1 - bits 32<->63
+ */
+#define	NFSATTRBIT_GETATTR1						\
+ 	(NFSATTRBM_MODE |						\
+ 	NFSATTRBM_NUMLINKS |						\
+ 	NFSATTRBM_OWNER |						\
+ 	NFSATTRBM_OWNERGROUP |						\
+ 	NFSATTRBM_RAWDEV |						\
+ 	NFSATTRBM_SPACEUSED |						\
+ 	NFSATTRBM_TIMEACCESS |						\
+ 	NFSATTRBM_TIMEMETADATA |					\
+ 	NFSATTRBM_TIMEMODIFY)
+
+/*
+ * NFSATTRBIT_GETATTR2 - bits 64<->95
+ */
+#define	NFSATTRBIT_GETATTR2		0
+
+/*
+ * Subset of the above that the Write RPC gets.
+ * OR of the following bits.
+ * NFSATTRBIT_WRITEGETATTR0 - bits 0<->31
+ */
+#define	NFSATTRBIT_WRITEGETATTR0					\
+ 	(NFSATTRBM_SUPPORTEDATTRS |					\
+ 	NFSATTRBM_TYPE |						\
+ 	NFSATTRBM_CHANGE |						\
+ 	NFSATTRBM_SIZE |						\
+ 	NFSATTRBM_FSID |						\
+ 	NFSATTRBM_FILEID |						\
+ 	NFSATTRBM_MAXREAD)
+
+/*
+ * NFSATTRBIT_WRITEGETATTR1 - bits 32<->63
+ */
+#define	NFSATTRBIT_WRITEGETATTR1					\
+ 	(NFSATTRBM_MODE |						\
+ 	NFSATTRBM_NUMLINKS |						\
+ 	NFSATTRBM_RAWDEV |						\
+ 	NFSATTRBM_SPACEUSED |						\
+ 	NFSATTRBM_TIMEACCESS |						\
+ 	NFSATTRBM_TIMEMETADATA |					\
+ 	NFSATTRBM_TIMEMODIFY)
+
+/*
+ * NFSATTRBIT_WRITEGETATTR2 - bits 64<->95
+ */
+#define	NFSATTRBIT_WRITEGETATTR2	0
+
+/*
+ * Set of attributes that the wccattr operation needs.
+ * OR of the following bits.
+ * NFSATTRBIT_WCCATTR0 - bits 0<->31
+ */
+#define	NFSATTRBIT_WCCATTR0	0
+
+/*
+ * NFSATTRBIT_WCCATTR1 - bits 32<->63
+ */
+#define	NFSATTRBIT_WCCATTR1						\
+ 	(NFSATTRBM_TIMEMODIFY)
+
+/*
+ * NFSATTRBIT_WCCATTR2 - bits 64<->95
+ */
+#define	NFSATTRBIT_WCCATTR2		0
+
+/*
+ * NFSATTRBIT_CBGETATTR0 - bits 0<->31
+ */
+#define	NFSATTRBIT_CBGETATTR0	(NFSATTRBM_CHANGE | NFSATTRBM_SIZE)
+
+/*
+ * NFSATTRBIT_CBGETATTR1 - bits 32<->63
+ */
+#define	NFSATTRBIT_CBGETATTR1		0x0
+
+/*
+ * NFSATTRBIT_CBGETATTR2 - bits 64<->95
+ */
+#define	NFSATTRBIT_CBGETATTR2		0x0
+
+/*
+ * Sets of attributes whose values require a VFS_STATFS() call.
+ * NFSATTRBIT_STATFS0 - bits 0<->31
+ */
+#define	NFSATTRBIT_STATFS0						\
+	(NFSATTRBM_LINKSUPPORT |					\
+	NFSATTRBM_SYMLINKSUPPORT |					\
+	NFSATTRBM_CANSETTIME |						\
+ 	NFSATTRBM_FILESAVAIL |						\
+ 	NFSATTRBM_FILESFREE |						\
+ 	NFSATTRBM_FILESTOTAL |						\
+ 	NFSATTRBM_HOMOGENEOUS |						\
+ 	NFSATTRBM_MAXFILESIZE |						\
+	NFSATTRBM_MAXNAME |						\
+	NFSATTRBM_MAXREAD |						\
+	NFSATTRBM_MAXWRITE)
+
+/*
+ * NFSATTRBIT_STATFS1 - bits 32<->63
+ */
+#define	NFSATTRBIT_STATFS1						\
+ 	(NFSATTRBM_QUOTAHARD |						\
+ 	NFSATTRBM_QUOTASOFT |						\
+ 	NFSATTRBM_QUOTAUSED |						\
+ 	NFSATTRBM_SPACEAVAIL |						\
+ 	NFSATTRBM_SPACEFREE |						\
+ 	NFSATTRBM_SPACETOTAL |						\
+ 	NFSATTRBM_SPACEUSED |						\
+	NFSATTRBM_TIMEDELTA)
+
+/*
+ * NFSATTRBIT_STATFS2 - bits 64<->95
+ */
+#define	NFSATTRBIT_STATFS2		0
+
+/*
+ * These are the bits that are needed by the nfs_statfs() call.
+ * (The regular getattr bits are or'd in so the vnode gets the correct
+ *  type, etc.)
+ * NFSGETATTRBIT_STATFS0 - bits 0<->31
+ */
+#define	NFSGETATTRBIT_STATFS0	(NFSATTRBIT_GETATTR0 |			\
+				NFSATTRBM_LINKSUPPORT |			\
+				NFSATTRBM_SYMLINKSUPPORT |		\
+				NFSATTRBM_CANSETTIME |			\
+				NFSATTRBM_FILESFREE |			\
+				NFSATTRBM_FILESTOTAL |			\
+				NFSATTRBM_HOMOGENEOUS |			\
+				NFSATTRBM_MAXFILESIZE |			\
+				NFSATTRBM_MAXNAME |			\
+				NFSATTRBM_MAXREAD |			\
+				NFSATTRBM_MAXWRITE)
+
+/*
+ * NFSGETATTRBIT_STATFS1 - bits 32<->63
+ */
+#define	NFSGETATTRBIT_STATFS1	(NFSATTRBIT_GETATTR1 |			\
+				NFSATTRBM_SPACEAVAIL |			\
+				NFSATTRBM_SPACEFREE |			\
+				NFSATTRBM_SPACETOTAL |			\
+				NFSATTRBM_TIMEDELTA)
+
+/*
+ * NFSGETATTRBIT_STATFS2 - bits 64<->95
+ */
+#define	NFSGETATTRBIT_STATFS2		0
+
+/*
+ * Set of attributes for the equivalent of an nfsv3 pathconf rpc.
+ * NFSGETATTRBIT_PATHCONF0 - bits 0<->31
+ */
+#define	NFSGETATTRBIT_PATHCONF0	(NFSATTRBIT_GETATTR0 |			\
+			 	NFSATTRBM_CASEINSENSITIVE |		\
+			 	NFSATTRBM_CASEPRESERVING |		\
+			 	NFSATTRBM_CHOWNRESTRICTED |		\
+			 	NFSATTRBM_MAXLINK |			\
+			 	NFSATTRBM_MAXNAME)
+
+/*
+ * NFSGETATTRBIT_PATHCONF1 - bits 32<->63
+ */
+#define	NFSGETATTRBIT_PATHCONF1	(NFSATTRBIT_GETATTR1 |			\
+				NFSATTRBM_NOTRUNC)
+
+/*
+ * NFSGETATTRBIT_PATHCONF2 - bits 64<->95
+ */
+#define	NFSGETATTRBIT_PATHCONF2		0
+
+/*
+ * Sets of attributes required by readdir and readdirplus.
+ * NFSATTRBIT_READDIRPLUS0	(NFSATTRBIT_GETATTR0 | NFSATTRBM_FILEHANDLE |
+ *				 NFSATTRBM_RDATTRERROR)
+ */
+#define	NFSATTRBIT_READDIRPLUS0	(NFSATTRBIT_GETATTR0 | NFSATTRBM_FILEHANDLE | \
+				NFSATTRBM_RDATTRERROR)
+#define	NFSATTRBIT_READDIRPLUS1	NFSATTRBIT_GETATTR1
+#define	NFSATTRBIT_READDIRPLUS2		0
+
+/*
+ * Set of attributes supported by Referral vnodes.
+ */
+#define	NFSATTRBIT_REFERRAL0	(NFSATTRBM_TYPE | NFSATTRBM_FSID |	\
+	NFSATTRBM_RDATTRERROR | NFSATTRBM_FSLOCATIONS)
+#define	NFSATTRBIT_REFERRAL1	NFSATTRBM_MOUNTEDONFILEID
+#define	NFSATTRBIT_REFERRAL2		0
+
+/*
+ * Structure for data handled by the statfs rpc. Since some fields are
+ * u_int64_t, this cannot be used for copying data on/off the wire, due
+ * to alignment concerns.
+ */
+struct nfsstatfs {
+	union {
+		struct {
+			u_int32_t nfsv2sf_tsize;
+			u_int32_t nfsv2sf_bsize;
+			u_int32_t nfsv2sf_blocks;
+			u_int32_t nfsv2sf_bfree;
+			u_int32_t nfsv2sf_bavail;
+		} sf_nfsv2;
+		struct {
+			u_int64_t nfsv3sf_tbytes;
+			u_int64_t nfsv3sf_fbytes;
+			u_int64_t nfsv3sf_abytes;
+			u_int64_t nfsv3sf_tfiles;
+			u_int64_t nfsv3sf_ffiles;
+			u_int64_t nfsv3sf_afiles;
+			u_int32_t nfsv3sf_invarsec;
+		} sf_nfsv3;
+	} sf_un;
+};
+
+#define	sf_tsize	sf_un.sf_nfsv2.nfsv2sf_tsize
+#define	sf_bsize	sf_un.sf_nfsv2.nfsv2sf_bsize
+#define	sf_blocks	sf_un.sf_nfsv2.nfsv2sf_blocks
+#define	sf_bfree	sf_un.sf_nfsv2.nfsv2sf_bfree
+#define	sf_bavail	sf_un.sf_nfsv2.nfsv2sf_bavail
+#define	sf_tbytes	sf_un.sf_nfsv3.nfsv3sf_tbytes
+#define	sf_fbytes	sf_un.sf_nfsv3.nfsv3sf_fbytes
+#define	sf_abytes	sf_un.sf_nfsv3.nfsv3sf_abytes
+#define	sf_tfiles	sf_un.sf_nfsv3.nfsv3sf_tfiles
+#define	sf_ffiles	sf_un.sf_nfsv3.nfsv3sf_ffiles
+#define	sf_afiles	sf_un.sf_nfsv3.nfsv3sf_afiles
+#define	sf_invarsec	sf_un.sf_nfsv3.nfsv3sf_invarsec
+
+/*
+ * Now defined using u_int64_t for the 64 bit field(s).
+ * (Cannot be used to move data on/off the wire, due to alignment concerns.)
+ */
+struct nfsfsinfo {
+	u_int32_t fs_rtmax;
+	u_int32_t fs_rtpref;
+	u_int32_t fs_rtmult;
+	u_int32_t fs_wtmax;
+	u_int32_t fs_wtpref;
+	u_int32_t fs_wtmult;
+	u_int32_t fs_dtpref;
+	u_int64_t fs_maxfilesize;
+	struct timespec fs_timedelta;
+	u_int32_t fs_properties;
+};
+
+/*
+ * Bits for fs_properties
+ */
+#define	NFSV3_FSFLINK		0x1
+#define	NFSV3_FSFSYMLINK	0x2
+#define	NFSV3_FSFHOMOGENEOUS	0x4
+#define	NFSV3_FSFCANSETTIME	0x8
+
+/*
+ * Yikes, overload fs_rtmult as fs_maxname for V4.
+ */
+#define	fs_maxname	fs_rtmult
+
+struct nfsv3_pathconf {
+	u_int32_t pc_linkmax;
+	u_int32_t pc_namemax;
+	u_int32_t pc_notrunc;
+	u_int32_t pc_chownrestricted;
+	u_int32_t pc_caseinsensitive;
+	u_int32_t pc_casepreserving;
+};
+
+/*
+ * NFS V4 data structures.
+ */
+struct nfsv4stateid {
+	u_int32_t	seqid;
+	u_int32_t	other[NFSX_STATEIDOTHER / NFSX_UNSIGNED];
+};
+typedef struct nfsv4stateid nfsv4stateid_t;
+
+/* Notify bits and notify bitmap size. */
+#define	NFSV4NOTIFY_CHANGE	1
+#define	NFSV4NOTIFY_DELETE	2
+#define	NFSV4_NOTIFYBITMAP	1	/* # of 32bit values needed for bits */
+
+/* Layoutreturn kinds. */
+#define	NFSV4LAYOUTRET_FILE	1
+#define	NFSV4LAYOUTRET_FSID	2
+#define	NFSV4LAYOUTRET_ALL	3
+
+#endif	/* _NFS_NFSPROTO_H_ */
diff --git a/freebsd/sys/fs/nfs/nfsrvcache.h b/freebsd/sys/fs/nfs/nfsrvcache.h
new file mode 100644
index 0000000..436dc4e
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsrvcache.h
@@ -0,0 +1,124 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSRVCACHE_H_
+#define	_NFS_NFSRVCACHE_H_
+
+/*
+ * Definitions for the server recent request cache
+ */
+#define	NFSRVCACHE_MAX_SIZE	2048
+#define	NFSRVCACHE_MIN_SIZE	  64
+
+#define	NFSRVCACHE_HASHSIZE	500
+
+/* Cache table entry. */
+struct nfsrvcache {
+	LIST_ENTRY(nfsrvcache) rc_hash;		/* Hash chain */
+	LIST_ENTRY(nfsrvcache) rc_ahash;	/* ACK hash chain */
+	TAILQ_ENTRY(nfsrvcache)	rc_lru;		/* UDP lru chain */
+	u_int32_t	rc_xid;			/* rpc id number */
+	time_t		rc_timestamp;		/* Time done */
+	union {
+		mbuf_t repmb;			/* Reply mbuf list OR */
+		int repstat;			/* Reply status */
+	} rc_un;
+	union {
+		struct {
+			union nethostaddr haddr; /* Host address */
+		} udp;
+		struct {
+			u_int64_t	sockref;
+			u_int32_t	len;
+			u_int32_t	tcpseq;
+			int16_t		refcnt;
+			u_int16_t	cksum;
+			time_t		cachetime;
+			int		acked;
+		} ot;
+	} rc_un2;
+	u_int16_t	rc_proc;		/* rpc proc number */
+	u_int16_t	rc_flag;		/* Flag bits */
+};
+
+#define	rc_reply	rc_un.repmb
+#define	rc_status	rc_un.repstat
+#define	rc_inet		rc_un2.udp.haddr.had_inet.s_addr
+#define	rc_inet6	rc_un2.udp.haddr.had_inet6
+#define	rc_haddr	rc_un2.udp.haddr
+#define	rc_sockref	rc_un2.ot.sockref
+#define	rc_tcpseq	rc_un2.ot.tcpseq
+#define	rc_refcnt	rc_un2.ot.refcnt
+#define	rc_reqlen	rc_un2.ot.len
+#define	rc_cksum	rc_un2.ot.cksum
+#define	rc_cachetime	rc_un2.ot.cachetime
+#define	rc_acked	rc_un2.ot.acked
+
+/* TCP ACK values */
+#define	RC_NO_SEQ		0
+#define	RC_NO_ACK		1
+#define	RC_ACK			2
+#define	RC_NACK			3
+
+/* Return values */
+#define	RC_DROPIT		0
+#define	RC_REPLY		1
+#define	RC_DOIT			2
+
+/* Flag bits */
+#define	RC_LOCKED	0x0001
+#define	RC_WANTED	0x0002
+#define	RC_REPSTATUS	0x0004
+#define	RC_REPMBUF	0x0008
+#define	RC_UDP		0x0010
+#define	RC_INETIPV6	0x0020
+#define	RC_INPROG	0x0040
+#define	RC_NFSV2	0x0100
+#define	RC_NFSV3	0x0200
+#define	RC_NFSV4	0x0400
+#define	RC_NFSVERS	(RC_NFSV2 | RC_NFSV3 | RC_NFSV4)
+#define	RC_REFCNT	0x0800
+#define	RC_SAMETCPCONN	0x1000
+
+LIST_HEAD(nfsrvhashhead, nfsrvcache);
+
+/* The fine-grained locked cache hash table for TCP. */
+struct nfsrchash_bucket {
+	struct mtx		mtx;
+	struct nfsrvhashhead	tbl;
+};
+
+#endif	/* _NFS_NFSRVCACHE_H_ */
diff --git a/freebsd/sys/fs/nfs/nfsrvstate.h b/freebsd/sys/fs/nfs/nfsrvstate.h
new file mode 100644
index 0000000..2d60e8e
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsrvstate.h
@@ -0,0 +1,410 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSRVSTATE_H_
+#define	_NFS_NFSRVSTATE_H_
+
+#if defined(_KERNEL) || defined(KERNEL)
+/*
+ * Definitions for NFS V4 server state handling.
+ */
+
+/*
+ * List heads for nfsclient, nfsstate and nfslockfile.
+ * (Some systems seem to like to dynamically size these things, but I
+ *  don't see any point in doing so for these ones.)
+ */
+LIST_HEAD(nfsclienthashhead, nfsclient);
+LIST_HEAD(nfsstatehead, nfsstate);
+LIST_HEAD(nfslockhead, nfslock);
+LIST_HEAD(nfslockhashhead, nfslockfile);
+LIST_HEAD(nfssessionhead, nfsdsession);
+LIST_HEAD(nfssessionhashhead, nfsdsession);
+TAILQ_HEAD(nfslayouthead, nfslayout);
+SLIST_HEAD(nfsdsdirhead, nfsdsdir);
+TAILQ_HEAD(nfsdevicehead, nfsdevice);
+LIST_HEAD(nfsdontlisthead, nfsdontlist);
+
+/*
+ * List head for nfsusrgrp.
+ */
+TAILQ_HEAD(nfsuserhashhead, nfsusrgrp);
+
+#define	NFSCLIENTHASH(id)						\
+	(&nfsclienthash[(id).lval[1] % nfsrv_clienthashsize])
+#define	NFSSTATEHASH(clp, id)						\
+	(&((clp)->lc_stateid[(id).other[2] % nfsrv_statehashsize]))
+#define	NFSUSERHASH(id)							\
+	(&nfsuserhash[(id) % nfsrv_lughashsize])
+#define	NFSUSERNAMEHASH(p, l)						\
+	(&nfsusernamehash[((l)>=4?(*(p)+*((p)+1)+*((p)+2)+*((p)+3)):*(p)) \
+		% nfsrv_lughashsize])
+#define	NFSGROUPHASH(id)						\
+	(&nfsgrouphash[(id) % nfsrv_lughashsize])
+#define	NFSGROUPNAMEHASH(p, l)						\
+	(&nfsgroupnamehash[((l)>=4?(*(p)+*((p)+1)+*((p)+2)+*((p)+3)):*(p)) \
+		% nfsrv_lughashsize])
+
+struct nfssessionhash {
+	struct mtx			mtx;
+	struct nfssessionhashhead	list;
+};
+#define	NFSSESSIONHASH(f) 						\
+	(&nfssessionhash[nfsrv_hashsessionid(f) % nfsrv_sessionhashsize])
+
+struct nfslayouthash {
+	struct mtx		mtx;
+	struct nfslayouthead	list;
+};
+#define	NFSLAYOUTHASH(f) 						\
+	(&nfslayouthash[nfsrv_hashfh(f) % nfsrv_layouthashsize])
+
+/*
+ * Client server structure for V4. It is doubly linked into two lists.
+ * The first is a hash table based on the clientid and the second is a
+ * list of all clients maintained in LRU order.
+ * The actual size malloc'd is large enough to accommodate the id string.
+ */
+struct nfsclient {
+	LIST_ENTRY(nfsclient) lc_hash;		/* Clientid hash list */
+	struct nfsstatehead *lc_stateid;	/* Stateid hash */
+	struct nfsstatehead lc_open;		/* Open owner list */
+	struct nfsstatehead lc_deleg;		/* Delegations */
+	struct nfsstatehead lc_olddeleg;	/* and old delegations */
+	struct nfssessionhead lc_session;	/* List of NFSv4.1 sessions */
+	time_t		lc_expiry;		/* Expiry time (sec) */
+	time_t		lc_delegtime;		/* Old deleg expiry (sec) */
+	nfsquad_t	lc_clientid;		/* 64 bit clientid */
+	nfsquad_t	lc_confirm;		/* 64 bit confirm value */
+	u_int32_t	lc_program;		/* RPC Program # */
+	u_int32_t	lc_callback;		/* Callback id */
+	u_int32_t	lc_stateindex;		/* Current state index# */
+	u_int32_t	lc_statemaxindex;	/* Max state index# */
+	u_int32_t	lc_cbref;		/* Cnt of callbacks */
+	uid_t		lc_uid;			/* User credential */
+	gid_t		lc_gid;
+	u_int16_t	lc_idlen;		/* Client ID and len */
+	u_int16_t	lc_namelen;		/* plus GSS principal and len */
+	u_char		*lc_name;
+	struct nfssockreq lc_req;		/* Callback info */
+	u_int32_t	lc_flags;		/* LCL_ flag bits */
+	u_char		lc_verf[NFSX_VERF];	 /* client verifier */
+	u_char		lc_id[1];		/* Malloc'd correct size */
+};
+
+#define	CLOPS_CONFIRM		0x0001
+#define	CLOPS_RENEW		0x0002
+#define	CLOPS_RENEWOP		0x0004
+
+/*
+ * Structure for NFSv4.1 Layouts.
+ * Malloc'd to correct size for the lay_xdr.
+ */
+struct nfslayout {
+	TAILQ_ENTRY(nfslayout)	lay_list;
+	nfsv4stateid_t		lay_stateid;
+	nfsquad_t		lay_clientid;
+	fhandle_t		lay_fh;
+	fsid_t			lay_fsid;
+	uint32_t		lay_layoutlen;
+	uint16_t		lay_mirrorcnt;
+	uint16_t		lay_trycnt;
+	uint16_t		lay_type;
+	uint16_t		lay_flags;
+	uint32_t		lay_xdr[0];
+};
+
+/* Flags for lay_flags. */
+#define	NFSLAY_READ	0x0001
+#define	NFSLAY_RW	0x0002
+#define	NFSLAY_RECALL	0x0004
+#define	NFSLAY_RETURNED	0x0008
+#define	NFSLAY_CALLB	0x0010
+
+/*
+ * Structure for an NFSv4.1 session.
+ * Locking rules for this structure.
+ * To add/delete one of these structures from the lists, you must lock
+ * both: NFSLOCKSTATE() and NFSLOCKSESSION(session hashhead) in that order.
+ * To traverse the lists looking for one of these, you must hold one
+ * of these two locks.
+ * The exception is if the thread holds the exclusive root sleep lock.
+ * In this case, all other nfsd threads are blocked, so locking the
+ * mutexes isn't required.
+ * When manipulating sess_refcnt, NFSLOCKSTATE() must be locked.
+ * When manipulating the fields within sess_cbsess except nfsess_xprt,
+ * sess_cbsess.nfsess_mtx must be locked.
+ * When manipulating sess_slots and sess_cbsess.nfsess_xprt,
+ * NFSLOCKSESSION(session hashhead) must be locked.
+ */
+struct nfsdsession {
+	uint64_t		sess_refcnt;	/* Reference count. */
+	LIST_ENTRY(nfsdsession)	sess_hash;	/* Hash list of sessions. */
+	LIST_ENTRY(nfsdsession)	sess_list;	/* List of client sessions. */
+	struct nfsslot		sess_slots[NFSV4_SLOTS];
+	struct nfsclient	*sess_clp;	/* Associated clientid. */
+	uint32_t		sess_crflags;
+	uint32_t		sess_cbprogram;
+	uint32_t		sess_maxreq;
+	uint32_t		sess_maxresp;
+	uint32_t		sess_maxrespcached;
+	uint32_t		sess_maxops;
+	uint32_t		sess_maxslots;
+	uint32_t		sess_cbmaxreq;
+	uint32_t		sess_cbmaxresp;
+	uint32_t		sess_cbmaxrespcached;
+	uint32_t		sess_cbmaxops;
+	uint8_t			sess_sessionid[NFSX_V4SESSIONID];
+	struct nfsclsession	sess_cbsess;	/* Callback session. */
+};
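
A short sketch (not from the import) of the add ordering described in the locking comment above: the global state lock is taken before the per-bucket session hash lock. It assumes sep is a freshly allocated struct nfsdsession, clp is its struct nfsclient, and that the NFSLOCKSTATE()/NFSLOCKSESSION() macros from the imported nfsport.h are in scope.

    /* Sketch only: link a new session into both lists under both locks. */
    struct nfssessionhash *shp = NFSSESSIONHASH(sep->sess_sessionid);

    NFSLOCKSTATE();
    NFSLOCKSESSION(shp);
    LIST_INSERT_HEAD(&shp->list, sep, sess_hash);
    LIST_INSERT_HEAD(&clp->lc_session, sep, sess_list);
    NFSUNLOCKSESSION(shp);
    NFSUNLOCKSTATE();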
+
+/*
+ * Nfs state structure. I couldn't resist overloading this one, since
+ * it makes cleanup, etc. simpler. These structures are used in four ways:
+ * - open_owner structures chained off of nfsclient
+ * - open file structures chained off an open_owner structure
+ * - lock_owner structures chained off an open file structure
+ * - delegated file structures chained off of nfsclient and nfslockfile
+ * - the ls_list field is used for the chain it is in
+ * - the ls_head structure is used to chain off the sibling structure
+ *   (it is a union between an nfsstate and nfslock structure head)
+ *    If it is a lockowner stateid, nfslock structures hang off it.
+ * For the open file and lockowner cases, it is in the hash table in
+ * nfsclient for stateid.
+ */
+struct nfsstate {
+	LIST_ENTRY(nfsstate)	ls_hash;	/* Hash list entry */
+	LIST_ENTRY(nfsstate)	ls_list;	/* List of opens/delegs */
+	LIST_ENTRY(nfsstate)	ls_file;	/* Opens/Delegs for a file */
+	union {
+		struct nfsstatehead	open; /* Opens list */
+		struct nfslockhead	lock; /* Locks list */
+	} ls_head;
+	nfsv4stateid_t		ls_stateid;	/* The state id */
+	u_int32_t		ls_seq;		/* seq id */
+	uid_t			ls_uid;		/* uid of locker */
+	u_int32_t		ls_flags;	/* Type of lock, etc. */
+	union {
+		struct nfsstate	*openowner;	/* Open only */
+		u_int32_t	opentolockseq;	/* Lock call only */
+		u_int32_t	noopens;	/* Openowner only */
+		struct {
+			u_quad_t	filerev; /* Delegations only */
+			time_t		expiry;
+			time_t		limit;
+			u_int64_t	compref;
+		} deleg;
+	} ls_un;
+	struct nfslockfile	*ls_lfp;	/* Back pointer */
+	struct nfsrvcache	*ls_op;		/* Op cache reference */
+	struct nfsclient	*ls_clp;	/* Back pointer */
+	u_short			ls_ownerlen;	/* Length of ls_owner */
+	u_char			ls_owner[1];	/* malloc'd the correct size */
+};
+#define	ls_lock			ls_head.lock
+#define	ls_open			ls_head.open
+#define	ls_opentolockseq	ls_un.opentolockseq
+#define	ls_openowner		ls_un.openowner
+#define	ls_openstp		ls_un.openowner
+#define	ls_noopens		ls_un.noopens
+#define	ls_filerev		ls_un.deleg.filerev
+#define	ls_delegtime		ls_un.deleg.expiry
+#define	ls_delegtimelimit	ls_un.deleg.limit
+#define	ls_compref		ls_un.deleg.compref
+
+/*
+ * Nfs lock structure.
+ * This structure is chained off of the nfsstate (the lockowner) and
+ * nfslockfile (the file) structures, for the file and owner it
+ * refers to. It holds flags and a byte range.
+ * It also has back pointers to the associated lock_owner and lockfile.
+ */
+struct nfslock {
+	LIST_ENTRY(nfslock)	lo_lckowner;
+	LIST_ENTRY(nfslock)	lo_lckfile;
+	struct nfsstate		*lo_stp;
+	struct nfslockfile	*lo_lfp;
+	u_int64_t		lo_first;
+	u_int64_t		lo_end;
+	u_int32_t		lo_flags;
+};
+
+/*
+ * Structure used to return a conflicting lock. (Must be large
+ * enough for the largest lock owner we can have.)
+ */
+struct nfslockconflict {
+	nfsquad_t		cl_clientid;
+	u_int64_t		cl_first;
+	u_int64_t		cl_end;
+	u_int32_t		cl_flags;
+	u_short			cl_ownerlen;
+	u_char			cl_owner[NFSV4_OPAQUELIMIT];
+};
+
+/*
+ * This structure is used to keep track of local locks that might need
+ * to be rolled back.
+ */
+struct nfsrollback {
+	LIST_ENTRY(nfsrollback)	rlck_list;
+	uint64_t		rlck_first;
+	uint64_t		rlck_end;
+	int			rlck_type;
+};
+
+/*
+ * This structure refers to a file for which lock(s) and/or open(s) exist.
+ * Searched via hash table on file handle or found via the back pointer from an
+ * open or lock owner.
+ */
+struct nfslockfile {
+	LIST_HEAD(, nfsstate)	lf_open;	/* Open list */
+	LIST_HEAD(, nfsstate)	lf_deleg;	/* Delegation list */
+	LIST_HEAD(, nfslock)	lf_lock;	/* Lock list */
+	LIST_HEAD(, nfslock)	lf_locallock;	/* Local lock list */
+	LIST_HEAD(, nfsrollback) lf_rollback;	/* Local lock rollback list */
+	LIST_ENTRY(nfslockfile)	lf_hash;	/* Hash list entry */
+	fhandle_t		lf_fh;		/* The file handle */
+	struct nfsv4lock	lf_locallock_lck; /* serialize local locking */
+	int			lf_usecount;	/* Ref count for locking */
+};
+
+/*
+ * This structure is malloc'd and chained off the hash lists for user/group
+ * names.
+ */
+struct nfsusrgrp {
+	TAILQ_ENTRY(nfsusrgrp)	lug_numhash;	/* Hash by id# */
+	TAILQ_ENTRY(nfsusrgrp)	lug_namehash;	/* and by name */
+	time_t			lug_expiry;	/* Expiry time in sec */
+	union {
+		uid_t		un_uid;		/* id# */
+		gid_t		un_gid;
+	} lug_un;
+	struct ucred		*lug_cred;	/* Cred. with groups list */
+	int			lug_namelen;	/* Name length */
+	u_char			lug_name[1];	/* malloc'd correct length */
+};
+#define	lug_uid		lug_un.un_uid
+#define	lug_gid		lug_un.un_gid
+
+/*
+ * These structures are used for the stable storage restart stuff.
+ */
+/*
+ * Record at beginning of file.
+ */
+struct nfsf_rec {
+	u_int32_t	lease;			/* Lease duration */
+	u_int32_t	numboots;		/* Number of boottimes */
+};
+
+void nfsrv_cleanclient(struct nfsclient *, NFSPROC_T *);
+void nfsrv_freedeleglist(struct nfsstatehead *);
+
+/*
+ * This structure is used to create the list of device info entries for
+ * a GetDeviceInfo operation and stores the DS server info.
+ * The nfsdev_addrandhost field has the fully qualified host domain name
+ * followed by the network address in XDR.
+ * It is allocated with nfsrv_dsdirsize nfsdev_dsdir[] entries.
+ */
+struct nfsdevice {
+	TAILQ_ENTRY(nfsdevice)	nfsdev_list;
+	vnode_t			nfsdev_dvp;
+	struct nfsmount		*nfsdev_nmp;
+	char			nfsdev_deviceid[NFSX_V4DEVICEID];
+	uint16_t		nfsdev_hostnamelen;
+	uint16_t		nfsdev_fileaddrlen;
+	uint16_t		nfsdev_flexaddrlen;
+	uint16_t		nfsdev_mdsisset;
+	char			*nfsdev_fileaddr;
+	char			*nfsdev_flexaddr;
+	char			*nfsdev_host;
+	fsid_t			nfsdev_mdsfsid;
+	uint32_t		nfsdev_nextdir;
+	vnode_t			nfsdev_dsdir[0];
+};
+
+/*
+ * This structure holds the va_size, va_filerev, va_atime, va_mtime and
+ * va_bytes for the DS file and is stored in the metadata file's extended
+ * attribute pnfsd.dsattr.
+ * The older opnfsdsattr structure was missing the va_bytes field, so the
+ * attribute format was updated to pnfsdsattr below.
+ */
+struct opnfsdsattr {
+	uint64_t	dsa_filerev;
+	uint64_t	dsa_size;
+	struct timespec	dsa_atime;
+	struct timespec	dsa_mtime;
+};
+
+struct pnfsdsattr {
+	uint64_t	dsa_filerev;
+	uint64_t	dsa_size;
+	struct timespec	dsa_atime;
+	struct timespec	dsa_mtime;
+	uint64_t	dsa_bytes;
+};
+
+/*
+ * This structure is a list element for a list the pNFS server uses to
+ * mark that the recovery of a mirror file is in progress.
+ */
+struct nfsdontlist {
+	LIST_ENTRY(nfsdontlist)	nfsmr_list;
+	uint32_t		nfsmr_flags;
+	fhandle_t		nfsmr_fh;
+};
+
+/* nfsmr_flags bits. */
+#define	NFSMR_DONTLAYOUT	0x00000001
+
+#endif	/* defined(_KERNEL) || defined(KERNEL) */
+
+/*
+ * This structure holds the information about the DS file and is stored
+ * in the metadata file's extended attribute called pnfsd.dsfile.
+ */
+#define	PNFS_FILENAME_LEN	(2 * sizeof(fhandle_t))
+struct pnfsdsfile {
+	fhandle_t	dsf_fh;
+	uint32_t	dsf_dir;
+	union {
+		struct sockaddr_in	sin;
+		struct sockaddr_in6	sin6;
+	} dsf_nam;
+	char		dsf_filename[PNFS_FILENAME_LEN + 1];
+};
+#define	dsf_sin		dsf_nam.sin
+#define	dsf_sin6	dsf_nam.sin6
+
+#endif	/* _NFS_NFSRVSTATE_H_ */
diff --git a/freebsd/sys/fs/nfs/nfsv4_errstr.h b/freebsd/sys/fs/nfs/nfsv4_errstr.h
new file mode 100644
index 0000000..7d32425
--- /dev/null
+++ b/freebsd/sys/fs/nfs/nfsv4_errstr.h
@@ -0,0 +1,103 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSV4ERRSTR_H_
+#define	_NFS_NFSV4ERRSTR_H_
+
+/*
+ * Defines static storage in the C file, but I can't be bothered creating
+ * a library of one function for this, since it is only currently used by
+ * mount_newnfs.c.
+ */
+static const char *nfsv4_errstr[48] = {
+	"Illegal filehandle",
+	"Undefined NFSv4 err",
+	"READDIR cookie is stale",
+	"operation not supported",
+	"response limit exceeded",
+	"undefined server error",
+	"type invalid for CREATE",
+	"file busy - retry",
+	"nverify says attrs same",
+	"lock unavailable",
+	"lock lease expired",
+	"I/O failed due to lock",
+	"in grace period",
+	"filehandle expired",
+	"share reserve denied",
+	"wrong security flavor",
+	"clientid in use",
+	"resource exhaustion",
+	"filesystem relocated",
+	"current FH is not set",
+	"minor vers not supp",
+	"server has rebooted",
+	"server has rebooted",
+	"state is out of sync",
+	"incorrect stateid",
+	"request is out of seq",
+	"verify - attrs not same",
+	"lock range not supported",
+	"should be file/directory",
+	"no saved filehandle",
+	"some filesystem moved",
+	"recommended attr not sup",
+	"reclaim outside of grace",
+	"reclaim error at server",
+	"conflict on reclaim",
+	"XDR decode failed",
+	"file locks held at CLOSE",
+	"conflict in OPEN and I/O",
+	"owner translation bad",
+	"utf-8 char not supported",
+	"name not supported",
+	"lock range not supported",
+	"no atomic up/downgrade",
+	"undefined operation",
+	"file locking deadlock",
+	"open file blocks op",
+	"lockowner state revoked",
+	"callback path down"
+};
+
+/*
+ * Return the error string for the NFS4ERR_xxx. The pointers returned are
+ * static and must not be free'd.
+ */
+static const char *
+nfsv4_geterrstr(int errval)
+{
+
+	if (errval < NFSERR_BADHANDLE || errval > NFSERR_CBPATHDOWN)
+		return (NULL);
+	return (nfsv4_errstr[errval - NFSERR_BADHANDLE]);
+}
+
+#endif	/* _NFS_NFSV4ERRSTR_H_ */
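
A usage sketch (not part of the header) for nfsv4_geterrstr(), which as the comment notes is intended for mount_newnfs.c. NFSERR_GRACE is one of the NFS4ERR codes defined in the imported nfsproto.h and is used here only as an example value.

    /* Sketch only: report an NFSv4 error by name when possible. */
    const char *msg = nfsv4_geterrstr(NFSERR_GRACE);

    if (msg != NULL)
            fprintf(stderr, "mount_newnfs: server error: %s\n", msg);
    else
            fprintf(stderr, "mount_newnfs: server error %d\n", NFSERR_GRACE);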
diff --git a/freebsd/sys/fs/nfs/rpcv2.h b/freebsd/sys/fs/nfs/rpcv2.h
new file mode 100644
index 0000000..799d9d6
--- /dev/null
+++ b/freebsd/sys/fs/nfs/rpcv2.h
@@ -0,0 +1,209 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_RPCV2_H_
+#define	_NFS_RPCV2_H_
+
+/*
+ * Definitions for Sun RPC Version 2, from
+ * "RPC: Remote Procedure Call Protocol Specification" RFC1057
+ */
+
+/* Version # */
+#define	RPC_VER2		2
+
+/* Authentication flavours */
+#define	RPCAUTH_NULL			0
+#define	RPCAUTH_UNIX			1
+#define	RPCAUTH_SHORT			2
+#define	RPCAUTH_KERB4			4
+#define	RPCAUTH_GSS			6
+#define	RPCAUTH_GSSKRB5			390003
+#define	RPCAUTH_GSSKRB5INTEGRITY	390004
+#define	RPCAUTH_GSSKRB5PRIVACY		390005
+
+#define	RPCAUTH_MAXSIZ		400
+#define	RPCVERF_MAXSIZ	12	/* For Kerb, can actually be 400 */
+
+/*
+ * RPCAUTH_UNIX defs.
+ */
+#define	RPCAUTHUNIX_MINSIZ	(5 * NFSX_UNSIGNED)
+#define	RPCAUTH_UNIXGIDS 16
+
+/*
+ * RPCAUTH_GSS defs.
+ */
+#define	RPCAUTHGSS_VERS1	1
+
+#define	RPCAUTHGSS_DATA		0
+#define	RPCAUTHGSS_INIT		1
+#define	RPCAUTHGSS_CONTINIT	2
+#define	RPCAUTHGSS_DESTROY	3
+
+#define	RPCAUTHGSS_SVCNONE	1
+#define	RPCAUTHGSS_SVCINTEGRITY	2
+#define	RPCAUTHGSS_SVCPRIVACY	3
+
+#define	RPCAUTHGSS_MAXSEQ	0x80000000
+
+#define	RPCAUTHGSS_WINDOW	64	/* # of bits in u_int64_t */
+#define	RPCAUTHGSS_SEQWINDOW	(RPCAUTHGSS_WINDOW + 1)
+
+#define	RPCAUTHGSS_MIC		1
+#define	RPCAUTHGSS_WRAP		2
+
+/*
+ * Qop values for the types of security services.
+ */
+#define	GSS_KERBV_QOP		0
+
+/*
+ * Sizes of GSS stuff.
+ */
+#define	RPCGSS_KEYSIZ		8
+
+#define	GSSX_AUTHHEAD	(5 * NFSX_UNSIGNED)
+#define	GSSX_MYHANDLE	(sizeof (long) + sizeof (u_int64_t))
+#define	GSSX_RPCHEADER	(13 * NFSX_UNSIGNED + GSSX_MYHANDLE)
+#define	GSSX_MINWRAP	(2 * NFSX_UNSIGNED)
+#define	GSSX_KERBVTOKEN	24
+#define	GSSX_LOCALHANDLE (sizeof (void *))
+
+/*
+ * Stuff for the gssd.
+ */
+#define	RPCPROG_GSSD		0x20101010
+#define	RPCGSSD_VERS		1
+#define	RPCGSSD_INIT		1
+#define	RPCGSSD_CONTINIT	2
+#define	RPCGSSD_CONTINITDESTROY	3
+#define	RPCGSSD_CLINIT		4
+#define	RPCGSSD_CLINITUID	5
+#define	RPCGSSD_CLCONT		6
+#define	RPCGSSD_CLCONTUID	7
+#define	RPCGSSD_CLINITNAME	8
+#define	RPCGSSD_CLCONTNAME	9
+
+/*
+ * Stuff for the nfsuserd
+ */
+#define	RPCPROG_NFSUSERD	0x21010101
+#define	RPCNFSUSERD_VERS	1
+#define	RPCNFSUSERD_GETUID	1
+#define	RPCNFSUSERD_GETGID	2
+#define	RPCNFSUSERD_GETUSER	3
+#define	RPCNFSUSERD_GETGROUP	4
+
+/*
+ * Some major status codes.
+ */
+#if !defined(_GSSAPI_H_) && !defined(GSSAPI_H_) && !defined(_GSSAPI_GSSAPI_H_) && !defined(_RPCSEC_GSS_H)
+#define	 GSS_S_COMPLETE                  0x00000000
+#define	 GSS_S_CONTINUE_NEEDED           0x00000001
+#define	 GSS_S_DUPLICATE_TOKEN           0x00000002
+#define	 GSS_S_OLD_TOKEN                 0x00000004
+#define	 GSS_S_UNSEQ_TOKEN               0x00000008
+#define	 GSS_S_GAP_TOKEN                 0x00000010
+#define	 GSS_S_BAD_MECH                  0x00010000
+#define	 GSS_S_BAD_NAME                  0x00020000
+#define	 GSS_S_BAD_NAMETYPE              0x00030000
+#define	 GSS_S_BAD_BINDINGS              0x00040000
+#define	 GSS_S_BAD_STATUS                0x00050000
+#define	 GSS_S_BAD_MIC                   0x00060000
+#define	 GSS_S_BAD_SIG                   0x00060000
+#define	 GSS_S_NO_CRED                   0x00070000
+#define	 GSS_S_NO_CONTEXT                0x00080000
+#define	 GSS_S_DEFECTIVE_TOKEN           0x00090000
+#define	 GSS_S_DEFECTIVE_CREDENTIAL      0x000a0000
+#define	 GSS_S_CREDENTIALS_EXPIRED       0x000b0000
+#define	 GSS_S_CONTEXT_EXPIRED           0x000c0000
+#define	 GSS_S_FAILURE                   0x000d0000
+#define	 GSS_S_BAD_QOP                   0x000e0000
+#define	 GSS_S_UNAUTHORIZED              0x000f0000
+#define	 GSS_S_UNAVAILABLE               0x00100000
+#define	 GSS_S_DUPLICATE_ELEMENT         0x00110000
+#define	 GSS_S_NAME_NOT_MN               0x00120000
+#define	 GSS_S_CALL_INACCESSIBLE_READ    0x01000000
+#define	 GSS_S_CALL_INACCESSIBLE_WRITE   0x02000000
+#define	 GSS_S_CALL_BAD_STRUCTURE        0x03000000
+#endif	/* _GSSAPI_H_ */
+
+/* Rpc Constants */
+#define	RPC_CALL	0
+#define	RPC_REPLY	1
+#define	RPC_MSGACCEPTED	0
+#define	RPC_MSGDENIED	1
+#define	RPC_PROGUNAVAIL	1
+#define	RPC_PROGMISMATCH	2
+#define	RPC_PROCUNAVAIL	3
+#define	RPC_GARBAGE	4		/* I like this one */
+#define	RPC_MISMATCH	0
+#define	RPC_AUTHERR	1
+
+/* Authentication failures */
+#define	AUTH_BADCRED	1
+#define	AUTH_REJECTCRED	2
+#define	AUTH_BADVERF	3
+#define	AUTH_REJECTVERF	4
+#define	AUTH_TOOWEAK	5		/* Give em wheaties */
+#define	AUTH_PROBCRED	13
+#define	AUTH_CTXCRED	14
+
+/* Sizes of rpc header parts */
+#define	RPC_SIZ		24
+#define	RPC_REPLYSIZ	28
+
+/* RPC Prog definitions */
+#define	RPCPROG_MNT	100005
+#define	RPCMNT_VER1	1
+#define	RPCMNT_VER3	3
+#define	RPCMNT_MOUNT	1
+#define	RPCMNT_DUMP	2
+#define	RPCMNT_UMOUNT	3
+#define	RPCMNT_UMNTALL	4
+#define	RPCMNT_EXPORT	5
+#define	RPCMNT_NAMELEN	255
+#define	RPCMNT_PATHLEN	1024
+#define	RPCPROG_NFS	100003
+ 
+/* Structs for common parts of the rpc's */
+struct rpcv2_time {
+	u_int32_t rpc_sec;
+	u_int32_t rpc_usec;
+};
+
+#endif	/* _NFS_RPCV2_H_ */
diff --git a/freebsd/sys/fs/nfs/xdr_subs.h b/freebsd/sys/fs/nfs/xdr_subs.h
new file mode 100644
index 0000000..54c7d91
--- /dev/null
+++ b/freebsd/sys/fs/nfs/xdr_subs.h
@@ -0,0 +1,101 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_XDR_SUBS_H_
+#define	_NFS_XDR_SUBS_H_
+
+/*
+ * Macros used for conversion to/from xdr representation by nfs...
+ * These use the MACHINE DEPENDENT routines ntohl, htonl
+ * As defined by "XDR: External Data Representation Standard" RFC1014
+ *
+ * To simplify the implementation, we use ntohl/htonl even on big-endian
+ * machines, and count on them being `#define'd away.  Some of these
+ * might be slightly more efficient as quad_t copies on a big-endian,
+ * but we cannot count on their alignment anyway.
+ */
+
+#define	fxdr_unsigned(t, v)	((t)ntohl((int32_t)(v)))
+#define	txdr_unsigned(v)	(htonl((int32_t)(v)))
+
+#define	fxdr_nfsv2time(f, t) do { 					\
+    (t)->tv_sec = ntohl(((struct nfsv2_time *)(f))->nfsv2_sec); 	\
+    if (((struct nfsv2_time *)(f))->nfsv2_usec != 0xffffffff) 		\
+	(t)->tv_nsec = 1000 * ntohl(((struct nfsv2_time *)(f))->nfsv2_usec); \
+    else 								\
+	(t)->tv_nsec = 0; 						\
+    } while (0)
+
+#define	txdr_nfsv2time(f, t) do { 					\
+    ((struct nfsv2_time *)(t))->nfsv2_sec = htonl((f)->tv_sec); 	\
+    if ((f)->tv_nsec != -1) 						\
+	((struct nfsv2_time *)(t))->nfsv2_usec = htonl((f)->tv_nsec / 1000); \
+    else 								\
+	((struct nfsv2_time *)(t))->nfsv2_usec = 0xffffffff; 		\
+    } while (0)
+
+#define	fxdr_nfsv3time(f, t) do { 					\
+	(t)->tv_sec = ntohl(((struct nfsv3_time *)(f))->nfsv3_sec); 	\
+	(t)->tv_nsec = ntohl(((struct nfsv3_time *)(f))->nfsv3_nsec); 	\
+    } while (0)
+
+#define	txdr_nfsv3time(f, t) do { 					\
+	((struct nfsv3_time *)(t))->nfsv3_sec = htonl((f)->tv_sec); 	\
+	((struct nfsv3_time *)(t))->nfsv3_nsec = htonl((f)->tv_nsec); 	\
+    } while (0)
+
+#define	fxdr_nfsv4time(f, t) do { 					\
+	(t)->tv_sec = ntohl(((struct nfsv4_time *)(f))->nfsv4_sec); 	\
+	(t)->tv_nsec = (ntohl(((struct nfsv4_time *)(f))->nfsv4_nsec) % \
+		1000000000); 						\
+    } while (0)
+
+#define	txdr_nfsv4time(f, t) do { 					\
+	((struct nfsv4_time *)(t))->nfsv4_highsec = 0; 			\
+	((struct nfsv4_time *)(t))->nfsv4_sec = htonl((f)->tv_sec); 	\
+	((struct nfsv4_time *)(t))->nfsv4_nsec = htonl((f)->tv_nsec); 	\
+    } while (0)
+
+#define	fxdr_hyper(f) 							\
+        ((((u_quad_t)ntohl(((u_int32_t *)(f))[0])) << 32) |		\
+	 (u_quad_t)(ntohl(((u_int32_t *)(f))[1])))
+
+#define	txdr_hyper(f, t) do {						\
+	((u_int32_t *)(t))[0] = htonl((u_int32_t)((f) >> 32));		\
+	((u_int32_t *)(t))[1] = htonl((u_int32_t)((f) & 0xffffffff));	\
+    } while (0)
+
+#endif	/* _NFS_XDR_SUBS_H_ */
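
A small sketch (not part of the header) showing the hyper helpers above round-tripping a 64-bit value: txdr_hyper() splits it into two network-order 32-bit words and fxdr_hyper() reassembles it, matching the XDR wire encoding.

    /* Sketch only: encode and decode a 64-bit quantity in XDR form. */
    u_int32_t wire[2];
    u_quad_t size = 0x0123456789abcdefULL;
    u_quad_t decoded;

    txdr_hyper(size, wire);        /* host 64-bit -> big-endian word pair */
    decoded = fxdr_hyper(wire);    /* big-endian word pair -> host 64-bit */
    /* decoded == size */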
diff --git a/freebsd/sys/fs/nfsclient/nfs.h b/freebsd/sys/fs/nfsclient/nfs.h
new file mode 100644
index 0000000..ce1747a
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs.h
@@ -0,0 +1,128 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFS_H_
+#define	_NFSCLIENT_NFS_H_
+
+#if defined(_KERNEL)
+
+#ifndef NFS_TPRINTF_INITIAL_DELAY
+#define	NFS_TPRINTF_INITIAL_DELAY       12
+#endif
+
+#ifndef NFS_TPRINTF_DELAY
+#define	NFS_TPRINTF_DELAY               30
+#endif
+
+/*
+ * Nfs version macros.
+ */
+#define	NFS_ISV3(v) \
+	(VFSTONFS((v)->v_mount)->nm_flag & NFSMNT_NFSV3)
+#define	NFS_ISV4(v) \
+	(VFSTONFS((v)->v_mount)->nm_flag & NFSMNT_NFSV4)
+#define	NFS_ISV34(v) \
+	(VFSTONFS((v)->v_mount)->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4))
+
+#ifdef NFS_DEBUG
+
+extern int nfs_debug;
+#define	NFS_DEBUG_ASYNCIO	1 /* asynchronous i/o */
+#define	NFS_DEBUG_WG		2 /* server write gathering */
+#define	NFS_DEBUG_RC		4 /* server request caching */
+
+#define	NFS_DPF(cat, args)					\
+	do {							\
+		if (nfs_debug & NFS_DEBUG_##cat) printf args;	\
+	} while (0)
+
+#else
+
+#define	NFS_DPF(cat, args)
+
+#endif
+
+/*
+ * NFS iod threads can be in one of these three states once spawned.
+ * NFSIOD_NOT_AVAILABLE - Cannot be assigned an I/O operation at this time.
+ * NFSIOD_AVAILABLE - Available to be assigned an I/O operation.
+ * NFSIOD_CREATED_FOR_NFS_ASYNCIO - Newly created for nfs_asyncio() and
+ *	will be used by the thread that called nfs_asyncio().
+ */
+enum nfsiod_state {
+	NFSIOD_NOT_AVAILABLE = 0,
+	NFSIOD_AVAILABLE = 1,
+	NFSIOD_CREATED_FOR_NFS_ASYNCIO = 2,
+};
+
+/*
+ * Function prototypes.
+ */
+int ncl_meta_setsize(struct vnode *, struct thread *, u_quad_t);
+void ncl_doio_directwrite(struct buf *);
+int ncl_bioread(struct vnode *, struct uio *, int, struct ucred *);
+int ncl_biowrite(struct vnode *, struct uio *, int, struct ucred *);
+int ncl_vinvalbuf(struct vnode *, int, struct thread *, int);
+int ncl_asyncio(struct nfsmount *, struct buf *, struct ucred *,
+    struct thread *);
+int ncl_doio(struct vnode *, struct buf *, struct ucred *, struct thread *,
+    int);
+void ncl_nhinit(void);
+void ncl_nhuninit(void);
+void ncl_nodelock(struct nfsnode *);
+void ncl_nodeunlock(struct nfsnode *);
+int ncl_getattrcache(struct vnode *, struct vattr *);
+int ncl_readrpc(struct vnode *, struct uio *, struct ucred *);
+int ncl_writerpc(struct vnode *, struct uio *, struct ucred *, int *, int *,
+    int);
+int ncl_readlinkrpc(struct vnode *, struct uio *, struct ucred *);
+int ncl_readdirrpc(struct vnode *, struct uio *, struct ucred *,
+    struct thread *);
+int ncl_readdirplusrpc(struct vnode *, struct uio *, struct ucred *,
+    struct thread *);
+int ncl_writebp(struct buf *, int, struct thread *);
+int ncl_commit(struct vnode *, u_quad_t, int, struct ucred *, struct thread *);
+void ncl_clearcommit(struct mount *);
+int ncl_fsinfo(struct nfsmount *, struct vnode *, struct ucred *,
+    struct thread *);
+int ncl_init(struct vfsconf *);
+int ncl_uninit(struct vfsconf *);
+void	ncl_nfsiodnew(void);
+void	ncl_nfsiodnew_tq(__unused void *, int);
+
+#endif	/* _KERNEL */
+
+#endif	/* _NFSCLIENT_NFS_H_ */
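
A one-line sketch (not from the header) of the NFS_DPF() debug macro above: when NFS_DEBUG is defined and the ASYNCIO bit is set in nfs_debug, the message is printed; otherwise the macro expands to nothing. The bp and nmp names stand for whatever local buffer and mount pointers happen to be in scope.

    /* Sketch only: printed only when nfs_debug has NFS_DEBUG_ASYNCIO set. */
    NFS_DPF(ASYNCIO, ("nfsiod took buffer %p for mount %p\n", bp, nmp));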
diff --git a/freebsd/sys/fs/nfsclient/nfs_clbio.c b/freebsd/sys/fs/nfsclient/nfs_clbio.c
new file mode 100644
index 0000000..105ab25
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clbio.c
@@ -0,0 +1,1874 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/kernel.h>
+#include <sys/mount.h>
+#include <sys/rwlock.h>
+#include <sys/vmmeter.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <vm/vnode_pager.h>
+
+#include <fs/nfs/nfsport.h>
+#include <fs/nfsclient/nfsmount.h>
+#include <fs/nfsclient/nfs.h>
+#include <fs/nfsclient/nfsnode.h>
+#include <fs/nfsclient/nfs_kdtrace.h>
+
+extern int newnfs_directio_allow_mmap;
+extern struct nfsstatsv1 nfsstatsv1;
+extern struct mtx ncl_iod_mutex;
+extern int ncl_numasync;
+extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
+extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
+extern int newnfs_directio_enable;
+extern int nfs_keep_dirty_on_error;
+
+int ncl_pbuf_freecnt = -1;	/* start out unlimited */
+
+static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
+    struct thread *td);
+static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
+    struct ucred *cred, int ioflag);
+
+/*
+ * Vnode op for VM getpages.
+ */
+SYSCTL_DECL(_vfs_nfs);
+static int use_buf_pager = 1;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN,
+    &use_buf_pager, 0,
+    "Use buffer pager instead of direct readrpc call");
+
+static daddr_t
+ncl_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
+{
+
+	return (off / vp->v_bufobj.bo_bsize);
+}
+
+static int
+ncl_gbp_getblksz(struct vnode *vp, daddr_t lbn)
+{
+	struct nfsnode *np;
+	u_quad_t nsize;
+	int biosize, bcount;
+
+	np = VTONFS(vp);
+	NFSLOCKNODE(np);
+	nsize = np->n_size;
+	NFSUNLOCKNODE(np);
+
+	biosize = vp->v_bufobj.bo_bsize;
+	bcount = biosize;
+	if ((off_t)lbn * biosize >= nsize)
+		bcount = 0;
+	else if ((off_t)(lbn + 1) * biosize > nsize)
+		bcount = nsize - (off_t)lbn * biosize;
+	return (bcount);
+}
+
+int
+ncl_getpages(struct vop_getpages_args *ap)
+{
+	int i, error, nextoff, size, toff, count, npages;
+	struct uio uio;
+	struct iovec iov;
+	vm_offset_t kva;
+	struct buf *bp;
+	struct vnode *vp;
+	struct thread *td;
+	struct ucred *cred;
+	struct nfsmount *nmp;
+	vm_object_t object;
+	vm_page_t *pages;
+	struct nfsnode *np;
+
+	vp = ap->a_vp;
+	np = VTONFS(vp);
+	td = curthread;
+	cred = curthread->td_ucred;
+	nmp = VFSTONFS(vp->v_mount);
+	pages = ap->a_m;
+	npages = ap->a_count;
+
+	if ((object = vp->v_object) == NULL) {
+		printf("ncl_getpages: called with non-merged cache vnode\n");
+		return (VM_PAGER_ERROR);
+	}
+
+	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
+		NFSLOCKNODE(np);
+		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
+			NFSUNLOCKNODE(np);
+			printf("ncl_getpages: called on non-cacheable vnode\n");
+			return (VM_PAGER_ERROR);
+		} else
+			NFSUNLOCKNODE(np);
+	}
+
+	mtx_lock(&nmp->nm_mtx);
+	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
+	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
+		mtx_unlock(&nmp->nm_mtx);
+		/* We'll never get here for v4, because we always have fsinfo */
+		(void)ncl_fsinfo(nmp, vp, cred, td);
+	} else
+		mtx_unlock(&nmp->nm_mtx);
+
+	if (use_buf_pager)
+		return (vfs_bio_getpages(vp, pages, npages, ap->a_rbehind,
+		    ap->a_rahead, ncl_gbp_getblkno, ncl_gbp_getblksz));
+
+	/*
+	 * If the requested page is partially valid, just return it and
+	 * allow the pager to zero-out the blanks.  Partially valid pages
+	 * can only occur at the file EOF.
+	 *
+	 * XXXGL: is that true for NFS, where short read can occur???
+	 */
+	VM_OBJECT_WLOCK(object);
+	if (pages[npages - 1]->valid != 0 && --npages == 0)
+		goto out;
+	VM_OBJECT_WUNLOCK(object);
+
+	/*
+	 * We use only the kva address for the buffer, but this is extremely
+	 * convenient and fast.
+	 */
+	bp = getpbuf(&ncl_pbuf_freecnt);
+
+	kva = (vm_offset_t) bp->b_data;
+	pmap_qenter(kva, pages, npages);
+	VM_CNT_INC(v_vnodein);
+	VM_CNT_ADD(v_vnodepgsin, npages);
+
+	count = npages << PAGE_SHIFT;
+	iov.iov_base = (caddr_t) kva;
+	iov.iov_len = count;
+	uio.uio_iov = &iov;
+	uio.uio_iovcnt = 1;
+	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
+	uio.uio_resid = count;
+	uio.uio_segflg = UIO_SYSSPACE;
+	uio.uio_rw = UIO_READ;
+	uio.uio_td = td;
+
+	error = ncl_readrpc(vp, &uio, cred);
+	pmap_qremove(kva, npages);
+
+	relpbuf(bp, &ncl_pbuf_freecnt);
+
+	if (error && (uio.uio_resid == count)) {
+		printf("ncl_getpages: error %d\n", error);
+		return (VM_PAGER_ERROR);
+	}
+
+	/*
+	 * Calculate the number of bytes read and validate only that number
+	 * of bytes.  Note that due to pending writes, size may be 0.  This
+	 * does not mean that the remaining data is invalid!
+	 */
+
+	size = count - uio.uio_resid;
+	VM_OBJECT_WLOCK(object);
+	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
+		vm_page_t m;
+		nextoff = toff + PAGE_SIZE;
+		m = pages[i];
+
+		if (nextoff <= size) {
+			/*
+			 * Read operation filled an entire page
+			 */
+			m->valid = VM_PAGE_BITS_ALL;
+			KASSERT(m->dirty == 0,
+			    ("nfs_getpages: page %p is dirty", m));
+		} else if (size > toff) {
+			/*
+			 * Read operation filled a partial page.
+			 */
+			m->valid = 0;
+			vm_page_set_valid_range(m, 0, size - toff);
+			KASSERT(m->dirty == 0,
+			    ("nfs_getpages: page %p is dirty", m));
+		} else {
+			/*
+			 * Read operation was short.  If no error
+			 * occurred we may have hit a zero-fill
+			 * section.  We leave valid set to 0, and page
+			 * is freed by vm_page_readahead_finish() if
+			 * its index is not equal to requested, or
+			 * page is zeroed and set valid by
+			 * vm_pager_get_pages() for requested page.
+			 */
+			;
+		}
+	}
+out:
+	VM_OBJECT_WUNLOCK(object);
+	if (ap->a_rbehind)
+		*ap->a_rbehind = 0;
+	if (ap->a_rahead)
+		*ap->a_rahead = 0;
+	return (VM_PAGER_OK);
+}
+
+/*
+ * Vnode op for VM putpages.
+ */
+int
+ncl_putpages(struct vop_putpages_args *ap)
+{
+	struct uio uio;
+	struct iovec iov;
+	int i, error, npages, count;
+	off_t offset;
+	int *rtvals;
+	struct vnode *vp;
+	struct thread *td;
+	struct ucred *cred;
+	struct nfsmount *nmp;
+	struct nfsnode *np;
+	vm_page_t *pages;
+
+	vp = ap->a_vp;
+	np = VTONFS(vp);
+	td = curthread;				/* XXX */
+	/* Set the cred to n_writecred for the write rpcs. */
+	if (np->n_writecred != NULL)
+		cred = crhold(np->n_writecred);
+	else
+		cred = crhold(curthread->td_ucred);	/* XXX */
+	nmp = VFSTONFS(vp->v_mount);
+	pages = ap->a_m;
+	count = ap->a_count;
+	rtvals = ap->a_rtvals;
+	npages = btoc(count);
+	offset = IDX_TO_OFF(pages[0]->pindex);
+
+	mtx_lock(&nmp->nm_mtx);
+	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
+	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
+		mtx_unlock(&nmp->nm_mtx);
+		(void)ncl_fsinfo(nmp, vp, cred, td);
+	} else
+		mtx_unlock(&nmp->nm_mtx);
+
+	NFSLOCKNODE(np);
+	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
+	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
+		NFSUNLOCKNODE(np);
+		printf("ncl_putpages: called on non-cacheable vnode\n");
+		NFSLOCKNODE(np);
+	}
+	/*
+	 * When putting pages, do not extend file past EOF.
+	 */
+	if (offset + count > np->n_size) {
+		count = np->n_size - offset;
+		if (count < 0)
+			count = 0;
+	}
+	NFSUNLOCKNODE(np);
+
+	for (i = 0; i < npages; i++)
+		rtvals[i] = VM_PAGER_ERROR;
+
+	VM_CNT_INC(v_vnodeout);
+	VM_CNT_ADD(v_vnodepgsout, count);
+
+	iov.iov_base = unmapped_buf;
+	iov.iov_len = count;
+	uio.uio_iov = &iov;
+	uio.uio_iovcnt = 1;
+	uio.uio_offset = offset;
+	uio.uio_resid = count;
+	uio.uio_segflg = UIO_NOCOPY;
+	uio.uio_rw = UIO_WRITE;
+	uio.uio_td = td;
+
+	error = VOP_WRITE(vp, &uio, vnode_pager_putpages_ioflags(ap->a_sync),
+	    cred);
+	crfree(cred);
+
+	if (error == 0 || !nfs_keep_dirty_on_error) {
+		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
+		    np->n_size - offset, npages * PAGE_SIZE);
+	}
+	return (rtvals[0]);
+}
+
+/*
+ * For nfs, cache consistency can only be maintained approximately.
+ * Although RFC1094 does not specify the criteria, the following is
+ * believed to be compatible with the reference port.
+ * For nfs:
+ * If the file's modify time on the server has changed since the
+ * last read rpc or you have written to the file,
+ * you may have lost data cache consistency with the
+ * server, so flush all of the file's data out of the cache.
+ * Then force a getattr rpc to ensure that you have up to date
+ * attributes.
+ * NB: This implies that cache data can be read when up to
+ * NFS_ATTRTIMEO seconds out of date. If you find that you need current
+ * attributes this could be forced by setting n_attrstamp to 0 before
+ * the VOP_GETATTR() call.
+ */
+static inline int
+nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
+{
+	int error = 0;
+	struct vattr vattr;
+	struct nfsnode *np = VTONFS(vp);
+	bool old_lock;
+
+	/*
+	 * Ensure exclusive access to the node before checking
+	 * whether the cache is consistent.
+	 */
+	old_lock = ncl_excl_start(vp);
+	NFSLOCKNODE(np);
+	if (np->n_flag & NMODIFIED) {
+		NFSUNLOCKNODE(np);
+		if (vp->v_type != VREG) {
+			if (vp->v_type != VDIR)
+				panic("nfs: bioread, not dir");
+			ncl_invaldir(vp);
+			error = ncl_vinvalbuf(vp, V_SAVE | V_ALLOWCLEAN, td, 1);
+			if (error != 0)
+				goto out;
+		}
+		np->n_attrstamp = 0;
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+		error = VOP_GETATTR(vp, &vattr, cred);
+		if (error)
+			goto out;
+		NFSLOCKNODE(np);
+		np->n_mtime = vattr.va_mtime;
+		NFSUNLOCKNODE(np);
+	} else {
+		NFSUNLOCKNODE(np);
+		error = VOP_GETATTR(vp, &vattr, cred);
+		if (error)
+			goto out;
+		NFSLOCKNODE(np);
+		if ((np->n_flag & NSIZECHANGED)
+		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
+			NFSUNLOCKNODE(np);
+			if (vp->v_type == VDIR)
+				ncl_invaldir(vp);
+			error = ncl_vinvalbuf(vp, V_SAVE | V_ALLOWCLEAN, td, 1);
+			if (error != 0)
+				goto out;
+			NFSLOCKNODE(np);
+			np->n_mtime = vattr.va_mtime;
+			np->n_flag &= ~NSIZECHANGED;
+		}
+		NFSUNLOCKNODE(np);
+	}
+out:
+	ncl_excl_finish(vp, old_lock);
+	return (error);
+}
+
+/*
+ * Vnode op for read using bio
+ */
+int
+ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
+{
+	struct nfsnode *np = VTONFS(vp);
+	int biosize, i;
+	struct buf *bp, *rabp;
+	struct thread *td;
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	daddr_t lbn, rabn;
+	int bcount;
+	int seqcount;
+	int nra, error = 0, n = 0, on = 0;
+	off_t tmp_off;
+
+	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
+	if (uio->uio_resid == 0)
+		return (0);
+	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
+		return (EINVAL);
+	td = uio->uio_td;
+
+	mtx_lock(&nmp->nm_mtx);
+	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
+	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
+		mtx_unlock(&nmp->nm_mtx);
+		(void)ncl_fsinfo(nmp, vp, cred, td);
+		mtx_lock(&nmp->nm_mtx);
+	}
+	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
+		(void) newnfs_iosize(nmp);
+
+	tmp_off = uio->uio_offset + uio->uio_resid;
+	if (vp->v_type != VDIR &&
+	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
+		mtx_unlock(&nmp->nm_mtx);
+		return (EFBIG);
+	}
+	mtx_unlock(&nmp->nm_mtx);
+
+	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
+		/* No caching/ no readaheads. Just read data into the user buffer */
+		return ncl_readrpc(vp, uio, cred);
+
+	biosize = vp->v_bufobj.bo_bsize;
+	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
+
+	error = nfs_bioread_check_cons(vp, td, cred);
+	if (error)
+		return error;
+
+	do {
+	    u_quad_t nsize;
+
+	    NFSLOCKNODE(np);
+	    nsize = np->n_size;
+	    NFSUNLOCKNODE(np);
+
+	    switch (vp->v_type) {
+	    case VREG:
+		NFSINCRGLOBAL(nfsstatsv1.biocache_reads);
+		lbn = uio->uio_offset / biosize;
+		on = uio->uio_offset - (lbn * biosize);
+
+		/*
+		 * Start the read ahead(s), as required.
+		 */
+		if (nmp->nm_readahead > 0) {
+		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
+			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
+			rabn = lbn + 1 + nra;
+			if (incore(&vp->v_bufobj, rabn) == NULL) {
+			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
+			    if (!rabp) {
+				error = newnfs_sigintr(nmp, td);
+				return (error ? error : EINTR);
+			    }
+			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
+				rabp->b_flags |= B_ASYNC;
+				rabp->b_iocmd = BIO_READ;
+				vfs_busy_pages(rabp, 0);
+				if (ncl_asyncio(nmp, rabp, cred, td)) {
+				    rabp->b_flags |= B_INVAL;
+				    rabp->b_ioflags |= BIO_ERROR;
+				    vfs_unbusy_pages(rabp);
+				    brelse(rabp);
+				    break;
+				}
+			    } else {
+				brelse(rabp);
+			    }
+			}
+		    }
+		}
+
+		/* Note that bcount is *not* DEV_BSIZE aligned. */
+		bcount = biosize;
+		if ((off_t)lbn * biosize >= nsize) {
+			bcount = 0;
+		} else if ((off_t)(lbn + 1) * biosize > nsize) {
+			bcount = nsize - (off_t)lbn * biosize;
+		}
+		bp = nfs_getcacheblk(vp, lbn, bcount, td);
+
+		if (!bp) {
+			error = newnfs_sigintr(nmp, td);
+			return (error ? error : EINTR);
+		}
+
+		/*
+		 * If B_CACHE is not set, we must issue the read.  If this
+		 * fails, we return an error.
+		 */
+
+		if ((bp->b_flags & B_CACHE) == 0) {
+		    bp->b_iocmd = BIO_READ;
+		    vfs_busy_pages(bp, 0);
+		    error = ncl_doio(vp, bp, cred, td, 0);
+		    if (error) {
+			brelse(bp);
+			return (error);
+		    }
+		}
+
+		/*
+		 * on is the offset into the current bp.  Figure out how many
+		 * bytes we can copy out of the bp.  Note that bcount is
+		 * NOT DEV_BSIZE aligned.
+		 *
+		 * Then figure out how many bytes we can copy into the uio.
+		 */
+
+		n = 0;
+		if (on < bcount)
+			n = MIN((unsigned)(bcount - on), uio->uio_resid);
+		break;
+	    case VLNK:
+		NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks);
+		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
+		if (!bp) {
+			error = newnfs_sigintr(nmp, td);
+			return (error ? error : EINTR);
+		}
+		if ((bp->b_flags & B_CACHE) == 0) {
+		    bp->b_iocmd = BIO_READ;
+		    vfs_busy_pages(bp, 0);
+		    error = ncl_doio(vp, bp, cred, td, 0);
+		    if (error) {
+			bp->b_ioflags |= BIO_ERROR;
+			brelse(bp);
+			return (error);
+		    }
+		}
+		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
+		on = 0;
+		break;
+	    case VDIR:
+		NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs);
+		if (np->n_direofoffset
+		    && uio->uio_offset >= np->n_direofoffset) {
+		    return (0);
+		}
+		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
+		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
+		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
+		if (!bp) {
+		    error = newnfs_sigintr(nmp, td);
+		    return (error ? error : EINTR);
+		}
+		if ((bp->b_flags & B_CACHE) == 0) {
+		    bp->b_iocmd = BIO_READ;
+		    vfs_busy_pages(bp, 0);
+		    error = ncl_doio(vp, bp, cred, td, 0);
+		    if (error) {
+			    brelse(bp);
+		    }
+		    while (error == NFSERR_BAD_COOKIE) {
+			ncl_invaldir(vp);
+			error = ncl_vinvalbuf(vp, 0, td, 1);
+
+			/*
+			 * Yuck! The directory has been modified on the
+			 * server. The only way to get the block is by
+			 * reading from the beginning to get all the
+			 * offset cookies.
+			 *
+			 * Leave the last bp intact unless there is an error.
+			 * Loop back up to the while if the error is another
+			 * NFSERR_BAD_COOKIE (double yuch!).
+			 */
+			for (i = 0; i <= lbn && !error; i++) {
+			    if (np->n_direofoffset
+				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
+				    return (0);
+			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
+			    if (!bp) {
+				error = newnfs_sigintr(nmp, td);
+				return (error ? error : EINTR);
+			    }
+			    if ((bp->b_flags & B_CACHE) == 0) {
+				    bp->b_iocmd = BIO_READ;
+				    vfs_busy_pages(bp, 0);
+				    error = ncl_doio(vp, bp, cred, td, 0);
+				    /*
+				     * no error + B_INVAL == directory EOF,
+				     * use the block.
+				     */
+				    if (error == 0 && (bp->b_flags & B_INVAL))
+					    break;
+			    }
+			    /*
+			     * An error will throw away the block and the
+			     * for loop will break out.  If no error and this
+			     * is not the block we want, we throw away the
+			     * block and go for the next one via the for loop.
+			     */
+			    if (error || i < lbn)
+				    brelse(bp);
+			}
+		    }
+		    /*
+		     * The above while is repeated if we hit another cookie
+		     * error.  If we hit an error and it wasn't a cookie error,
+		     * we give up.
+		     */
+		    if (error)
+			    return (error);
+		}
+
+		/*
+		 * If not eof and read aheads are enabled, start one.
+		 * (You need the current block first, so that you have the
+		 *  directory offset cookie of the next block.)
+		 */
+		if (nmp->nm_readahead > 0 &&
+		    (bp->b_flags & B_INVAL) == 0 &&
+		    (np->n_direofoffset == 0 ||
+		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
+		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
+			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
+			if (rabp) {
+			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
+				rabp->b_flags |= B_ASYNC;
+				rabp->b_iocmd = BIO_READ;
+				vfs_busy_pages(rabp, 0);
+				if (ncl_asyncio(nmp, rabp, cred, td)) {
+				    rabp->b_flags |= B_INVAL;
+				    rabp->b_ioflags |= BIO_ERROR;
+				    vfs_unbusy_pages(rabp);
+				    brelse(rabp);
+				}
+			    } else {
+				brelse(rabp);
+			    }
+			}
+		}
+		/*
+		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
+		 * chopped for the EOF condition, we cannot tell how large
+		 * NFS directories are going to be until we hit EOF.  So
+		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
+		 * it just so happens that b_resid will effectively chop it
+		 * to EOF.  *BUT* this information is lost if the buffer goes
+		 * away and is reconstituted into a B_CACHE state ( due to
+		 * being VMIO ) later.  So we keep track of the directory eof
+		 * in np->n_direofoffset and chop it off as an extra step
+		 * right here.
+		 */
+		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
+		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
+			n = np->n_direofoffset - uio->uio_offset;
+		break;
+	    default:
+		printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
+		bp = NULL;
+		break;
+	    }
+
+	    if (n > 0) {
+		    error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
+	    }
+	    if (vp->v_type == VLNK)
+		n = 0;
+	    if (bp != NULL)
+		brelse(bp);
+	} while (error == 0 && uio->uio_resid > 0 && n > 0);
+	return (error);
+}
+
+/*
+ * The NFS write path cannot handle iovecs with len > 1. So we need to
+ * break up iovecs accordingly (restricting them to wsize).
+ * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
+ * For the ASYNC case, 2 copies are needed. The first a copy from the
+ * user buffer to a staging buffer and then a second copy from the staging
+ * buffer to mbufs. This can be optimized by copying from the user buffer
+ * directly into mbufs and passing the chain down, but that requires a
+ * fair amount of re-working of the relevant codepaths (and can be done
+ * later).
+ */
+static int
+nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
+    int ioflag)
+{
+	int error;
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	struct thread *td = uiop->uio_td;
+	int size;
+	int wsize;
+
+	mtx_lock(&nmp->nm_mtx);
+	wsize = nmp->nm_wsize;
+	mtx_unlock(&nmp->nm_mtx);
+	if (ioflag & IO_SYNC) {
+		int iomode, must_commit;
+		struct uio uio;
+		struct iovec iov;
+do_sync:
+		while (uiop->uio_resid > 0) {
+			size = MIN(uiop->uio_resid, wsize);
+			size = MIN(uiop->uio_iov->iov_len, size);
+			iov.iov_base = uiop->uio_iov->iov_base;
+			iov.iov_len = size;
+			uio.uio_iov = &iov;
+			uio.uio_iovcnt = 1;
+			uio.uio_offset = uiop->uio_offset;
+			uio.uio_resid = size;
+			uio.uio_segflg = UIO_USERSPACE;
+			uio.uio_rw = UIO_WRITE;
+			uio.uio_td = td;
+			iomode = NFSWRITE_FILESYNC;
+			error = ncl_writerpc(vp, &uio, cred, &iomode,
+			    &must_commit, 0);
+			KASSERT((must_commit == 0),
+				("ncl_directio_write: Did not commit write"));
+			if (error)
+				return (error);
+			uiop->uio_offset += size;
+			uiop->uio_resid -= size;
+			if (uiop->uio_iov->iov_len <= size) {
+				uiop->uio_iovcnt--;
+				uiop->uio_iov++;
+			} else {
+				uiop->uio_iov->iov_base =
+					(char *)uiop->uio_iov->iov_base + size;
+				uiop->uio_iov->iov_len -= size;
+			}
+		}
+	} else {
+		struct uio *t_uio;
+		struct iovec *t_iov;
+		struct buf *bp;
+
+		/*
+		 * Break up the write into blocksize chunks and hand these
+		 * over to nfsiod's for write back.
+		 * Unfortunately, this incurs a copy of the data, since
+		 * the user could modify the buffer before the write is
+		 * initiated.
+		 *
+		 * The obvious optimization here is that one of the 2 copies
+		 * in the async write path can be eliminated by copying the
+		 * data here directly into mbufs and passing the mbuf chain
+		 * down. But that will require a fair amount of re-working
+		 * of the code and can be done if there's enough interest
+		 * in NFS directio access.
+		 */
+		while (uiop->uio_resid > 0) {
+			size = MIN(uiop->uio_resid, wsize);
+			size = MIN(uiop->uio_iov->iov_len, size);
+			bp = getpbuf(&ncl_pbuf_freecnt);
+			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
+			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
+			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
+			t_iov->iov_len = size;
+			t_uio->uio_iov = t_iov;
+			t_uio->uio_iovcnt = 1;
+			t_uio->uio_offset = uiop->uio_offset;
+			t_uio->uio_resid = size;
+			t_uio->uio_segflg = UIO_SYSSPACE;
+			t_uio->uio_rw = UIO_WRITE;
+			t_uio->uio_td = td;
+			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
+			    uiop->uio_segflg == UIO_SYSSPACE,
+			    ("nfs_directio_write: Bad uio_segflg"));
+			if (uiop->uio_segflg == UIO_USERSPACE) {
+				error = copyin(uiop->uio_iov->iov_base,
+				    t_iov->iov_base, size);
+				if (error != 0)
+					goto err_free;
+			} else
+				/*
+				 * UIO_SYSSPACE may never happen, but handle
+				 * it just in case it does.
+				 */
+				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
+				    size);
+			bp->b_flags |= B_DIRECT;
+			bp->b_iocmd = BIO_WRITE;
+			if (cred != NOCRED) {
+				crhold(cred);
+				bp->b_wcred = cred;
+			} else
+				bp->b_wcred = NOCRED;
+			bp->b_caller1 = (void *)t_uio;
+			bp->b_vp = vp;
+			error = ncl_asyncio(nmp, bp, NOCRED, td);
+err_free:
+			if (error) {
+				free(t_iov->iov_base, M_NFSDIRECTIO);
+				free(t_iov, M_NFSDIRECTIO);
+				free(t_uio, M_NFSDIRECTIO);
+				bp->b_vp = NULL;
+				relpbuf(bp, &ncl_pbuf_freecnt);
+				if (error == EINTR)
+					return (error);
+				goto do_sync;
+			}
+			uiop->uio_offset += size;
+			uiop->uio_resid -= size;
+			if (uiop->uio_iov->iov_len <= size) {
+				uiop->uio_iovcnt--;
+				uiop->uio_iov++;
+			} else {
+				uiop->uio_iov->iov_base =
+					(char *)uiop->uio_iov->iov_base + size;
+				uiop->uio_iov->iov_len -= size;
+			}
+		}
+	}
+	return (0);
+}
+
+/*
+ * Vnode op for write using bio
+ */
+int
+ncl_write(struct vop_write_args *ap)
+{
+	int biosize;
+	struct uio *uio = ap->a_uio;
+	struct thread *td = uio->uio_td;
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+	struct ucred *cred = ap->a_cred;
+	int ioflag = ap->a_ioflag;
+	struct buf *bp;
+	struct vattr vattr;
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	daddr_t lbn;
+	int bcount, noncontig_write, obcount;
+	int bp_cached, n, on, error = 0, error1, wouldcommit;
+	size_t orig_resid, local_resid;
+	off_t orig_size, tmp_off;
+
+	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
+	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+	    ("ncl_write proc"));
+	if (vp->v_type != VREG)
+		return (EIO);
+	NFSLOCKNODE(np);
+	if (np->n_flag & NWRITEERR) {
+		np->n_flag &= ~NWRITEERR;
+		NFSUNLOCKNODE(np);
+		return (np->n_error);
+	} else
+		NFSUNLOCKNODE(np);
+	mtx_lock(&nmp->nm_mtx);
+	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
+	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
+		mtx_unlock(&nmp->nm_mtx);
+		(void)ncl_fsinfo(nmp, vp, cred, td);
+		mtx_lock(&nmp->nm_mtx);
+	}
+	if (nmp->nm_wsize == 0)
+		(void) newnfs_iosize(nmp);
+	mtx_unlock(&nmp->nm_mtx);
+
+	/*
+	 * Synchronously flush pending buffers if we are in synchronous
+	 * mode or if we are appending.
+	 */
+	if (ioflag & (IO_APPEND | IO_SYNC)) {
+		NFSLOCKNODE(np);
+		if (np->n_flag & NMODIFIED) {
+			NFSUNLOCKNODE(np);
+#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
+			/*
+			 * Require non-blocking, synchronous writes to
+			 * dirty files to inform the program it needs
+			 * to fsync(2) explicitly.
+			 */
+			if (ioflag & IO_NDELAY)
+				return (EAGAIN);
+#endif
+			np->n_attrstamp = 0;
+			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+			error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag &
+			    IO_VMIO) != 0 ? V_VMIO : 0), td, 1);
+			if (error != 0)
+				return (error);
+		} else
+			NFSUNLOCKNODE(np);
+	}
+
+	orig_resid = uio->uio_resid;
+	NFSLOCKNODE(np);
+	orig_size = np->n_size;
+	NFSUNLOCKNODE(np);
+
+	/*
+	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
+	 * get the append lock.
+	 */
+	if (ioflag & IO_APPEND) {
+		np->n_attrstamp = 0;
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+		error = VOP_GETATTR(vp, &vattr, cred);
+		if (error)
+			return (error);
+		NFSLOCKNODE(np);
+		uio->uio_offset = np->n_size;
+		NFSUNLOCKNODE(np);
+	}
+
+	if (uio->uio_offset < 0)
+		return (EINVAL);
+	tmp_off = uio->uio_offset + uio->uio_resid;
+	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
+		return (EFBIG);
+	if (uio->uio_resid == 0)
+		return (0);
+
+	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
+		return nfs_directio_write(vp, uio, cred, ioflag);
+
+	/*
+	 * Maybe this should be above the vnode op call, but so long as
+	 * file servers have no limits, I don't think it matters.
+	 */
+	if (vn_rlimit_fsize(vp, uio, td))
+		return (EFBIG);
+
+	biosize = vp->v_bufobj.bo_bsize;
+	/*
+	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
+	 * would exceed the local maximum per-file write commit size when
+	 * combined with those, we must decide whether to flush,
+	 * go synchronous, or return error.  We don't bother checking
+	 * IO_UNIT -- we just make all writes atomic anyway, as there's
+	 * no point optimizing for something that really won't ever happen.
+	 */
+	wouldcommit = 0;
+	if (!(ioflag & IO_SYNC)) {
+		int nflag;
+
+		NFSLOCKNODE(np);
+		nflag = np->n_flag;
+		NFSUNLOCKNODE(np);
+		if (nflag & NMODIFIED) {
+			BO_LOCK(&vp->v_bufobj);
+			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
+				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
+				    b_bobufs) {
+					if (bp->b_flags & B_NEEDCOMMIT)
+						wouldcommit += bp->b_bcount;
+				}
+			}
+			BO_UNLOCK(&vp->v_bufobj);
+		}
+	}
+
+	do {
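+		/*
+		 * If this write would push the amount of uncommitted data
+		 * past the per-file commit limit (nm_wcommitsize), flush
+		 * the dirty buffers first.
+		 */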
+		if (!(ioflag & IO_SYNC)) {
+			wouldcommit += biosize;
+			if (wouldcommit > nmp->nm_wcommitsize) {
+				np->n_attrstamp = 0;
+				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+				error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag &
+				    IO_VMIO) != 0 ? V_VMIO : 0), td, 1);
+				if (error != 0)
+					return (error);
+				wouldcommit = biosize;
+			}
+		}
+
+		NFSINCRGLOBAL(nfsstatsv1.biocache_writes);
+		lbn = uio->uio_offset / biosize;
+		on = uio->uio_offset - (lbn * biosize);
+		n = MIN((unsigned)(biosize - on), uio->uio_resid);
+again:
+		/*
+		 * Handle direct append and file extension cases, calculate
+		 * unaligned buffer size.
+		 */
+		NFSLOCKNODE(np);
+		if ((np->n_flag & NHASBEENLOCKED) == 0 &&
+		    (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
+			noncontig_write = 1;
+		else
+			noncontig_write = 0;
+		if ((uio->uio_offset == np->n_size ||
+		    (noncontig_write != 0 &&
+		    lbn == (np->n_size / biosize) &&
+		    uio->uio_offset + n > np->n_size)) && n) {
+			NFSUNLOCKNODE(np);
+			/*
+			 * Get the buffer (in its pre-append state to maintain
+			 * B_CACHE if it was previously set).  Resize the
+			 * nfsnode after we have locked the buffer to prevent
+			 * readers from reading garbage.
+			 */
+			obcount = np->n_size - (lbn * biosize);
+			bp = nfs_getcacheblk(vp, lbn, obcount, td);
+
+			if (bp != NULL) {
+				long save;
+
+				NFSLOCKNODE(np);
+				np->n_size = uio->uio_offset + n;
+				np->n_flag |= NMODIFIED;
+				vnode_pager_setsize(vp, np->n_size);
+				NFSUNLOCKNODE(np);
+
+				save = bp->b_flags & B_CACHE;
+				bcount = on + n;
+				allocbuf(bp, bcount);
+				bp->b_flags |= save;
+				if (noncontig_write != 0 && on > obcount)
+					vfs_bio_bzero_buf(bp, obcount, on -
+					    obcount);
+			}
+		} else {
+			/*
+			 * Obtain the locked cache block first, and then
+			 * adjust the file's size as appropriate.
+			 */
+			bcount = on + n;
+			if ((off_t)lbn * biosize + bcount < np->n_size) {
+				if ((off_t)(lbn + 1) * biosize < np->n_size)
+					bcount = biosize;
+				else
+					bcount = np->n_size - (off_t)lbn * biosize;
+			}
+			NFSUNLOCKNODE(np);
+			bp = nfs_getcacheblk(vp, lbn, bcount, td);
+			NFSLOCKNODE(np);
+			if (uio->uio_offset + n > np->n_size) {
+				np->n_size = uio->uio_offset + n;
+				np->n_flag |= NMODIFIED;
+				vnode_pager_setsize(vp, np->n_size);
+			}
+			NFSUNLOCKNODE(np);
+		}
+
+		if (!bp) {
+			error = newnfs_sigintr(nmp, td);
+			if (!error)
+				error = EINTR;
+			break;
+		}
+
+		/*
+		 * Issue a READ if B_CACHE is not set.  In special-append
+		 * mode, B_CACHE is based on the buffer prior to the write
+		 * op and is typically set, avoiding the read.  If a read
+		 * is required in special append mode, the server will
+		 * probably send us a short-read since we extended the file
+		 * on our end, resulting in b_resid == 0 and, thus,
+		 * B_CACHE getting set.
+		 *
+		 * We can also avoid issuing the read if the write covers
+		 * the entire buffer.  We have to make sure the buffer state
+		 * is reasonable in this case since we will not be initiating
+		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
+		 * more information.
+		 *
+		 * B_CACHE may also be set due to the buffer being cached
+		 * normally.
+		 */
+
+		bp_cached = 1;
+		if (on == 0 && n == bcount) {
+			if ((bp->b_flags & B_CACHE) == 0)
+				bp_cached = 0;
+			bp->b_flags |= B_CACHE;
+			bp->b_flags &= ~B_INVAL;
+			bp->b_ioflags &= ~BIO_ERROR;
+		}
+
+		if ((bp->b_flags & B_CACHE) == 0) {
+			bp->b_iocmd = BIO_READ;
+			vfs_busy_pages(bp, 0);
+			error = ncl_doio(vp, bp, cred, td, 0);
+			if (error) {
+				brelse(bp);
+				break;
+			}
+		}
+		if (bp->b_wcred == NOCRED)
+			bp->b_wcred = crhold(cred);
+		NFSLOCKNODE(np);
+		np->n_flag |= NMODIFIED;
+		NFSUNLOCKNODE(np);
+
+		/*
+		 * If dirtyend exceeds file size, chop it down.  This should
+		 * not normally occur but there is an append race where it
+		 * might occur XXX, so we log it.
+		 *
+		 * If the chopping creates a reverse-indexed or degenerate
+		 * situation with dirtyoff/end, we zero both of them.
+		 */
+
+		if (bp->b_dirtyend > bcount) {
+			printf("NFS append race @%lx:%d\n",
+			    (long)bp->b_blkno * DEV_BSIZE,
+			    bp->b_dirtyend - bcount);
+			bp->b_dirtyend = bcount;
+		}
+
+		if (bp->b_dirtyoff >= bp->b_dirtyend)
+			bp->b_dirtyoff = bp->b_dirtyend = 0;
+
+		/*
+		 * If the new write will leave a contiguous dirty
+		 * area, just update the b_dirtyoff and b_dirtyend,
+		 * otherwise force a write rpc of the old dirty area.
+		 *
+		 * If there has been a file lock applied to this file
+		 * or vfs.nfs.old_noncontig_writing is set, do the following:
+		 * While it is possible to merge discontiguous writes due to
+		 * our having a B_CACHE buffer (and thus valid read data
+		 * for the hole), we don't because it could lead to
+		 * significant cache coherency problems with multiple clients,
+		 * especially if locking is implemented later on.
+		 *
+		 * If vfs.nfs.old_noncontig_writing is not set and there has
+		 * not been file locking done on this file:
+		 * Relax coherency a bit for the sake of performance and
+		 * expand the current dirty region to contain the new
+		 * write even if it means we mark some non-dirty data as
+		 * dirty.
+		 */
+
+		if (noncontig_write == 0 && bp->b_dirtyend > 0 &&
+		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
+			if (bwrite(bp) == EINTR) {
+				error = EINTR;
+				break;
+			}
+			goto again;
+		}
+
+		local_resid = uio->uio_resid;
+		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);
+
+		if (error != 0 && !bp_cached) {
+			/*
+			 * This block has no other content than what was
+			 * possibly written by the faulty uiomove.
+			 * Release it, forgetting the data pages, to
+			 * prevent the leak of uninitialized data to
+			 * usermode.
+			 */
+			bp->b_ioflags |= BIO_ERROR;
+			brelse(bp);
+			uio->uio_offset -= local_resid - uio->uio_resid;
+			uio->uio_resid = local_resid;
+			break;
+		}
+
+		/*
+		 * Since this block is being modified, it must be written
+		 * again and not just committed.  Since write clustering does
+		 * not work for the stage 1 data write, only the stage 2
+		 * commit rpc, we have to clear B_CLUSTEROK as well.
+		 */
+		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
+
+		/*
+		 * Get the partial update on the progress made from
+		 * uiomove, if an error occurred.
+		 */
+		if (error != 0)
+			n = local_resid - uio->uio_resid;
+
+		/*
+		 * Only update dirtyoff/dirtyend if not a degenerate
+		 * condition.
+		 */
+		if (n > 0) {
+			if (bp->b_dirtyend > 0) {
+				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
+				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
+			} else {
+				bp->b_dirtyoff = on;
+				bp->b_dirtyend = on + n;
+			}
+			vfs_bio_set_valid(bp, on, n);
+		}
+
+		/*
+		 * If IO_SYNC do bwrite().
+		 *
+		 * IO_INVAL appears to be unused.  The idea appears to be
+		 * to turn off caching in this case.  Very odd.  XXX
+		 */
+		if ((ioflag & IO_SYNC)) {
+			if (ioflag & IO_INVAL)
+				bp->b_flags |= B_NOCACHE;
+			error1 = bwrite(bp);
+			if (error1 != 0) {
+				if (error == 0)
+					error = error1;
+				break;
+			}
+		} else if ((n + on) == biosize || (ioflag & IO_ASYNC) != 0) {
+			bp->b_flags |= B_ASYNC;
+			(void) ncl_writebp(bp, 0, NULL);
+		} else {
+			bdwrite(bp);
+		}
+
+		if (error != 0)
+			break;
+	} while (uio->uio_resid > 0 && n > 0);
+
+	if (error != 0) {
+		if (ioflag & IO_UNIT) {
+			VATTR_NULL(&vattr);
+			vattr.va_size = orig_size;
+			/* IO_SYNC is handled implicitly */
+			(void)VOP_SETATTR(vp, &vattr, cred);
+			uio->uio_offset -= orig_resid - uio->uio_resid;
+			uio->uio_resid = orig_resid;
+		}
+	}
+
+	return (error);
+}
+
+/*
+ * Get an nfs cache block.
+ *
+ * Allocate a new one if the block isn't currently in the cache
+ * and return the block marked busy. If the calling process is
+ * interrupted by a signal for an interruptible mount point, return
+ * NULL.
+ *
+ * The caller must carefully deal with the possible B_INVAL state of
+ * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
+ * indirectly), so synchronous reads can be issued without worrying about
+ * the B_INVAL state.  We have to be a little more careful when dealing
+ * with writes (see comments in nfs_write()) when extending a file past
+ * its EOF.
+ */
+static struct buf *
+nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
+{
+	struct buf *bp;
+	struct mount *mp;
+	struct nfsmount *nmp;
+
+	mp = vp->v_mount;
+	nmp = VFSTONFS(mp);
+
+	if (nmp->nm_flag & NFSMNT_INT) {
+		sigset_t oldset;
+
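+		/*
+		 * On interruptible mounts getblk() sleeps with PCATCH, so it
+		 * can return NULL when a signal arrives.  In that case check
+		 * for a fatal signal and otherwise retry, letting getblk()
+		 * time out every 2 seconds so the signal check runs again.
+		 */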
+		newnfs_set_sigmask(td, &oldset);
+		bp = getblk(vp, bn, size, PCATCH, 0, 0);
+		newnfs_restore_sigmask(td, &oldset);
+		while (bp == NULL) {
+			if (newnfs_sigintr(nmp, td))
+				return (NULL);
+			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
+		}
+	} else {
+		bp = getblk(vp, bn, size, 0, 0, 0);
+	}
+
+	if (vp->v_type == VREG)
+		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
+	return (bp);
+}
+
+/*
+ * Flush and invalidate all dirty buffers. If another process is already
+ * doing the flush, just wait for completion.
+ */
+int
+ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
+{
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	int error = 0, slpflag, slptimeo;
+	bool old_lock;
+
+	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
+
+	if ((nmp->nm_flag & NFSMNT_INT) == 0)
+		intrflg = 0;
+	if (NFSCL_FORCEDISM(nmp->nm_mountp))
+		intrflg = 1;
+	if (intrflg) {
+		slpflag = PCATCH;
+		slptimeo = 2 * hz;
+	} else {
+		slpflag = 0;
+		slptimeo = 0;
+	}
+
+	old_lock = ncl_excl_start(vp);
+	if (old_lock)
+		flags |= V_ALLOWCLEAN;
+
+	/*
+	 * Now, flush as required.
+	 */
+	if ((flags & (V_SAVE | V_VMIO)) == V_SAVE &&
+	     vp->v_bufobj.bo_object != NULL) {
+		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
+		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
+		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
+		/*
+		 * If the page clean was interrupted, fail the invalidation.
+		 * Not doing so, we run the risk of losing dirty pages in the
+		 * vinvalbuf() call below.
+		 */
+		if (intrflg && (error = newnfs_sigintr(nmp, td)))
+			goto out;
+	}
+
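+	/*
+	 * Flush and invalidate the buffers, retrying if the flush is
+	 * interrupted, unless a fatal signal is pending on an
+	 * interruptible mount.
+	 */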
+	error = vinvalbuf(vp, flags, slpflag, 0);
+	while (error) {
+		if (intrflg && (error = newnfs_sigintr(nmp, td)))
+			goto out;
+		error = vinvalbuf(vp, flags, 0, slptimeo);
+	}
+	if (NFSHASPNFS(nmp)) {
+		nfscl_layoutcommit(vp, td);
+		/*
+		 * Invalidate the attribute cache, since writes to a DS
+		 * won't update the size attribute.
+		 */
+		NFSLOCKNODE(np);
+		np->n_attrstamp = 0;
+	} else
+		NFSLOCKNODE(np);
+	if (np->n_directio_asyncwr == 0)
+		np->n_flag &= ~NMODIFIED;
+	NFSUNLOCKNODE(np);
+out:
+	ncl_excl_finish(vp, old_lock);
+	return error;
+}
+
+/*
+ * Initiate asynchronous I/O. Return an error if no nfsiods are available.
+ * This is mainly to avoid queueing async I/O requests when the nfsiods
+ * are all hung on a dead server.
+ *
+ * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
+ * is eventually dequeued by the async daemon, ncl_doio() *will*.
+ */
+int
+ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
+{
+	int iod;
+	int gotiod;
+	int slpflag = 0;
+	int slptimeo = 0;
+	int error, error2;
+
+	/*
+	 * Commits are usually short and sweet, so let's save some CPU and
+	 * leave the async daemons for more important RPCs (such as reads
+	 * and writes).
+	 *
+	 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
+	 * in the directory in order to update attributes. This can deadlock
+	 * with another thread that is waiting for async I/O to be done by
+	 * an nfsiod thread while holding a lock on one of these vnodes.
+	 * To avoid this deadlock, don't allow the async nfsiod threads to
+	 * perform Readdirplus RPCs.
+	 */
+	NFSLOCKIOD();
+	if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
+	     (nmp->nm_bufqiods > ncl_numasync / 2)) ||
+	    (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
+		NFSUNLOCKIOD();
+		return(EIO);
+	}
+again:
+	if (nmp->nm_flag & NFSMNT_INT)
+		slpflag = PCATCH;
+	gotiod = FALSE;
+
+	/*
+	 * Find a free iod to process this request.
+	 */
+	for (iod = 0; iod < ncl_numasync; iod++)
+		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
+			gotiod = TRUE;
+			break;
+		}
+
+	/*
+	 * Try to create one if none are free.
+	 */
+	if (!gotiod)
+		ncl_nfsiodnew();
+	else {
+		/*
+		 * Found one, so wake it up and tell it which
+		 * mount to process.
+		 */
+		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
+		    iod, nmp));
+		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
+		ncl_iodmount[iod] = nmp;
+		nmp->nm_bufqiods++;
+		wakeup(&ncl_iodwant[iod]);
+	}
+
+	/*
+	 * If none are free, we may already have an iod working on this mount
+	 * point.  If so, it will process our request.
+	 */
+	if (!gotiod) {
+		if (nmp->nm_bufqiods > 0) {
+			NFS_DPF(ASYNCIO,
+				("ncl_asyncio: %d iods are already processing mount %p\n",
+				 nmp->nm_bufqiods, nmp));
+			gotiod = TRUE;
+		}
+	}
+
+	/*
+	 * If we have an iod which can process the request, then queue
+	 * the buffer.
+	 */
+	if (gotiod) {
+		/*
+		 * Ensure that the queue never grows too large.  We still want
+		 * to asynchronize so we block rather than return EIO.
+		 */
+		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
+			NFS_DPF(ASYNCIO,
+				("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
+			nmp->nm_bufqwant = TRUE;
+			error = newnfs_msleep(td, &nmp->nm_bufq,
+			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
+			   slptimeo);
+			if (error) {
+				error2 = newnfs_sigintr(nmp, td);
+				if (error2) {
+					NFSUNLOCKIOD();
+					return (error2);
+				}
+				if (slpflag == PCATCH) {
+					slpflag = 0;
+					slptimeo = 2 * hz;
+				}
+			}
+			/*
+			 * We might have lost our iod while sleeping,
+			 * so check and loop if necessary.
+			 */
+			goto again;
+		}
+
+		/* We might have lost our nfsiod */
+		if (nmp->nm_bufqiods == 0) {
+			NFS_DPF(ASYNCIO,
+				("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
+			goto again;
+		}
+
+		if (bp->b_iocmd == BIO_READ) {
+			if (bp->b_rcred == NOCRED && cred != NOCRED)
+				bp->b_rcred = crhold(cred);
+		} else {
+			if (bp->b_wcred == NOCRED && cred != NOCRED)
+				bp->b_wcred = crhold(cred);
+		}
+
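+		/*
+		 * Queue the buffer for the nfsiods and, for asynchronous
+		 * direct writes, bump n_directio_asyncwr so the nfsnode
+		 * stays marked modified until the last outstanding direct
+		 * write completes.
+		 */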
+		if (bp->b_flags & B_REMFREE)
+			bremfreef(bp);
+		BUF_KERNPROC(bp);
+		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
+		nmp->nm_bufqlen++;
+		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
+			NFSLOCKNODE(VTONFS(bp->b_vp));
+			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
+			VTONFS(bp->b_vp)->n_directio_asyncwr++;
+			NFSUNLOCKNODE(VTONFS(bp->b_vp));
+		}
+		NFSUNLOCKIOD();
+		return (0);
+	}
+
+	NFSUNLOCKIOD();
+
+	/*
+	 * All the iods are busy on other mounts, so return EIO to
+	 * force the caller to process the i/o synchronously.
+	 */
+	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
+	return (EIO);
+}
+
+void
+ncl_doio_directwrite(struct buf *bp)
+{
+	int iomode, must_commit;
+	struct uio *uiop = (struct uio *)bp->b_caller1;
+	char *iov_base = uiop->uio_iov->iov_base;
+
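+	/*
+	 * Push the data with a synchronous FILESYNC write RPC and then
+	 * release the temporary uio/iovec that nfs_directio_write()
+	 * attached to the buffer.
+	 */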
+	iomode = NFSWRITE_FILESYNC;
+	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
+	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
+	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
+	free(iov_base, M_NFSDIRECTIO);
+	free(uiop->uio_iov, M_NFSDIRECTIO);
+	free(uiop, M_NFSDIRECTIO);
+	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
+		struct nfsnode *np = VTONFS(bp->b_vp);
+		NFSLOCKNODE(np);
+		if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) {
+			/*
+			 * Invalidate the attribute cache, since writes to a DS
+			 * won't update the size attribute.
+			 */
+			np->n_attrstamp = 0;
+		}
+		np->n_directio_asyncwr--;
+		if (np->n_directio_asyncwr == 0) {
+			np->n_flag &= ~NMODIFIED;
+			if ((np->n_flag & NFSYNCWAIT)) {
+				np->n_flag &= ~NFSYNCWAIT;
+				wakeup((caddr_t)&np->n_directio_asyncwr);
+			}
+		}
+		NFSUNLOCKNODE(np);
+	}
+	bp->b_vp = NULL;
+	relpbuf(bp, &ncl_pbuf_freecnt);
+}
+
+/*
+ * Do an I/O operation to/from a cache block. This may be called
+ * synchronously or from an nfsiod.
+ */
+int
+ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
+    int called_from_strategy)
+{
+	struct uio *uiop;
+	struct nfsnode *np;
+	struct nfsmount *nmp;
+	int error = 0, iomode, must_commit = 0;
+	struct uio uio;
+	struct iovec io;
+	struct proc *p = td ? td->td_proc : NULL;
+	uint8_t	iocmd;
+
+	np = VTONFS(vp);
+	nmp = VFSTONFS(vp->v_mount);
+	uiop = &uio;
+	uiop->uio_iov = &io;
+	uiop->uio_iovcnt = 1;
+	uiop->uio_segflg = UIO_SYSSPACE;
+	uiop->uio_td = td;
+
+	/*
+	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
+	 * do this here so we do not have to do it in all the code that
+	 * calls us.
+	 */
+	bp->b_flags &= ~B_INVAL;
+	bp->b_ioflags &= ~BIO_ERROR;
+
+	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
+	iocmd = bp->b_iocmd;
+	if (iocmd == BIO_READ) {
+	    io.iov_len = uiop->uio_resid = bp->b_bcount;
+	    io.iov_base = bp->b_data;
+	    uiop->uio_rw = UIO_READ;
+
+	    switch (vp->v_type) {
+	    case VREG:
+		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
+		NFSINCRGLOBAL(nfsstatsv1.read_bios);
+		error = ncl_readrpc(vp, uiop, cr);
+
+		if (!error) {
+		    if (uiop->uio_resid) {
+			/*
+			 * If we had a short read with no error, we must have
+			 * hit a file hole.  We should zero-fill the remainder.
+			 * This can also occur if the server hits the file EOF.
+			 *
+			 * Holes used to be able to occur due to pending
+			 * writes, but that is not possible any longer.
+			 */
+			int nread = bp->b_bcount - uiop->uio_resid;
+			ssize_t left = uiop->uio_resid;
+
+			if (left > 0)
+				bzero((char *)bp->b_data + nread, left);
+			uiop->uio_resid = 0;
+		    }
+		}
+		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
+		if (p && vp->v_writecount <= -1) {
+			NFSLOCKNODE(np);
+			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
+				NFSUNLOCKNODE(np);
+				PROC_LOCK(p);
+				killproc(p, "text file modification");
+				PROC_UNLOCK(p);
+			} else
+				NFSUNLOCKNODE(np);
+		}
+		break;
+	    case VLNK:
+		uiop->uio_offset = (off_t)0;
+		NFSINCRGLOBAL(nfsstatsv1.readlink_bios);
+		error = ncl_readlinkrpc(vp, uiop, cr);
+		break;
+	    case VDIR:
+		NFSINCRGLOBAL(nfsstatsv1.readdir_bios);
+		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
+		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
+			error = ncl_readdirplusrpc(vp, uiop, cr, td);
+			if (error == NFSERR_NOTSUPP)
+				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
+		}
+		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
+			error = ncl_readdirrpc(vp, uiop, cr, td);
+		/*
+		 * end-of-directory sets B_INVAL but does not generate an
+		 * error.
+		 */
+		if (error == 0 && uiop->uio_resid == bp->b_bcount)
+			bp->b_flags |= B_INVAL;
+		break;
+	    default:
+		printf("ncl_doio:  type %x unexpected\n", vp->v_type);
+		break;
+	    }
+	    if (error) {
+		bp->b_ioflags |= BIO_ERROR;
+		bp->b_error = error;
+	    }
+	} else {
+	    /*
+	     * If we only need to commit, try to commit
+	     */
+	    if (bp->b_flags & B_NEEDCOMMIT) {
+		    int retv;
+		    off_t off;
+
+		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
+		    retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
+			bp->b_wcred, td);
+		    if (retv == 0) {
+			    bp->b_dirtyoff = bp->b_dirtyend = 0;
+			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
+			    bp->b_resid = 0;
+			    bufdone(bp);
+			    return (0);
+		    }
+		    if (retv == NFSERR_STALEWRITEVERF) {
+			    ncl_clearcommit(vp->v_mount);
+		    }
+	    }
+
+	    /*
+	     * Setup for actual write
+	     */
+	    NFSLOCKNODE(np);
+	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
+		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
+	    NFSUNLOCKNODE(np);
+
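+	    /*
+	     * The dirty range was clamped to the file size above, so we
+	     * never push data past EOF to the server.
+	     */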
+	    if (bp->b_dirtyend > bp->b_dirtyoff) {
+		io.iov_len = uiop->uio_resid = bp->b_dirtyend
+		    - bp->b_dirtyoff;
+		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
+		    + bp->b_dirtyoff;
+		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
+		uiop->uio_rw = UIO_WRITE;
+		NFSINCRGLOBAL(nfsstatsv1.write_bios);
+
+		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
+		    iomode = NFSWRITE_UNSTABLE;
+		else
+		    iomode = NFSWRITE_FILESYNC;
+
+		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
+		    called_from_strategy);
+
+		/*
+		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
+		 * to cluster the buffers needing commit.  This will allow
+		 * the system to submit a single commit rpc for the whole
+		 * cluster.  We can do this even if the buffer is not 100%
+		 * dirty (relative to the NFS blocksize), so we optimize the
+		 * append-to-file-case.
+		 *
+		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
+		 * cleared because write clustering only works for commit
+		 * rpc's, not for the data portion of the write).
+		 */
+
+		if (!error && iomode == NFSWRITE_UNSTABLE) {
+		    bp->b_flags |= B_NEEDCOMMIT;
+		    if (bp->b_dirtyoff == 0
+			&& bp->b_dirtyend == bp->b_bcount)
+			bp->b_flags |= B_CLUSTEROK;
+		} else {
+		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
+		}
+
+		/*
+		 * For an interrupted write, the buffer is still valid
+		 * and the write hasn't been pushed to the server yet,
+		 * so we can't set BIO_ERROR and report the interruption
+		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
+		 * is not relevant, so the rpc attempt is essentially
+		 * a noop.  For the case of a V3 write rpc not being
+		 * committed to stable storage, the block is still
+		 * dirty and requires either a commit rpc or another
+		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
+		 * the block is reused. This is indicated by setting
+		 * the B_DELWRI and B_NEEDCOMMIT flags.
+		 *
+		 * EIO is returned by ncl_writerpc() to indicate a recoverable
+		 * write error and is handled as above, except that
+		 * B_EINTR isn't set. One cause of this is a stale stateid
+		 * error for the RPC that indicates recovery is required,
+		 * when called with called_from_strategy != 0.
+		 *
+		 * If the buffer is marked B_PAGING, it does not reside on
+		 * the vp's paging queues so we cannot call bdirty().  The
+		 * bp in this case is not an NFS cache block so we should
+		 * be safe. XXX
+		 *
+		 * The logic below breaks up errors into recoverable and
+		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
+		 * and keep the buffer around for potential write retries.
+		 * For the latter (e.g. ESTALE), we toss the buffer away (B_INVAL)
+		 * and save the error in the nfsnode. This is less than ideal
+		 * but necessary. Keeping such buffers around could potentially
+		 * cause buffer exhaustion eventually (they can never be written
+		 * out, so they will constantly be re-dirtied). It also causes
+		 * all sorts of vfs panics. For non-recoverable write errors,
+		 * also invalidate the attrcache, so we'll be forced to go over
+		 * the wire for this object, returning an error to the user on
+		 * the next call (most of the time).
+		 */
+		if (error == EINTR || error == EIO || error == ETIMEDOUT
+		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
+			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
+			if ((bp->b_flags & B_PAGING) == 0) {
+			    bdirty(bp);
+			    bp->b_flags &= ~B_DONE;
+			}
+			if ((error == EINTR || error == ETIMEDOUT) &&
+			    (bp->b_flags & B_ASYNC) == 0)
+			    bp->b_flags |= B_EINTR;
+		} else {
+		    if (error) {
+			bp->b_ioflags |= BIO_ERROR;
+			bp->b_flags |= B_INVAL;
+			bp->b_error = np->n_error = error;
+			NFSLOCKNODE(np);
+			np->n_flag |= NWRITEERR;
+			np->n_attrstamp = 0;
+			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+			NFSUNLOCKNODE(np);
+		    }
+		    bp->b_dirtyoff = bp->b_dirtyend = 0;
+		}
+	    } else {
+		bp->b_resid = 0;
+		bufdone(bp);
+		return (0);
+	    }
+	}
+	bp->b_resid = uiop->uio_resid;
+	if (must_commit)
+	    ncl_clearcommit(vp->v_mount);
+	bufdone(bp);
+	return (error);
+}
+
+/*
+ * Used to aid in handling ftruncate() operations on the NFS client side.
+ * Truncation creates a number of special problems for NFS.  We have to
+ * throw away VM pages and buffer cache buffers that are beyond EOF, and
+ * we have to properly handle VM pages or (potentially dirty) buffers
+ * that straddle the truncation point.
+ */
+
+int
+ncl_meta_setsize(struct vnode *vp, struct thread *td, u_quad_t nsize)
+{
+	struct nfsnode *np = VTONFS(vp);
+	u_quad_t tsize;
+	int biosize = vp->v_bufobj.bo_bsize;
+	int error = 0;
+
+	NFSLOCKNODE(np);
+	tsize = np->n_size;
+	np->n_size = nsize;
+	NFSUNLOCKNODE(np);
+
+	if (nsize < tsize) {
+		struct buf *bp;
+		daddr_t lbn;
+		int bufsize;
+
+		/*
+		 * vtruncbuf() doesn't get the buffer overlapping the
+		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
+		 * buffer that now needs to be truncated.
+		 */
+		error = vtruncbuf(vp, nsize, biosize);
+		lbn = nsize / biosize;
+		bufsize = nsize - (lbn * biosize);
+		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
+		if (!bp)
+			return EINTR;
+		if (bp->b_dirtyoff > bp->b_bcount)
+			bp->b_dirtyoff = bp->b_bcount;
+		if (bp->b_dirtyend > bp->b_bcount)
+			bp->b_dirtyend = bp->b_bcount;
+		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
+		brelse(bp);
+	} else {
+		vnode_pager_setsize(vp, nsize);
+	}
+	return(error);
+}
+
diff --git a/freebsd/sys/fs/nfsclient/nfs_clcomsubs.c b/freebsd/sys/fs/nfsclient/nfs_clcomsubs.c
new file mode 100644
index 0000000..479a453
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clcomsubs.c
@@ -0,0 +1,439 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * These functions support the macros and help fiddle mbuf chains for
+ * the nfs op functions. They do things like create the rpc header and
+ * copy data between mbuf chains and uio lists.
+ */
+#ifndef APPLEKEXT
+#include <fs/nfs/nfsport.h>
+
+extern struct nfsstatsv1 nfsstatsv1;
+extern int ncl_mbuf_mlen;
+extern enum vtype newnv2tov_type[8];
+extern enum vtype nv34tov_type[8];
+NFSCLSTATEMUTEX;
+#endif	/* !APPLEKEXT */
+
+static nfsuint64 nfs_nullcookie = {{ 0, 0 }};
+
+/*
+ * Copies a uio scatter/gather list to an mbuf chain.
+ * NOTE: can only handle iovcnt == 1.
+ */
+APPLESTATIC void
+nfsm_uiombuf(struct nfsrv_descript *nd, struct uio *uiop, int siz)
+{
+	char *uiocp;
+	struct mbuf *mp, *mp2;
+	int xfer, left, mlen;
+	int uiosiz, clflg, rem;
+	char *cp, *tcp;
+
+	KASSERT(uiop->uio_iovcnt == 1, ("nfsm_uiotombuf: iovcnt != 1"));
+
+	if (siz > ncl_mbuf_mlen)	/* or should it >= MCLBYTES ?? */
+		clflg = 1;
+	else
+		clflg = 0;
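+	/* XDR requires the data to be padded out to a multiple of 4 bytes. */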
+	rem = NFSM_RNDUP(siz) - siz;
+	mp = mp2 = nd->nd_mb;
+	while (siz > 0) {
+		left = uiop->uio_iov->iov_len;
+		uiocp = uiop->uio_iov->iov_base;
+		if (left > siz)
+			left = siz;
+		uiosiz = left;
+		while (left > 0) {
+			mlen = M_TRAILINGSPACE(mp);
+			if (mlen == 0) {
+				if (clflg)
+					NFSMCLGET(mp, M_WAITOK);
+				else
+					NFSMGET(mp);
+				mbuf_setlen(mp, 0);
+				mbuf_setnext(mp2, mp);
+				mp2 = mp;
+				mlen = M_TRAILINGSPACE(mp);
+			}
+			xfer = (left > mlen) ? mlen : left;
+#ifdef notdef
+			/* Not Yet.. */
+			if (uiop->uio_iov->iov_op != NULL)
+				(*(uiop->uio_iov->iov_op))
+				(uiocp, NFSMTOD(mp, caddr_t) + mbuf_len(mp),
+				    xfer);
+			else
+#endif
+			if (uiop->uio_segflg == UIO_SYSSPACE)
+			    NFSBCOPY(uiocp, NFSMTOD(mp, caddr_t) + mbuf_len(mp),
+				xfer);
+			else
+			    copyin(CAST_USER_ADDR_T(uiocp), NFSMTOD(mp, caddr_t)
+				+ mbuf_len(mp), xfer);
+			mbuf_setlen(mp, mbuf_len(mp) + xfer);
+			left -= xfer;
+			uiocp += xfer;
+			uiop->uio_offset += xfer;
+			uiop->uio_resid -= xfer;
+		}
+		tcp = (char *)uiop->uio_iov->iov_base;
+		tcp += uiosiz;
+		uiop->uio_iov->iov_base = (void *)tcp;
+		uiop->uio_iov->iov_len -= uiosiz;
+		siz -= uiosiz;
+	}
+	if (rem > 0) {
+		if (rem > M_TRAILINGSPACE(mp)) {
+			NFSMGET(mp);
+			mbuf_setlen(mp, 0);
+			mbuf_setnext(mp2, mp);
+		}
+		cp = NFSMTOD(mp, caddr_t) + mbuf_len(mp);
+		for (left = 0; left < rem; left++)
+			*cp++ = '\0';
+		mbuf_setlen(mp, mbuf_len(mp) + rem);
+		nd->nd_bpos = cp;
+	} else
+		nd->nd_bpos = NFSMTOD(mp, caddr_t) + mbuf_len(mp);
+	nd->nd_mb = mp;
+}
+
+/*
+ * Copies a uio scatter/gather list to an mbuf chain.
+ * This version returns the mbuf list and does not use "nd".
+ * NOTE: can only handle iovcnt == 1.
+ */
+struct mbuf *
+nfsm_uiombuflist(struct uio *uiop, int siz, struct mbuf **mbp, char **cpp)
+{
+	char *uiocp;
+	struct mbuf *mp, *mp2, *firstmp;
+	int xfer, left, mlen;
+	int uiosiz, clflg;
+	char *tcp;
+
+	KASSERT(uiop->uio_iovcnt == 1, ("nfsm_uiotombuf: iovcnt != 1"));
+
+	if (siz > ncl_mbuf_mlen)	/* or should it >= MCLBYTES ?? */
+		clflg = 1;
+	else
+		clflg = 0;
+	if (clflg != 0)
+		NFSMCLGET(mp, M_WAITOK);
+	else
+		NFSMGET(mp);
+	mbuf_setlen(mp, 0);
+	firstmp = mp2 = mp;
+	while (siz > 0) {
+		left = uiop->uio_iov->iov_len;
+		uiocp = uiop->uio_iov->iov_base;
+		if (left > siz)
+			left = siz;
+		uiosiz = left;
+		while (left > 0) {
+			mlen = M_TRAILINGSPACE(mp);
+			if (mlen == 0) {
+				if (clflg)
+					NFSMCLGET(mp, M_WAITOK);
+				else
+					NFSMGET(mp);
+				mbuf_setlen(mp, 0);
+				mbuf_setnext(mp2, mp);
+				mp2 = mp;
+				mlen = M_TRAILINGSPACE(mp);
+			}
+			xfer = (left > mlen) ? mlen : left;
+			if (uiop->uio_segflg == UIO_SYSSPACE)
+				NFSBCOPY(uiocp, NFSMTOD(mp, caddr_t) +
+				    mbuf_len(mp), xfer);
+			else
+				copyin(uiocp, NFSMTOD(mp, caddr_t) +
+				    mbuf_len(mp), xfer);
+			mbuf_setlen(mp, mbuf_len(mp) + xfer);
+			left -= xfer;
+			uiocp += xfer;
+			uiop->uio_offset += xfer;
+			uiop->uio_resid -= xfer;
+		}
+		tcp = (char *)uiop->uio_iov->iov_base;
+		tcp += uiosiz;
+		uiop->uio_iov->iov_base = (void *)tcp;
+		uiop->uio_iov->iov_len -= uiosiz;
+		siz -= uiosiz;
+	}
+	if (cpp != NULL)
+		*cpp = NFSMTOD(mp, caddr_t) + mbuf_len(mp);
+	if (mbp != NULL)
+		*mbp = mp;
+	return (firstmp);
+}
+
+/*
+ * Load vnode attributes from the xdr file attributes.
+ * Returns EBADRPC if they can't be parsed, 0 otherwise.
+ */
+APPLESTATIC int
+nfsm_loadattr(struct nfsrv_descript *nd, struct nfsvattr *nap)
+{
+	struct nfs_fattr *fp;
+	int error = 0;
+
+	if (nd->nd_flag & ND_NFSV4) {
+		error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL,
+		    NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL);
+	} else if (nd->nd_flag & ND_NFSV3) {
+		NFSM_DISSECT(fp, struct nfs_fattr *, NFSX_V3FATTR);
+		nap->na_type = nfsv34tov_type(fp->fa_type);
+		nap->na_mode = fxdr_unsigned(u_short, fp->fa_mode);
+		nap->na_rdev = NFSMAKEDEV(
+		    fxdr_unsigned(int, fp->fa3_rdev.specdata1),
+		    fxdr_unsigned(int, fp->fa3_rdev.specdata2));
+		nap->na_nlink = fxdr_unsigned(uint32_t, fp->fa_nlink);
+		nap->na_uid = fxdr_unsigned(uid_t, fp->fa_uid);
+		nap->na_gid = fxdr_unsigned(gid_t, fp->fa_gid);
+		nap->na_size = fxdr_hyper(&fp->fa3_size);
+		nap->na_blocksize = NFS_FABLKSIZE;
+		nap->na_bytes = fxdr_hyper(&fp->fa3_used);
+		nap->na_fileid = fxdr_hyper(&fp->fa3_fileid);
+		fxdr_nfsv3time(&fp->fa3_atime, &nap->na_atime);
+		fxdr_nfsv3time(&fp->fa3_ctime, &nap->na_ctime);
+		fxdr_nfsv3time(&fp->fa3_mtime, &nap->na_mtime);
+		nap->na_flags = 0;
+		nap->na_filerev = 0;
+	} else {
+		NFSM_DISSECT(fp, struct nfs_fattr *, NFSX_V2FATTR);
+		nap->na_type = nfsv2tov_type(fp->fa_type);
+		nap->na_mode = fxdr_unsigned(u_short, fp->fa_mode);
+		if (nap->na_type == VNON || nap->na_type == VREG)
+			nap->na_type = IFTOVT(nap->na_mode);
+		nap->na_rdev = fxdr_unsigned(dev_t, fp->fa2_rdev);
+
+		/*
+		 * Really ugly NFSv2 kludge.
+		 */
+		if (nap->na_type == VCHR && nap->na_rdev == ((dev_t)-1))
+			nap->na_type = VFIFO;
+		nap->na_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
+		nap->na_uid = fxdr_unsigned(uid_t, fp->fa_uid);
+		nap->na_gid = fxdr_unsigned(gid_t, fp->fa_gid);
+		nap->na_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
+		nap->na_blocksize = fxdr_unsigned(int32_t, fp->fa2_blocksize);
+		nap->na_bytes =
+		    (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks) *
+		    NFS_FABLKSIZE;
+		nap->na_fileid = fxdr_unsigned(uint64_t, fp->fa2_fileid);
+		fxdr_nfsv2time(&fp->fa2_atime, &nap->na_atime);
+		fxdr_nfsv2time(&fp->fa2_mtime, &nap->na_mtime);
+		nap->na_flags = 0;
+		nap->na_ctime.tv_sec = fxdr_unsigned(u_int32_t,
+		    fp->fa2_ctime.nfsv2_sec);
+		nap->na_ctime.tv_nsec = 0;
+		nap->na_gen = fxdr_unsigned(u_int32_t,fp->fa2_ctime.nfsv2_usec);
+		nap->na_filerev = 0;
+	}
+nfsmout:
+	return (error);
+}
+
+/*
+ * This function finds the directory cookie that corresponds to the
+ * logical byte offset given.
+ */
+APPLESTATIC nfsuint64 *
+nfscl_getcookie(struct nfsnode *np, off_t off, int add)
+{
+	struct nfsdmap *dp, *dp2;
+	int pos;
+
+	pos = off / NFS_DIRBLKSIZ;
+	if (pos == 0) {
+		KASSERT(!add, ("nfs getcookie add at 0"));
+		return (&nfs_nullcookie);
+	}
+	pos--;
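+	/*
+	 * Cookies are kept NFSNUMCOOKIES per nfsdmap entry on the n_cookies
+	 * list; walk (and, if "add" is set, extend) the list to the entry
+	 * that holds the cookie for this directory block.
+	 */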
+	dp = LIST_FIRST(&np->n_cookies);
+	if (!dp) {
+		if (add) {
+			dp = malloc(sizeof (struct nfsdmap),
+				M_NFSDIROFF, M_WAITOK);
+			dp->ndm_eocookie = 0;
+			LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list);
+		} else
+			return (NULL);
+	}
+	while (pos >= NFSNUMCOOKIES) {
+		pos -= NFSNUMCOOKIES;
+		if (LIST_NEXT(dp, ndm_list) != NULL) {
+			if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
+				pos >= dp->ndm_eocookie)
+				return (NULL);
+			dp = LIST_NEXT(dp, ndm_list);
+		} else if (add) {
+			dp2 = malloc(sizeof (struct nfsdmap),
+				M_NFSDIROFF, M_WAITOK);
+			dp2->ndm_eocookie = 0;
+			LIST_INSERT_AFTER(dp, dp2, ndm_list);
+			dp = dp2;
+		} else
+			return (NULL);
+	}
+	if (pos >= dp->ndm_eocookie) {
+		if (add)
+			dp->ndm_eocookie = pos + 1;
+		else
+			return (NULL);
+	}
+	return (&dp->ndm_cookies[pos]);
+}
+
+/*
+ * Gets a file handle out of an nfs reply sent to the client and returns
+ * the file handle and the file's attributes.
+ * For V4, it assumes that the Getfh and Getattr Ops' results are here.
+ */
+APPLESTATIC int
+nfscl_mtofh(struct nfsrv_descript *nd, struct nfsfh **nfhpp,
+    struct nfsvattr *nap, int *attrflagp)
+{
+	u_int32_t *tl;
+	int error = 0, flag = 1;
+
+	*nfhpp = NULL;
+	*attrflagp = 0;
+	/*
+	 * First get the file handle and vnode.
+	 */
+	if (nd->nd_flag & ND_NFSV3) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		flag = fxdr_unsigned(int, *tl);
+	} else if (nd->nd_flag & ND_NFSV4) {
+		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		/* If the GetFH failed, clear flag. */
+		if (*++tl != 0) {
+			nd->nd_flag |= ND_NOMOREDATA;
+			flag = 0;
+			error = ENXIO;	/* Return ENXIO so *nfhpp isn't used. */
+		}
+	}
+	if (flag) {
+		error = nfsm_getfh(nd, nfhpp);
+		if (error)
+			return (error);
+	}
+
+	/*
+	 * Now, get the attributes.
+	 */
+	if (flag != 0 && (nd->nd_flag & ND_NFSV4) != 0) {
+		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		if (*++tl != 0) {
+			nd->nd_flag |= ND_NOMOREDATA;
+			flag = 0;
+		}
+	} else if (nd->nd_flag & ND_NFSV3) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		if (flag) {
+			flag = fxdr_unsigned(int, *tl);
+		} else if (fxdr_unsigned(int, *tl)) {
+			error = nfsm_advance(nd, NFSX_V3FATTR, -1);
+			if (error)
+				return (error);
+		}
+	}
+	if (flag) {
+		error = nfsm_loadattr(nd, nap);
+		if (!error)
+			*attrflagp = 1;
+	}
+nfsmout:
+	return (error);
+}
+
+/*
+ * Initialize the owner/delegation sleep lock.
+ */
+APPLESTATIC void
+nfscl_lockinit(struct nfsv4lock *lckp)
+{
+
+	lckp->nfslock_usecnt = 0;
+	lckp->nfslock_lock = 0;
+}
+
+/*
+ * Get an exclusive lock. (Not needed for OpenBSD4, since there is only one
+ * thread for each posix process in the kernel.)
+ */
+APPLESTATIC void
+nfscl_lockexcl(struct nfsv4lock *lckp, void *mutex)
+{
+	int igotlock;
+
+	do {
+		igotlock = nfsv4_lock(lckp, 1, NULL, mutex, NULL);
+	} while (!igotlock);
+}
+
+/*
+ * Release an exclusive lock.
+ */
+APPLESTATIC void
+nfscl_lockunlock(struct nfsv4lock *lckp)
+{
+
+	nfsv4_unlock(lckp, 0);
+}
+
+/*
+ * Called to dereference a lock on a stateid (delegation or open owner).
+ */
+APPLESTATIC void
+nfscl_lockderef(struct nfsv4lock *lckp)
+{
+
+	NFSLOCKCLSTATE();
+	lckp->nfslock_usecnt--;
+	if (lckp->nfslock_usecnt == 0 && (lckp->nfslock_lock & NFSV4LOCK_WANTED)) {
+		lckp->nfslock_lock &= ~NFSV4LOCK_WANTED;
+		wakeup((caddr_t)lckp);
+	}
+	NFSUNLOCKCLSTATE();
+}
+
diff --git a/freebsd/sys/fs/nfsclient/nfs_clkdtrace.c b/freebsd/sys/fs/nfsclient/nfs_clkdtrace.c
new file mode 100644
index 0000000..9e51c2f
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clkdtrace.c
@@ -0,0 +1,587 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * This software was developed at the University of Cambridge Computer
+ * Laboratory with support from a grant from Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+
+#include <sys/dtrace.h>
+#include <sys/dtrace_bsd.h>
+
+#include <fs/nfs/nfsproto.h>
+
+#include <fs/nfsclient/nfs_kdtrace.h>
+
+/*
+ * dtnfscl is a DTrace provider that tracks the intent to perform RPCs
+ * in the NFS client, as well as access to and maintenance of the access and
+ * attribute caches.  This is not quite the same as RPCs, because NFS may
+ * issue multiple RPC transactions if authentication fails or there's a
+ * jukebox error, or none at all if the access or attribute cache
+ * hits.
+ * transmission and vnode/vfs operations, providing access to state linking
+ * the two.
+ */
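+
+/*
+ * For example, once the provider is loaded, the probes registered below
+ * can be traced from userland with dtrace(1); an illustrative one-liner:
+ *
+ *	dtrace -n 'nfscl:nfs3:read:done { @rpcs[execname] = count(); }'
+ */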
+
+static int	dtnfsclient_unload(void);
+static void	dtnfsclient_getargdesc(void *, dtrace_id_t, void *,
+		    dtrace_argdesc_t *);
+static void	dtnfsclient_provide(void *, dtrace_probedesc_t *);
+static void	dtnfsclient_destroy(void *, dtrace_id_t, void *);
+static void	dtnfsclient_enable(void *, dtrace_id_t, void *);
+static void	dtnfsclient_disable(void *, dtrace_id_t, void *);
+static void	dtnfsclient_load(void *);
+
+static dtrace_pattr_t dtnfsclient_attr = {
+{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
+{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
+};
+
+/*
+ * Description of NFSv4, NFSv3 and (optional) NFSv2 probes for a procedure.
+ */
+struct dtnfsclient_rpc {
+	char		*nr_v4_name;
+	char		*nr_v3_name;	/* Or NULL if none. */
+	char		*nr_v2_name;	/* Or NULL if none. */
+
+	/*
+	 * IDs for the start and done cases, for NFSv2, NFSv3 and NFSv4.
+	 */
+	uint32_t	 nr_v2_id_start, nr_v2_id_done;
+	uint32_t	 nr_v3_id_start, nr_v3_id_done;
+	uint32_t	 nr_v4_id_start, nr_v4_id_done;
+};
+
+/*
+ * This table is indexed by NFSv3 procedure number, but also used for NFSv2
+ * procedure names and NFSv4 operations.
+ */
+static struct dtnfsclient_rpc	dtnfsclient_rpcs[NFSV41_NPROCS + 1] = {
+	{ "null", "null", "null" },
+	{ "getattr", "getattr", "getattr" },
+	{ "setattr", "setattr", "setattr" },
+	{ "lookup", "lookup", "lookup" },
+	{ "access", "access", "noop" },
+	{ "readlink", "readlink", "readlink" },
+	{ "read", "read", "read" },
+	{ "write", "write", "write" },
+	{ "create", "create", "create" },
+	{ "mkdir", "mkdir", "mkdir" },
+	{ "symlink", "symlink", "symlink" },
+	{ "mknod", "mknod" },
+	{ "remove", "remove", "remove" },
+	{ "rmdir", "rmdir", "rmdir" },
+	{ "rename", "rename", "rename" },
+	{ "link", "link", "link" },
+	{ "readdir", "readdir", "readdir" },
+	{ "readdirplus", "readdirplus" },
+	{ "fsstat", "fsstat", "statfs" },
+	{ "fsinfo", "fsinfo" },
+	{ "pathconf", "pathconf" },
+	{ "commit", "commit" },
+	{ "lookupp" },
+	{ "setclientid" },
+	{ "setclientidcfrm" },
+	{ "lock" },
+	{ "locku" },
+	{ "open" },
+	{ "close" },
+	{ "openconfirm" },
+	{ "lockt" },
+	{ "opendowngrade" },
+	{ "renew" },
+	{ "putrootfh" },
+	{ "releaselckown" },
+	{ "delegreturn" },
+	{ "retdelegremove" },
+	{ "retdelegrename1" },
+	{ "retdelegrename2" },
+	{ "getacl" },
+	{ "setacl" },
+	{ "noop", "noop", "noop" }
+};
+
+/*
+ * Module name strings.
+ */
+static char	*dtnfsclient_accesscache_str = "accesscache";
+static char	*dtnfsclient_attrcache_str = "attrcache";
+static char	*dtnfsclient_nfs2_str = "nfs2";
+static char	*dtnfsclient_nfs3_str = "nfs3";
+static char	*dtnfsclient_nfs4_str = "nfs4";
+
+/*
+ * Function name strings.
+ */
+static char	*dtnfsclient_flush_str = "flush";
+static char	*dtnfsclient_load_str = "load";
+static char	*dtnfsclient_get_str = "get";
+
+/*
+ * Name strings.
+ */
+static char	*dtnfsclient_done_str = "done";
+static char	*dtnfsclient_hit_str = "hit";
+static char	*dtnfsclient_miss_str = "miss";
+static char	*dtnfsclient_start_str = "start";
+
+static dtrace_pops_t dtnfsclient_pops = {
+	.dtps_provide =		dtnfsclient_provide,
+	.dtps_provide_module =	NULL,
+	.dtps_enable =		dtnfsclient_enable,
+	.dtps_disable =		dtnfsclient_disable,
+	.dtps_suspend =		NULL,
+	.dtps_resume =		NULL,
+	.dtps_getargdesc =	dtnfsclient_getargdesc,
+	.dtps_getargval =	NULL,
+	.dtps_usermode =	NULL,
+	.dtps_destroy =		dtnfsclient_destroy
+};
+
+static dtrace_provider_id_t	dtnfsclient_id;
+
+/*
+ * When tracing on a procedure is enabled, the DTrace ID for an RPC event is
+ * stored in one of these NFS client-allocated arrays; 0 indicates that
+ * the event is not being traced so probes should not be called.
+ *
+ * For simplicity, we allocate the v2, v3, and v4 arrays with NFSV41_NPROCS + 1
+ * entries, and the v2 and v3 arrays are simply sparse.
+ */
+extern uint32_t			nfscl_nfs2_start_probes[NFSV41_NPROCS + 1];
+extern uint32_t			nfscl_nfs2_done_probes[NFSV41_NPROCS + 1];
+
+extern uint32_t			nfscl_nfs3_start_probes[NFSV41_NPROCS + 1];
+extern uint32_t			nfscl_nfs3_done_probes[NFSV41_NPROCS + 1];
+
+extern uint32_t			nfscl_nfs4_start_probes[NFSV41_NPROCS + 1];
+extern uint32_t			nfscl_nfs4_done_probes[NFSV41_NPROCS + 1];
+
+/*
+ * Look up a DTrace probe ID to see if it's associated with a "done" event --
+ * if so, we will return a fourth argument type of "int".
+ */
+static int
+dtnfs234_isdoneprobe(dtrace_id_t id)
+{
+	int i;
+
+	for (i = 0; i < NFSV41_NPROCS + 1; i++) {
+		if (dtnfsclient_rpcs[i].nr_v4_id_done == id ||
+		    dtnfsclient_rpcs[i].nr_v3_id_done == id ||
+		    dtnfsclient_rpcs[i].nr_v2_id_done == id)
+			return (1);
+	}
+	return (0);
+}
+
+static void
+dtnfsclient_getargdesc(void *arg, dtrace_id_t id, void *parg,
+    dtrace_argdesc_t *desc)
+{
+	const char *p = NULL;
+
+	if (id == nfscl_accesscache_flush_done_id ||
+	    id == nfscl_attrcache_flush_done_id ||
+	    id == nfscl_attrcache_get_miss_id) {
+		switch (desc->dtargd_ndx) {
+		case 0:
+			p = "struct vnode *";
+			break;
+		default:
+			desc->dtargd_ndx = DTRACE_ARGNONE;
+			break;
+		}
+	} else if (id == nfscl_accesscache_get_hit_id ||
+	    id == nfscl_accesscache_get_miss_id) {
+		switch (desc->dtargd_ndx) {
+		case 0:
+			p = "struct vnode *";
+			break;
+		case 1:
+			p = "uid_t";
+			break;
+		case 2:
+			p = "uint32_t";
+			break;
+		default:
+			desc->dtargd_ndx = DTRACE_ARGNONE;
+			break;
+		}
+	} else if (id == nfscl_accesscache_load_done_id) {
+		switch (desc->dtargd_ndx) {
+		case 0:
+			p = "struct vnode *";
+			break;
+		case 1:
+			p = "uid_t";
+			break;
+		case 2:
+			p = "uint32_t";
+			break;
+		case 3:
+			p = "int";
+			break;
+		default:
+			desc->dtargd_ndx = DTRACE_ARGNONE;
+			break;
+		}
+	} else if (id == nfscl_attrcache_get_hit_id) {
+		switch (desc->dtargd_ndx) {
+		case 0:
+			p = "struct vnode *";
+			break;
+		case 1:
+			p = "struct vattr *";
+			break;
+		default:
+			desc->dtargd_ndx = DTRACE_ARGNONE;
+			break;
+		}
+	} else if (id == nfscl_attrcache_load_done_id) {
+		switch (desc->dtargd_ndx) {
+		case 0:
+			p = "struct vnode *";
+			break;
+		case 1:
+			p = "struct vattr *";
+			break;
+		case 2:
+			p = "int";
+			break;
+		default:
+			desc->dtargd_ndx = DTRACE_ARGNONE;
+			break;
+		}
+	} else {
+		switch (desc->dtargd_ndx) {
+		case 0:
+			p = "struct vnode *";
+			break;
+		case 1:
+			p = "struct mbuf *";
+			break;
+		case 2:
+			p = "struct ucred *";
+			break;
+		case 3:
+			p = "int";
+			break;
+		case 4:
+			if (dtnfs234_isdoneprobe(id)) {
+				p = "int";
+				break;
+			}
+			/* FALLTHROUGH */
+		default:
+			desc->dtargd_ndx = DTRACE_ARGNONE;
+			break;
+		}
+	}
+	if (p != NULL)
+		strlcpy(desc->dtargd_native, p, sizeof(desc->dtargd_native));
+}
+
+static void
+dtnfsclient_provide(void *arg, dtrace_probedesc_t *desc)
+{
+	int i;
+
+	if (desc != NULL)
+		return;
+
+	/*
+	 * Register access cache probes.
+	 */
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
+	    dtnfsclient_flush_str, dtnfsclient_done_str) == 0) {
+		nfscl_accesscache_flush_done_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_accesscache_str,
+		    dtnfsclient_flush_str, dtnfsclient_done_str, 0, NULL);
+	}
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
+	    dtnfsclient_get_str, dtnfsclient_hit_str) == 0) {
+		nfscl_accesscache_get_hit_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_accesscache_str,
+		    dtnfsclient_get_str, dtnfsclient_hit_str, 0, NULL);
+	}
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
+	    dtnfsclient_get_str, dtnfsclient_miss_str) == 0) {
+		nfscl_accesscache_get_miss_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_accesscache_str,
+		    dtnfsclient_get_str, dtnfsclient_miss_str, 0, NULL);
+	}
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_accesscache_str,
+	    dtnfsclient_load_str, dtnfsclient_done_str) == 0) {
+		nfscl_accesscache_load_done_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_accesscache_str,
+		    dtnfsclient_load_str, dtnfsclient_done_str, 0, NULL);
+	}
+
+	/*
+	 * Register attribute cache probes.
+	 */
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
+	    dtnfsclient_flush_str, dtnfsclient_done_str) == 0) {
+		nfscl_attrcache_flush_done_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_attrcache_str,
+		    dtnfsclient_flush_str, dtnfsclient_done_str, 0, NULL);
+	}
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
+	    dtnfsclient_get_str, dtnfsclient_hit_str) == 0) {
+		nfscl_attrcache_get_hit_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_attrcache_str,
+		    dtnfsclient_get_str, dtnfsclient_hit_str, 0, NULL);
+	}
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
+	    dtnfsclient_get_str, dtnfsclient_miss_str) == 0) {
+		nfscl_attrcache_get_miss_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_attrcache_str,
+		    dtnfsclient_get_str, dtnfsclient_miss_str, 0, NULL);
+	}
+	if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_attrcache_str,
+	    dtnfsclient_load_str, dtnfsclient_done_str) == 0) {
+		nfscl_attrcache_load_done_id = dtrace_probe_create(
+		    dtnfsclient_id, dtnfsclient_attrcache_str,
+		    dtnfsclient_load_str, dtnfsclient_done_str, 0, NULL);
+	}
+
+	/*
+	 * Register NFSv2 RPC procedures; note sparseness check for each slot
+	 * in the NFSv3, NFSv4 procnum-indexed array.
+	 */
+	for (i = 0; i < NFSV41_NPROCS + 1; i++) {
+		if (dtnfsclient_rpcs[i].nr_v2_name != NULL &&
+		    dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs2_str,
+		    dtnfsclient_rpcs[i].nr_v2_name, dtnfsclient_start_str) ==
+		    0) {
+			dtnfsclient_rpcs[i].nr_v2_id_start =
+			    dtrace_probe_create(dtnfsclient_id,
+			    dtnfsclient_nfs2_str,
+			    dtnfsclient_rpcs[i].nr_v2_name,
+			    dtnfsclient_start_str, 0,
+			    &nfscl_nfs2_start_probes[i]);
+		}
+		if (dtnfsclient_rpcs[i].nr_v2_name != NULL &&
+		    dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs2_str,
+		    dtnfsclient_rpcs[i].nr_v2_name, dtnfsclient_done_str) ==
+		    0) {
+			dtnfsclient_rpcs[i].nr_v2_id_done = 
+			    dtrace_probe_create(dtnfsclient_id,
+			    dtnfsclient_nfs2_str,
+			    dtnfsclient_rpcs[i].nr_v2_name,
+			    dtnfsclient_done_str, 0,
+			    &nfscl_nfs2_done_probes[i]);
+		}
+	}
+
+	/*
+	 * Register NFSv3 RPC procedures; note sparseness check for each slot
+	 * in the NFSv4 procnum-indexed array.
+	 */
+	for (i = 0; i < NFSV41_NPROCS + 1; i++) {
+		if (dtnfsclient_rpcs[i].nr_v3_name != NULL &&
+		    dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs3_str,
+		    dtnfsclient_rpcs[i].nr_v3_name, dtnfsclient_start_str) ==
+		    0) {
+			dtnfsclient_rpcs[i].nr_v3_id_start =
+			    dtrace_probe_create(dtnfsclient_id,
+			    dtnfsclient_nfs3_str,
+			    dtnfsclient_rpcs[i].nr_v3_name,
+			    dtnfsclient_start_str, 0,
+			    &nfscl_nfs3_start_probes[i]);
+		}
+		if (dtnfsclient_rpcs[i].nr_v3_name != NULL &&
+		    dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs3_str,
+		    dtnfsclient_rpcs[i].nr_v3_name, dtnfsclient_done_str) ==
+		    0) {
+			dtnfsclient_rpcs[i].nr_v3_id_done = 
+			    dtrace_probe_create(dtnfsclient_id,
+			    dtnfsclient_nfs3_str,
+			    dtnfsclient_rpcs[i].nr_v3_name,
+			    dtnfsclient_done_str, 0,
+			    &nfscl_nfs3_done_probes[i]);
+		}
+	}
+
+	/*
+	 * Register NFSv4 RPC procedures.
+	 */
+	for (i = 0; i < NFSV41_NPROCS + 1; i++) {
+		if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs4_str,
+		    dtnfsclient_rpcs[i].nr_v4_name, dtnfsclient_start_str) ==
+		    0) {
+			dtnfsclient_rpcs[i].nr_v4_id_start =
+			    dtrace_probe_create(dtnfsclient_id,
+			    dtnfsclient_nfs4_str,
+			    dtnfsclient_rpcs[i].nr_v4_name,
+			    dtnfsclient_start_str, 0,
+			    &nfscl_nfs4_start_probes[i]);
+		}
+		if (dtrace_probe_lookup(dtnfsclient_id, dtnfsclient_nfs4_str,
+		    dtnfsclient_rpcs[i].nr_v4_name, dtnfsclient_done_str) ==
+		    0) {
+			dtnfsclient_rpcs[i].nr_v4_id_done = 
+			    dtrace_probe_create(dtnfsclient_id,
+			    dtnfsclient_nfs4_str,
+			    dtnfsclient_rpcs[i].nr_v4_name,
+			    dtnfsclient_done_str, 0,
+			    &nfscl_nfs4_done_probes[i]);
+		}
+	}
+}
+
+static void
+dtnfsclient_destroy(void *arg, dtrace_id_t id, void *parg)
+{
+}
+
+static void
+dtnfsclient_enable(void *arg, dtrace_id_t id, void *parg)
+{
+	uint32_t *p = parg;
+	void *f = dtrace_probe;
+
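+	/*
+	 * The access/attribute cache probes are enabled by pointing their
+	 * dedicated hook function pointers at dtrace_probe(); the RPC
+	 * start/done probes are enabled by storing the probe id in the
+	 * per-procedure slot passed in as parg.
+	 */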
+	if (id == nfscl_accesscache_flush_done_id)
+		dtrace_nfscl_accesscache_flush_done_probe = f;
+	else if (id == nfscl_accesscache_get_hit_id)
+		dtrace_nfscl_accesscache_get_hit_probe = f;
+	else if (id == nfscl_accesscache_get_miss_id)
+		dtrace_nfscl_accesscache_get_miss_probe = f;
+	else if (id == nfscl_accesscache_load_done_id)
+		dtrace_nfscl_accesscache_load_done_probe = f;
+	else if (id == nfscl_attrcache_flush_done_id)
+		dtrace_nfscl_attrcache_flush_done_probe = f;
+	else if (id == nfscl_attrcache_get_hit_id)
+		dtrace_nfscl_attrcache_get_hit_probe = f;
+	else if (id == nfscl_attrcache_get_miss_id)
+		dtrace_nfscl_attrcache_get_miss_probe = f;
+	else if (id == nfscl_attrcache_load_done_id)
+		dtrace_nfscl_attrcache_load_done_probe = f;
+	else
+		*p = id;
+}
+
+static void
+dtnfsclient_disable(void *arg, dtrace_id_t id, void *parg)
+{
+	uint32_t *p = parg;
+
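+	/* Mirror of dtnfsclient_enable(): clear the hook or zero the id. */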
+	if (id == nfscl_accesscache_flush_done_id)
+		dtrace_nfscl_accesscache_flush_done_probe = NULL;
+	else if (id == nfscl_accesscache_get_hit_id)
+		dtrace_nfscl_accesscache_get_hit_probe = NULL;
+	else if (id == nfscl_accesscache_get_miss_id)
+		dtrace_nfscl_accesscache_get_miss_probe = NULL;
+	else if (id == nfscl_accesscache_load_done_id)
+		dtrace_nfscl_accesscache_load_done_probe = NULL;
+	else if (id == nfscl_attrcache_flush_done_id)
+		dtrace_nfscl_attrcache_flush_done_probe = NULL;
+	else if (id == nfscl_attrcache_get_hit_id)
+		dtrace_nfscl_attrcache_get_hit_probe = NULL;
+	else if (id == nfscl_attrcache_get_miss_id)
+		dtrace_nfscl_attrcache_get_miss_probe = NULL;
+	else if (id == nfscl_attrcache_load_done_id)
+		dtrace_nfscl_attrcache_load_done_probe = NULL;
+	else
+		*p = 0;
+}
+
+static void
+dtnfsclient_load(void *dummy)
+{
+
+	if (dtrace_register("nfscl", &dtnfsclient_attr,
+	    DTRACE_PRIV_USER, NULL, &dtnfsclient_pops, NULL,
+	    &dtnfsclient_id) != 0)
+		return;
+
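+	/* All NFSv2/3/4 RPC probes fire through these two shared hooks. */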
+	dtrace_nfscl_nfs234_start_probe =
+	    (dtrace_nfsclient_nfs23_start_probe_func_t)dtrace_probe;
+	dtrace_nfscl_nfs234_done_probe =
+	    (dtrace_nfsclient_nfs23_done_probe_func_t)dtrace_probe;
+}
+
+static int
+dtnfsclient_unload(void)
+{
+
+	dtrace_nfscl_nfs234_start_probe = NULL;
+	dtrace_nfscl_nfs234_done_probe = NULL;
+
+	return (dtrace_unregister(dtnfsclient_id));
+}
+
+static int
+dtnfsclient_modevent(module_t mod __unused, int type, void *data __unused)
+{
+	int error = 0;
+
+	switch (type) {
+	case MOD_LOAD:
+		break;
+
+	case MOD_UNLOAD:
+		break;
+
+	case MOD_SHUTDOWN:
+		break;
+
+	default:
+		error = EOPNOTSUPP;
+		break;
+	}
+
+	return (error);
+}
+
+SYSINIT(dtnfsclient_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
+    dtnfsclient_load, NULL);
+SYSUNINIT(dtnfsclient_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
+    dtnfsclient_unload, NULL);
+
+DEV_MODULE(dtnfscl, dtnfsclient_modevent, NULL);
+MODULE_VERSION(dtnfscl, 1);
+MODULE_DEPEND(dtnfscl, dtrace, 1, 1, 1);
+MODULE_DEPEND(dtnfscl, opensolaris, 1, 1, 1);
+MODULE_DEPEND(dtnfscl, nfscl, 1, 1, 1);
+MODULE_DEPEND(dtnfscl, nfscommon, 1, 1, 1);
diff --git a/freebsd/sys/fs/nfsclient/nfs_clkrpc.c b/freebsd/sys/fs/nfsclient/nfs_clkrpc.c
new file mode 100644
index 0000000..6bdeaa7
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clkrpc.c
@@ -0,0 +1,299 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_kgssapi.h"
+
+#include <fs/nfs/nfsport.h>
+
+#include <rpc/rpc.h>
+#include <rpc/rpcsec_gss.h>
+#include <rpc/replay.h>
+
+
+NFSDLOCKMUTEX;
+
+extern SVCPOOL	*nfscbd_pool;
+
+static int nfs_cbproc(struct nfsrv_descript *, u_int32_t);
+
+extern u_long sb_max_adj;
+extern int nfs_numnfscbd;
+extern int nfscl_debuglevel;
+
+/*
+ * NFS client system calls for handling callbacks.
+ */
+
+/*
+ * Handles server to client callbacks.
+ */
+static void
+nfscb_program(struct svc_req *rqst, SVCXPRT *xprt)
+{
+	struct nfsrv_descript nd;
+	int cacherep, credflavor;
+
+	memset(&nd, 0, sizeof(nd));
+	if (rqst->rq_proc != NFSPROC_NULL &&
+	    rqst->rq_proc != NFSV4PROC_CBCOMPOUND) {
+		svcerr_noproc(rqst);
+		svc_freereq(rqst);
+		return;
+	}
+	nd.nd_procnum = rqst->rq_proc;
+	nd.nd_flag = (ND_NFSCB | ND_NFSV4);
+
+	/*
+	 * Note: we want rq_addr, not svc_getrpccaller for nd_nam2 -
+	 * NFS_SRVMAXDATA uses a NULL value for nd_nam2 to detect TCP
+	 * mounts.
+	 */
+	nd.nd_mrep = rqst->rq_args;
+	rqst->rq_args = NULL;
+	newnfs_realign(&nd.nd_mrep, M_WAITOK);
+	nd.nd_md = nd.nd_mrep;
+	nd.nd_dpos = mtod(nd.nd_md, caddr_t);
+	nd.nd_nam = svc_getrpccaller(rqst);
+	nd.nd_nam2 = rqst->rq_addr;
+	nd.nd_mreq = NULL;
+	nd.nd_cred = NULL;
+
+	NFSCL_DEBUG(1, "cbproc=%d\n",nd.nd_procnum);
+	if (nd.nd_procnum != NFSPROC_NULL) {
+		if (!svc_getcred(rqst, &nd.nd_cred, &credflavor)) {
+			svcerr_weakauth(rqst);
+			svc_freereq(rqst);
+			m_freem(nd.nd_mrep);
+			return;
+		}
+
+		/* For now, I don't care what credential flavor was used. */
+#ifdef notyet
+#ifdef MAC
+		mac_cred_associate_nfsd(nd.nd_cred);
+#endif
+#endif
+		cacherep = nfs_cbproc(&nd, rqst->rq_xid);
+	} else {
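+		/* NULL procedure: reply with an empty mbuf. */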
+		NFSMGET(nd.nd_mreq);
+		nd.nd_mreq->m_len = 0;
+		cacherep = RC_REPLY;
+	}
+	if (nd.nd_mrep != NULL)
+		m_freem(nd.nd_mrep);
+
+	if (nd.nd_cred != NULL)
+		crfree(nd.nd_cred);
+
+	if (cacherep == RC_DROPIT) {
+		if (nd.nd_mreq != NULL)
+			m_freem(nd.nd_mreq);
+		svc_freereq(rqst);
+		return;
+	}
+
+	if (nd.nd_mreq == NULL) {
+		svcerr_decode(rqst);
+		svc_freereq(rqst);
+		return;
+	}
+
+	if (nd.nd_repstat & NFSERR_AUTHERR) {
+		svcerr_auth(rqst, nd.nd_repstat & ~NFSERR_AUTHERR);
+		if (nd.nd_mreq != NULL)
+			m_freem(nd.nd_mreq);
+	} else if (!svc_sendreply_mbuf(rqst, nd.nd_mreq))
+		svcerr_systemerr(rqst);
+	else
+		NFSCL_DEBUG(1, "cbrep sent\n");
+	svc_freereq(rqst);
+}
+
+/*
+ * Check the cache and, optionally, do the RPC.
+ * Return the appropriate cache response.
+ */
+static int
+nfs_cbproc(struct nfsrv_descript *nd, u_int32_t xid)
+{
+	struct thread *td = curthread;
+	int cacherep;
+
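+	/* A NULL nd_nam2 indicates a stream (TCP) transport. */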
+	if (nd->nd_nam2 == NULL)
+		nd->nd_flag |= ND_STREAMSOCK;
+
+	nfscl_docb(nd, td);
+	if (nd->nd_repstat == NFSERR_DONTREPLY)
+		cacherep = RC_DROPIT;
+	else
+		cacherep = RC_REPLY;
+	return (cacherep);
+}
+
+/*
+ * Adds a socket to the list for servicing by nfscbds.
+ */
+int
+nfscbd_addsock(struct file *fp)
+{
+	int siz;
+	struct socket *so;
+	int error;
+	SVCXPRT *xprt;
+
+	so = fp->f_data;
+
+	siz = sb_max_adj;
+	error = soreserve(so, siz, siz);
+	if (error)
+		return (error);
+
+	/*
+	 * Steal the socket from userland so that it doesn't close
+	 * unexpectedly.
+	 */
+	if (so->so_type == SOCK_DGRAM)
+		xprt = svc_dg_create(nfscbd_pool, so, 0, 0);
+	else
+		xprt = svc_vc_create(nfscbd_pool, so, 0, 0);
+	if (xprt) {
+		fp->f_ops = &badfileops;
+		fp->f_data = NULL;
+		svc_reg(xprt, NFS_CALLBCKPROG, NFSV4_CBVERS, nfscb_program,
+		    NULL);
+		SVC_RELEASE(xprt);
+	}
+
+	return (0);
+}
+
+/*
+ * Called by nfssvc() for nfscbds. Just loops around servicing rpc requests
+ * until it is killed by a signal.
+ *
+ * For now, only support callbacks via RPCSEC_GSS if there is a KerberosV
+ * keytab entry with a host based entry in it on the client. (I'm not even
+ * sure that getting Acceptor credentials for a user principal with a
+ * credentials cache is possible, but even if it is, major changes to the
+ * kgssapi would be required.)
+ * I don't believe that this is a serious limitation since, as of 2009, most
+ * NFSv4 servers supporting callbacks are using AUTH_SYS for callbacks even
+ * when the client is using RPCSEC_GSS. (This BSD server uses AUTH_SYS
+ * for callbacks unless nfsrv_gsscallbackson is set non-zero.)
+ */
+int
+nfscbd_nfsd(struct thread *td, struct nfsd_nfscbd_args *args)
+{
+	char principal[128];
+	int error;
+
+	if (args != NULL) {
+		error = copyinstr(args->principal, principal,
+		    sizeof(principal), NULL);
+		if (error)
+			return (error);
+	} else {
+		principal[0] = '\0';
+	}
+
+	/*
+	 * Only the first nfsd actually does any work. The RPC code
+	 * adds threads to it as needed. Any extra processes offered
+	 * by nfsd just exit. If nfsd is new enough, it will call us
+	 * once with a structure that specifies how many threads to
+	 * use.
+	 */
+	NFSD_LOCK();
+	if (nfs_numnfscbd == 0) {
+		nfs_numnfscbd++;
+
+		NFSD_UNLOCK();
+
+		if (principal[0] != '\0')
+			rpc_gss_set_svc_name_call(principal, "kerberosv5",
+			    GSS_C_INDEFINITE, NFS_CALLBCKPROG, NFSV4_CBVERS);
+
+		nfscbd_pool->sp_minthreads = 4;
+		nfscbd_pool->sp_maxthreads = 4;
+			
+		svc_run(nfscbd_pool);
+
+		rpc_gss_clear_svc_name_call(NFS_CALLBCKPROG, NFSV4_CBVERS);
+
+		NFSD_LOCK();
+		nfs_numnfscbd--;
+		nfsrvd_cbinit(1);
+	}
+	NFSD_UNLOCK();
+
+	return (0);
+}
+
+/*
+ * Initialize the data structures for the server.
+ * Handshake with any new nfsds starting up to avoid any chance of
+ * corruption.
+ */
+void
+nfsrvd_cbinit(int terminating)
+{
+
+	NFSD_LOCK_ASSERT();
+
+	if (terminating) {
+		/* Wait for any xprt registrations to complete. */
+		while (nfs_numnfscbd > 0)
+			msleep(&nfs_numnfscbd, NFSDLOCKMUTEXPTR, PZERO, 
+			    "nfscbdt", 0);
+		if (nfscbd_pool != NULL) {
+			NFSD_UNLOCK();
+			svcpool_close(nfscbd_pool);
+			NFSD_LOCK();
+		}
+	}
+
+	if (nfscbd_pool == NULL) {
+		NFSD_UNLOCK();
+		nfscbd_pool = svcpool_create("nfscbd", NULL);
+		nfscbd_pool->sp_rcache = NULL;
+		nfscbd_pool->sp_assign = NULL;
+		nfscbd_pool->sp_done = NULL;
+		NFSD_LOCK();
+	}
+}
+
diff --git a/freebsd/sys/fs/nfsclient/nfs_clnfsiod.c b/freebsd/sys/fs/nfsclient/nfs_clnfsiod.c
new file mode 100644
index 0000000..3f0fb77
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clnfsiod.c
@@ -0,0 +1,343 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from nfs_syscalls.c	8.5 (Berkeley) 3/30/95
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/file.h>
+#include <sys/vnode.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/proc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/namei.h>
+#include <sys/unistd.h>
+#include <sys/kthread.h>
+#include <sys/fcntl.h>
+#include <sys/lockf.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+
+#include <fs/nfs/nfsport.h>
+#include <fs/nfsclient/nfsmount.h>
+#include <fs/nfsclient/nfs.h>
+#include <fs/nfsclient/nfsnode.h>
+
+extern struct mtx	ncl_iod_mutex;
+extern struct task	ncl_nfsiodnew_task;
+
+int ncl_numasync;
+enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
+struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
+
+static void	nfssvc_iod(void *);
+
+static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
+
+SYSCTL_DECL(_vfs_nfs);
+
+/* Maximum number of seconds a nfsiod kthread will sleep before exiting */
+static unsigned int nfs_iodmaxidle = 120;
+SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
+    "Max number of seconds an nfsiod kthread will sleep before exiting");
+
+/* Maximum number of nfsiod kthreads */
+unsigned int ncl_iodmax = 20;
+
+/* Minimum number of nfsiod kthreads to keep as spares */
+static unsigned int nfs_iodmin = 0;
+
+static int nfs_nfsiodnew_sync(void);
+
+static int
+sysctl_iodmin(SYSCTL_HANDLER_ARGS)
+{
+	int error, i;
+	int newmin;
+
+	newmin = nfs_iodmin;
+	error = sysctl_handle_int(oidp, &newmin, 0, req);
+	if (error || (req->newptr == NULL))
+		return (error);
+	NFSLOCKIOD();
+	if (newmin > ncl_iodmax) {
+		error = EINVAL;
+		goto out;
+	}
+	nfs_iodmin = newmin;
+	if (ncl_numasync >= nfs_iodmin)
+		goto out;
+	/*
+	 * If the current number of nfsiod is lower
+	 * than the new minimum, create some more.
+	 */
+	for (i = nfs_iodmin - ncl_numasync; i > 0; i--)
+		nfs_nfsiodnew_sync();
+out:
+	NFSUNLOCKIOD();	
+	return (error);
+}
+SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
+    sizeof (nfs_iodmin), sysctl_iodmin, "IU",
+    "Min number of nfsiod kthreads to keep as spares");
+
+static int
+sysctl_iodmax(SYSCTL_HANDLER_ARGS)
+{
+	int error, i;
+	int iod, newmax;
+
+	newmax = ncl_iodmax;
+	error = sysctl_handle_int(oidp, &newmax, 0, req);
+	if (error || (req->newptr == NULL))
+		return (error);
+	if (newmax > NFS_MAXASYNCDAEMON)
+		return (EINVAL);
+	NFSLOCKIOD();
+	ncl_iodmax = newmax;
+	if (ncl_numasync <= ncl_iodmax)
+		goto out;
+	/*
+	 * If there are some asleep nfsiods that should
+	 * exit, wakeup() them so that they check ncl_iodmax
+	 * and exit.  Those who are active will exit as
+	 * soon as they finish I/O.
+	 */
+	iod = ncl_numasync - 1;
+	for (i = 0; i < ncl_numasync - ncl_iodmax; i++) {
+		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE)
+			wakeup(&ncl_iodwant[iod]);
+		iod--;
+	}
+out:
+	NFSUNLOCKIOD();
+	return (0);
+}
+SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
+    sizeof (ncl_iodmax), sysctl_iodmax, "IU",
+    "Max number of nfsiod kthreads");
+
+static int
+nfs_nfsiodnew_sync(void)
+{
+	int error, i;
+
+	NFSASSERTIOD();
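+	/*
+	 * Claim the first free slot in nfs_asyncdaemon[] while the lock is
+	 * held, then drop it across the kproc_create() call.
+	 */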
+	for (i = 0; i < ncl_iodmax; i++) {
+		if (nfs_asyncdaemon[i] == 0) {
+			nfs_asyncdaemon[i] = 1;
+			break;
+		}
+	}
+	if (i == ncl_iodmax)
+		return (0);
+	NFSUNLOCKIOD();
+	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
+	    RFHIGHPID, 0, "newnfs %d", i);
+	NFSLOCKIOD();
+	if (error == 0) {
+		ncl_numasync++;
+		ncl_iodwant[i] = NFSIOD_AVAILABLE;
+	} else
+		nfs_asyncdaemon[i] = 0;
+	return (error);
+}
+
+void
+ncl_nfsiodnew_tq(__unused void *arg, int pending)
+{
+
+	NFSLOCKIOD();
+	while (pending > 0) {
+		pending--;
+		nfs_nfsiodnew_sync();
+	}
+	NFSUNLOCKIOD();
+}
+
+void
+ncl_nfsiodnew(void)
+{
+
+	NFSASSERTIOD();
+	taskqueue_enqueue(taskqueue_thread, &ncl_nfsiodnew_task);
+}
+
+static void
+nfsiod_setup(void *dummy)
+{
+	int error;
+
+	TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
+	nfscl_init();
+	NFSLOCKIOD();
+	/* Silently limit the start number of nfsiod's */
+	if (nfs_iodmin > NFS_MAXASYNCDAEMON)
+		nfs_iodmin = NFS_MAXASYNCDAEMON;
+
+	while (ncl_numasync < nfs_iodmin) {
+		error = nfs_nfsiodnew_sync();
+		if (error == -1)
+			panic("nfsiod_setup: nfs_nfsiodnew failed");
+	}
+	NFSUNLOCKIOD();
+}
+SYSINIT(newnfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);
+
+static int nfs_defect = 0;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
+    "Allow nfsiods to migrate serving different mounts");
+
+/*
+ * Asynchronous I/O daemons for client nfs.
+ * They do read-ahead and write-behind operations on the block I/O cache.
+ * Returns if we hit the timeout defined by the iodmaxidle sysctl.
+ */
+static void
+nfssvc_iod(void *instance)
+{
+	struct buf *bp;
+	struct nfsmount *nmp;
+	int myiod, timo;
+	int error = 0;
+
+	NFSLOCKIOD();
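+	/* Recover this iod's index from its slot in nfs_asyncdaemon[]. */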
+	myiod = (int *)instance - nfs_asyncdaemon;
+	/*
+	 * Main loop
+	 */
+	for (;;) {
+	    while (((nmp = ncl_iodmount[myiod]) == NULL)
+		   || !TAILQ_FIRST(&nmp->nm_bufq)) {
+		if (myiod >= ncl_iodmax)
+			goto finish;
+		if (nmp)
+			nmp->nm_bufqiods--;
+		if (ncl_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
+			ncl_iodwant[myiod] = NFSIOD_AVAILABLE;
+		ncl_iodmount[myiod] = NULL;
+		/*
+		 * Always keep at least nfs_iodmin kthreads.
+		 */
+		timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
+		error = msleep(&ncl_iodwant[myiod], &ncl_iod_mutex, PWAIT | PCATCH,
+		    "-", timo);
+		if (error) {
+			nmp = ncl_iodmount[myiod];
+			/*
+			 * Rechecking the nm_bufq closes a rare race where the
+			 * nfsiod is woken up at the exact time the idle timeout
+			 * fires.
+			 */
+			if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
+				error = 0;
+			break;
+		}
+	    }
+	    if (error)
+		    break;
+	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
+		/* Take one off the front of the list */
+		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
+		nmp->nm_bufqlen--;
+		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= ncl_numasync) {
+		    nmp->nm_bufqwant = 0;
+		    wakeup(&nmp->nm_bufq);
+		}
+		NFSUNLOCKIOD();
+		if (bp->b_flags & B_DIRECT) {
+			KASSERT((bp->b_iocmd == BIO_WRITE),
+			    ("nfssvc_iod: BIO_WRITE not set"));
+			(void)ncl_doio_directwrite(bp);
+		} else {
+			if (bp->b_iocmd == BIO_READ)
+				(void) ncl_doio(bp->b_vp, bp, bp->b_rcred,
+				    NULL, 0);
+			else
+				(void) ncl_doio(bp->b_vp, bp, bp->b_wcred,
+				    NULL, 0);
+		}
+		NFSLOCKIOD();
+		/*
+		 * Make sure the nmp hasn't been dismounted as soon as
+		 * ncl_doio() completes for the last buffer.
+		 */
+		nmp = ncl_iodmount[myiod];
+		if (nmp == NULL)
+			break;
+
+		/*
+		 * If there is more than one iod on this mount, then defect so
+		 * that the iods can be shared out fairly between the mounts.
+		 */
+		if (nfs_defect && nmp->nm_bufqiods > 1) {
+		    NFS_DPF(ASYNCIO,
+			    ("nfssvc_iod: iod %d defecting from mount %p\n",
+			     myiod, nmp));
+		    ncl_iodmount[myiod] = NULL;
+		    nmp->nm_bufqiods--;
+		    break;
+		}
+	    }
+	}
+finish:
+	nfs_asyncdaemon[myiod] = 0;
+	if (nmp)
+	    nmp->nm_bufqiods--;
+	ncl_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
+	ncl_iodmount[myiod] = NULL;
+	/* Someone may be waiting for the last nfsiod to terminate. */
+	if (--ncl_numasync == 0)
+		wakeup(&ncl_numasync);
+	NFSUNLOCKIOD();
+	if ((error == 0) || (error == EWOULDBLOCK))
+		kproc_exit(0);
+	/* Abnormal termination */
+	kproc_exit(1);
+}
diff --git a/freebsd/sys/fs/nfsclient/nfs_clnode.c b/freebsd/sys/fs/nfsclient/nfs_clnode.c
new file mode 100644
index 0000000..f41cb87
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clnode.c
@@ -0,0 +1,365 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from nfs_node.c	8.6 (Berkeley) 5/22/95
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/fcntl.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/vnode.h>
+
+#include <vm/uma.h>
+
+#include <fs/nfs/nfsport.h>
+#include <fs/nfsclient/nfsnode.h>
+#include <fs/nfsclient/nfsmount.h>
+#include <fs/nfsclient/nfs.h>
+#include <fs/nfsclient/nfs_kdtrace.h>
+
+#include <nfs/nfs_lock.h>
+
+extern struct vop_vector newnfs_vnodeops;
+extern struct buf_ops buf_ops_newnfs;
+MALLOC_DECLARE(M_NEWNFSREQ);
+
+uma_zone_t newnfsnode_zone;
+
+const char nfs_vnode_tag[] = "nfs";
+
+static void	nfs_freesillyrename(void *arg, __unused int pending);
+
+void
+ncl_nhinit(void)
+{
+
+	newnfsnode_zone = uma_zcreate("NCLNODE", sizeof(struct nfsnode), NULL,
+	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+}
+
+void
+ncl_nhuninit(void)
+{
+	uma_zdestroy(newnfsnode_zone);
+}
+
+/*
+ * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
+ * function is going to be used to get Regular Files, code must be added
+ * to fill in the "struct nfsv4node".
+ * Look up a vnode/nfsnode by file handle.
+ * Callers must check for mount points!!
+ * In all cases, a pointer to a
+ * nfsnode structure is returned.
+ */
+int
+ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
+    int lkflags)
+{
+	struct thread *td = curthread;	/* XXX */
+	struct nfsnode *np;
+	struct vnode *vp;
+	struct vnode *nvp;
+	int error;
+	u_int hash;
+	struct nfsmount *nmp;
+	struct nfsfh *nfhp;
+
+	nmp = VFSTONFS(mntp);
+	*npp = NULL;
+
+	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);
+
+	nfhp = malloc(sizeof (struct nfsfh) + fhsize,
+	    M_NFSFH, M_WAITOK);
+	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
+	nfhp->nfh_len = fhsize;
+	error = vfs_hash_get(mntp, hash, lkflags,
+	    td, &nvp, newnfs_vncmpf, nfhp);
+	free(nfhp, M_NFSFH);
+	if (error)
+		return (error);
+	if (nvp != NULL) {
+		*npp = VTONFS(nvp);
+		return (0);
+	}
+	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);
+
+	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
+	if (error) {
+		uma_zfree(newnfsnode_zone, np);
+		return (error);
+	}
+	vp = nvp;
+	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
+	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
+	vp->v_data = np;
+	np->n_vnode = vp;
+	/* 
+	 * Initialize the mutex even if the vnode is going to be a loser.
+	 * This simplifies the logic in reclaim, which can then unconditionally
+	 * destroy the mutex (in the case of the loser, or if hash_insert
+	 * happened to return an error no special casing is needed).
+	 */
+	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
+	lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE |
+	    LK_CANRECURSE);
+
+	/*
+	 * NFS supports recursive and shared locking.
+	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
+	VN_LOCK_AREC(vp);
+	VN_LOCK_ASHARE(vp);
+	/* 
+	 * Are we getting the root? If so, make sure the vnode flags
+	 * are correct 
+	 */
+	if ((fhsize == nmp->nm_fhsize) &&
+	    !bcmp(fhp, nmp->nm_fh, fhsize)) {
+		if (vp->v_type == VNON)
+			vp->v_type = VDIR;
+		vp->v_vflag |= VV_ROOT;
+	}
+
+	vp->v_vflag |= VV_VMSIZEVNLOCK;
+	
+	np->n_fhp = malloc(sizeof (struct nfsfh) + fhsize,
+	    M_NFSFH, M_WAITOK);
+	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
+	np->n_fhp->nfh_len = fhsize;
+	error = insmntque(vp, mntp);
+	if (error != 0) {
+		*npp = NULL;
+		free(np->n_fhp, M_NFSFH);
+		mtx_destroy(&np->n_mtx);
+		lockdestroy(&np->n_excl);
+		uma_zfree(newnfsnode_zone, np);
+		return (error);
+	}
+	error = vfs_hash_insert(vp, hash, lkflags, 
+	    td, &nvp, newnfs_vncmpf, np->n_fhp);
+	if (error)
+		return (error);
+	if (nvp != NULL) {
+		*npp = VTONFS(nvp);
+		/* vfs_hash_insert() vput()'s the losing vnode */
+		return (0);
+	}
+	*npp = np;
+
+	return (0);
+}
+
+/*
+ * Do the vrele(sp->s_dvp) as a separate task in order to avoid a
+ * deadlock because of a LOR when vrele() locks the directory vnode.
+ */
+static void
+nfs_freesillyrename(void *arg, __unused int pending)
+{
+	struct sillyrename *sp;
+
+	sp = arg;
+	vrele(sp->s_dvp);
+	free(sp, M_NEWNFSREQ);
+}
+
+static void
+ncl_releasesillyrename(struct vnode *vp, struct thread *td)
+{
+	struct nfsnode *np;
+	struct sillyrename *sp;
+
+	ASSERT_VOP_ELOCKED(vp, "releasesillyrename");
+	np = VTONFS(vp);
+	NFSASSERTNODE(np);
+	if (vp->v_type != VDIR) {
+		sp = np->n_sillyrename;
+		np->n_sillyrename = NULL;
+	} else
+		sp = NULL;
+	if (sp != NULL) {
+		NFSUNLOCKNODE(np);
+		(void) ncl_vinvalbuf(vp, 0, td, 1);
+		/*
+		 * Remove the silly file that was rename'd earlier
+		 */
+		ncl_removeit(sp, vp);
+		crfree(sp->s_cred);
+		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
+		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
+		NFSLOCKNODE(np);
+	}
+}
+
+int
+ncl_inactive(struct vop_inactive_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np;
+	boolean_t retv;
+
+	if (NFS_ISV4(vp) && vp->v_type == VREG) {
+		/*
+		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
+		 * Close operations are delayed until now. Any dirty
+		 * buffers/pages must be flushed before the close, so that the
+		 * stateid is available for the writes.
+		 */
+		if (vp->v_object != NULL) {
+			VM_OBJECT_WLOCK(vp->v_object);
+			retv = vm_object_page_clean(vp->v_object, 0, 0,
+			    OBJPC_SYNC);
+			VM_OBJECT_WUNLOCK(vp->v_object);
+		} else
+			retv = TRUE;
+		if (retv == TRUE) {
+			(void)ncl_flush(vp, MNT_WAIT, ap->a_td, 1, 0);
+			(void)nfsrpc_close(vp, 1, ap->a_td);
+		}
+	}
+
+	np = VTONFS(vp);
+	NFSLOCKNODE(np);
+	ncl_releasesillyrename(vp, ap->a_td);
+
+	/*
+	 * NMODIFIED means that there might be dirty/stale buffers
+	 * associated with the NFS vnode.
+	 * NDSCOMMIT means that the file is on a pNFS server and commits
+	 * should be done to the DS.
+	 * None of the other flags are meaningful after the vnode is unused.
+	 */
+	np->n_flag &= (NMODIFIED | NDSCOMMIT);
+	NFSUNLOCKNODE(np);
+	return (0);
+}
+
+/*
+ * Reclaim an nfsnode so that it can be used for other purposes.
+ */
+int
+ncl_reclaim(struct vop_reclaim_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsdmap *dp, *dp2;
+
+	/*
+	 * If the NLM is running, give it a chance to abort pending
+	 * locks.
+	 */
+	if (nfs_reclaim_p != NULL)
+		nfs_reclaim_p(ap);
+
+	NFSLOCKNODE(np);
+	ncl_releasesillyrename(vp, ap->a_td);
+	NFSUNLOCKNODE(np);
+
+	/*
+	 * Destroy the vm object and flush associated pages.
+	 */
+	vnode_destroy_vobject(vp);
+
+	if (NFS_ISV4(vp) && vp->v_type == VREG)
+		/*
+		 * We can now safely close any remaining NFSv4 Opens for
+		 * this file. Most opens will have already been closed by
+		 * ncl_inactive(), but there are cases where it is not
+		 * called, so we need to do it again here.
+		 */
+		(void) nfsrpc_close(vp, 1, ap->a_td);
+
+	vfs_hash_remove(vp);
+
+	/*
+	 * Call nfscl_reclaimnode() to save attributes in the delegation,
+	 * as required.
+	 */
+	if (vp->v_type == VREG)
+		nfscl_reclaimnode(vp);
+
+	/*
+	 * Free up any directory cookie structures and
+	 * large file handle structures that might be associated with
+	 * this nfs node.
+	 */
+	if (vp->v_type == VDIR) {
+		dp = LIST_FIRST(&np->n_cookies);
+		while (dp) {
+			dp2 = dp;
+			dp = LIST_NEXT(dp, ndm_list);
+			free(dp2, M_NFSDIROFF);
+		}
+	}
+	if (np->n_writecred != NULL)
+		crfree(np->n_writecred);
+	free(np->n_fhp, M_NFSFH);
+	if (np->n_v4 != NULL)
+		free(np->n_v4, M_NFSV4NODE);
+	mtx_destroy(&np->n_mtx);
+	lockdestroy(&np->n_excl);
+	uma_zfree(newnfsnode_zone, vp->v_data);
+	vp->v_data = NULL;
+	return (0);
+}
+
+/*
+ * Invalidate both the access and attribute caches for this vnode.
+ */
+void
+ncl_invalcaches(struct vnode *vp)
+{
+	struct nfsnode *np = VTONFS(vp);
+	int i;
+
+	NFSLOCKNODE(np);
+	for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
+		np->n_accesscache[i].stamp = 0;
+	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
+	np->n_attrstamp = 0;
+	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+	NFSUNLOCKNODE(np);
+}
diff --git a/freebsd/sys/fs/nfsclient/nfs_clport.c b/freebsd/sys/fs/nfsclient/nfs_clport.c
new file mode 100644
index 0000000..74320c9
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clport.c
@@ -0,0 +1,1414 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <sys/capsicum.h>
+
+/*
+ * generally, I don't like #includes inside .h files, but it seems to
+ * be the easiest way to handle the port.
+ */
+#include <sys/fail.h>
+#include <sys/hash.h>
+#include <sys/sysctl.h>
+#include <fs/nfs/nfsport.h>
+#include <netinet/in_fib.h>
+#include <netinet/if_ether.h>
+#include <netinet6/ip6_var.h>
+#include <net/if_types.h>
+
+#include <fs/nfsclient/nfs_kdtrace.h>
+
+#ifdef KDTRACE_HOOKS
+dtrace_nfsclient_attrcache_flush_probe_func_t
+		dtrace_nfscl_attrcache_flush_done_probe;
+uint32_t	nfscl_attrcache_flush_done_id;
+
+dtrace_nfsclient_attrcache_get_hit_probe_func_t
+		dtrace_nfscl_attrcache_get_hit_probe;
+uint32_t	nfscl_attrcache_get_hit_id;
+
+dtrace_nfsclient_attrcache_get_miss_probe_func_t
+		dtrace_nfscl_attrcache_get_miss_probe;
+uint32_t	nfscl_attrcache_get_miss_id;
+
+dtrace_nfsclient_attrcache_load_probe_func_t
+		dtrace_nfscl_attrcache_load_done_probe;
+uint32_t	nfscl_attrcache_load_done_id;
+#endif /* KDTRACE_HOOKS */
+
+extern u_int32_t newnfs_true, newnfs_false, newnfs_xdrneg1;
+extern struct vop_vector newnfs_vnodeops;
+extern struct vop_vector newnfs_fifoops;
+extern uma_zone_t newnfsnode_zone;
+extern struct buf_ops buf_ops_newnfs;
+extern int ncl_pbuf_freecnt;
+extern short nfsv4_cbport;
+extern int nfscl_enablecallb;
+extern int nfs_numnfscbd;
+extern int nfscl_inited;
+struct mtx ncl_iod_mutex;
+NFSDLOCKMUTEX;
+extern struct mtx nfsrv_dslock_mtx;
+
+extern void (*ncl_call_invalcaches)(struct vnode *);
+
+SYSCTL_DECL(_vfs_nfs);
+static int ncl_fileid_maxwarnings = 10;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, fileid_maxwarnings, CTLFLAG_RWTUN,
+    &ncl_fileid_maxwarnings, 0,
+    "Limit fileid corruption warnings; 0 is off; -1 is unlimited");
+static volatile int ncl_fileid_nwarnings;
+
+static void nfscl_warn_fileid(struct nfsmount *, struct nfsvattr *,
+    struct nfsvattr *);
+
+/*
+ * Comparison function for vfs_hash functions.
+ */
+int
+newnfs_vncmpf(struct vnode *vp, void *arg)
+{
+	struct nfsfh *nfhp = (struct nfsfh *)arg;
+	struct nfsnode *np = VTONFS(vp);
+
+	if (np->n_fhp->nfh_len != nfhp->nfh_len ||
+	    NFSBCMP(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len))
+		return (1);
+	return (0);
+}
+
+/*
+ * Look up a vnode/nfsnode by file handle.
+ * Callers must check for mount points!!
+ * In all cases, a pointer to a
+ * nfsnode structure is returned.
+ * This variant takes a "struct nfsfh *" as second argument and uses
+ * that structure up, either by hanging off the nfsnode or FREEing it.
+ */
+int
+nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
+    struct componentname *cnp, struct thread *td, struct nfsnode **npp,
+    void *stuff, int lkflags)
+{
+	struct nfsnode *np, *dnp;
+	struct vnode *vp, *nvp;
+	struct nfsv4node *newd, *oldd;
+	int error;
+	u_int hash;
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(mntp);
+	dnp = VTONFS(dvp);
+	*npp = NULL;
+
+	hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT);
+
+	error = vfs_hash_get(mntp, hash, lkflags,
+	    td, &nvp, newnfs_vncmpf, nfhp);
+	if (error == 0 && nvp != NULL) {
+		/*
+		 * I believe there is a slight chance that vgonel() could
+		 * get called on this vnode between when NFSVOPLOCK() drops
+		 * the VI_LOCK() and vget() acquires it again, so that it
+		 * hasn't yet had v_usecount incremented. If this were to
+		 * happen, the VI_DOOMED flag would be set, so check for
+		 * that here. Since we now have the v_usecount incremented,
+		 * we should be ok until we vrele() it, if the VI_DOOMED
+		 * flag isn't set now.
+		 */
+		VI_LOCK(nvp);
+		if ((nvp->v_iflag & VI_DOOMED)) {
+			VI_UNLOCK(nvp);
+			vrele(nvp);
+			error = ENOENT;
+		} else {
+			VI_UNLOCK(nvp);
+		}
+	}
+	if (error) {
+		free(nfhp, M_NFSFH);
+		return (error);
+	}
+	if (nvp != NULL) {
+		np = VTONFS(nvp);
+		/*
+		 * For NFSv4, check to see if it is the same name and
+		 * replace the name, if it is different.
+		 */
+		oldd = newd = NULL;
+		if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL &&
+		    nvp->v_type == VREG &&
+		    (np->n_v4->n4_namelen != cnp->cn_namelen ||
+		     NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
+		     cnp->cn_namelen) ||
+		     dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
+		     NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
+		     dnp->n_fhp->nfh_len))) {
+		    newd = malloc(
+			sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len +
+			cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK);
+		    NFSLOCKNODE(np);
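+		    /*
+		     * Re-check under the node lock, since n_v4 may have been
+		     * changed while the replacement entry was allocated.
+		     */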
+		    if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG
+			&& (np->n_v4->n4_namelen != cnp->cn_namelen ||
+			 NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
+			 cnp->cn_namelen) ||
+			 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
+			 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
+			 dnp->n_fhp->nfh_len))) {
+			oldd = np->n_v4;
+			np->n_v4 = newd;
+			newd = NULL;
+			np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
+			np->n_v4->n4_namelen = cnp->cn_namelen;
+			NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
+			    dnp->n_fhp->nfh_len);
+			NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
+			    cnp->cn_namelen);
+		    }
+		    NFSUNLOCKNODE(np);
+		}
+		if (newd != NULL)
+			free(newd, M_NFSV4NODE);
+		if (oldd != NULL)
+			free(oldd, M_NFSV4NODE);
+		*npp = np;
+		free(nfhp, M_NFSFH);
+		return (0);
+	}
+	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);
+
+	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
+	if (error) {
+		uma_zfree(newnfsnode_zone, np);
+		free(nfhp, M_NFSFH);
+		return (error);
+	}
+	vp = nvp;
+	KASSERT(vp->v_bufobj.bo_bsize != 0, ("nfscl_nget: bo_bsize == 0"));
+	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
+	vp->v_data = np;
+	np->n_vnode = vp;
+	/* 
+	 * Initialize the mutex even if the vnode is going to be a loser.
+	 * This simplifies the logic in reclaim, which can then unconditionally
+	 * destroy the mutex (in the case of the loser, or if hash_insert
+	 * happened to return an error no special casing is needed).
+	 */
+	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
+	lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE |
+	    LK_CANRECURSE);
+
+	/* 
+	 * Are we getting the root? If so, make sure the vnode flags
+	 * are correct 
+	 */
+	if ((nfhp->nfh_len == nmp->nm_fhsize) &&
+	    !bcmp(nfhp->nfh_fh, nmp->nm_fh, nfhp->nfh_len)) {
+		if (vp->v_type == VNON)
+			vp->v_type = VDIR;
+		vp->v_vflag |= VV_ROOT;
+	}
+
+	vp->v_vflag |= VV_VMSIZEVNLOCK;
+	
+	np->n_fhp = nfhp;
+	/*
+	 * For NFSv4, we have to attach the directory file handle and
+	 * file name, so that Open Ops can be done later.
+	 */
+	if (nmp->nm_flag & NFSMNT_NFSV4) {
+		np->n_v4 = malloc(sizeof (struct nfsv4node)
+		    + dnp->n_fhp->nfh_len + cnp->cn_namelen - 1, M_NFSV4NODE,
+		    M_WAITOK);
+		np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
+		np->n_v4->n4_namelen = cnp->cn_namelen;
+		NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
+		    dnp->n_fhp->nfh_len);
+		NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
+		    cnp->cn_namelen);
+	} else {
+		np->n_v4 = NULL;
+	}
+
+	/*
+	 * NFS supports recursive and shared locking.
+	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
+	VN_LOCK_AREC(vp);
+	VN_LOCK_ASHARE(vp);
+	error = insmntque(vp, mntp);
+	if (error != 0) {
+		*npp = NULL;
+		mtx_destroy(&np->n_mtx);
+		lockdestroy(&np->n_excl);
+		free(nfhp, M_NFSFH);
+		if (np->n_v4 != NULL)
+			free(np->n_v4, M_NFSV4NODE);
+		uma_zfree(newnfsnode_zone, np);
+		return (error);
+	}
+	error = vfs_hash_insert(vp, hash, lkflags, 
+	    td, &nvp, newnfs_vncmpf, nfhp);
+	if (error)
+		return (error);
+	if (nvp != NULL) {
+		*npp = VTONFS(nvp);
+		/* vfs_hash_insert() vput()'s the losing vnode */
+		return (0);
+	}
+	*npp = np;
+
+	return (0);
+}
+
+/*
+ * Another variant of nfs_nget(). This one is only used by reopen. It
+ * takes almost the same args as nfs_nget(), but only succeeds if an entry
+ * exists in the cache. (Since files should already be "open" with a
+ * vnode ref cnt on the node when reopen calls this, it should always
+ * succeed.)
+ * Also, don't get a vnode lock, since it may already be locked by some
+ * other process that is handling it. This is ok, since all other threads
+ * on the client are blocked by the nfsc_lock being exclusively held by the
+ * caller of this function.
+ */
+int
+nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize,
+    struct thread *td, struct nfsnode **npp)
+{
+	struct vnode *nvp;
+	u_int hash;
+	struct nfsfh *nfhp;
+	int error;
+
+	*npp = NULL;
+	/* For forced dismounts, just return error. */
+	if (NFSCL_FORCEDISM(mntp))
+		return (EINTR);
+	nfhp = malloc(sizeof (struct nfsfh) + fhsize,
+	    M_NFSFH, M_WAITOK);
+	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
+	nfhp->nfh_len = fhsize;
+
+	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);
+
+	/*
+	 * First, try to get the vnode locked, but don't block for the lock.
+	 */
+	error = vfs_hash_get(mntp, hash, (LK_EXCLUSIVE | LK_NOWAIT), td, &nvp,
+	    newnfs_vncmpf, nfhp);
+	if (error == 0 && nvp != NULL) {
+		NFSVOPUNLOCK(nvp, 0);
+	} else if (error == EBUSY) {
+		/*
+		 * It is safe so long as a vflush() with
+		 * FORCECLOSE has not been done. Since the Renew thread is
+		 * stopped and the MNTK_UNMOUNTF flag is set before doing
+		 * a vflush() with FORCECLOSE, we should be ok here.
+		 */
+		if (NFSCL_FORCEDISM(mntp))
+			error = EINTR;
+		else {
+			vfs_hash_ref(mntp, hash, td, &nvp, newnfs_vncmpf, nfhp);
+			if (nvp == NULL) {
+				error = ENOENT;
+			} else if ((nvp->v_iflag & VI_DOOMED) != 0) {
+				error = ENOENT;
+				vrele(nvp);
+			} else {
+				error = 0;
+			}
+		}
+	}
+	free(nfhp, M_NFSFH);
+	if (error)
+		return (error);
+	if (nvp != NULL) {
+		*npp = VTONFS(nvp);
+		return (0);
+	}
+	return (EINVAL);
+}
+
+static void
+nfscl_warn_fileid(struct nfsmount *nmp, struct nfsvattr *oldnap,
+    struct nfsvattr *newnap)
+{
+	int off;
+
+	if (ncl_fileid_maxwarnings >= 0 &&
+	    ncl_fileid_nwarnings >= ncl_fileid_maxwarnings)
+		return;
+	off = 0;
+	if (ncl_fileid_maxwarnings >= 0) {
+		if (++ncl_fileid_nwarnings >= ncl_fileid_maxwarnings)
+			off = 1;
+	}
+
+	printf("newnfs: server '%s' error: fileid changed. "
+	    "fsid %jx:%jx: expected fileid %#jx, got %#jx. "
+	    "(BROKEN NFS SERVER OR MIDDLEWARE)\n",
+	    nmp->nm_com.nmcom_hostname,
+	    (uintmax_t)nmp->nm_fsid[0],
+	    (uintmax_t)nmp->nm_fsid[1],
+	    (uintmax_t)oldnap->na_fileid,
+	    (uintmax_t)newnap->na_fileid);
+
+	if (off)
+		printf("newnfs: Logged %d times about fileid corruption; "
+		    "going quiet to avoid spamming logs excessively. (Limit "
+		    "is: %d).\n", ncl_fileid_nwarnings,
+		    ncl_fileid_maxwarnings);
+}
+
+/*
+ * Load the attribute cache (that lives in the nfsnode entry) with
+ * the attributes of the second argument and, iff vaper is not NULL,
+ * copy the attributes to *vaper.
+ * Similar to nfs_loadattrcache(), except the attributes are passed in
+ * instead of being parsed out of the mbuf list.
+ */
+int
+nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
+    void *stuff, int writeattr, int dontshrink)
+{
+	struct vnode *vp = *vpp;
+	struct vattr *vap, *nvap = &nap->na_vattr, *vaper = nvaper;
+	struct nfsnode *np;
+	struct nfsmount *nmp;
+	struct timespec mtime_save;
+	int error, force_fid_err;
+
+	error = 0;
+
+	/*
+	 * If v_type == VNON it is a new node, so fill in the v_type,
+	 * n_mtime fields. Check to see if it represents a special 
+	 * device, and if so, check for a possible alias. Once the
+	 * correct vnode has been obtained, fill in the rest of the
+	 * information.
+	 */
+	np = VTONFS(vp);
+	NFSLOCKNODE(np);
+	if (vp->v_type != nvap->va_type) {
+		vp->v_type = nvap->va_type;
+		if (vp->v_type == VFIFO)
+			vp->v_op = &newnfs_fifoops;
+		np->n_mtime = nvap->va_mtime;
+	}
+	nmp = VFSTONFS(vp->v_mount);
+	vap = &np->n_vattr.na_vattr;
+	mtime_save = vap->va_mtime;
+	if (writeattr) {
+		np->n_vattr.na_filerev = nap->na_filerev;
+		np->n_vattr.na_size = nap->na_size;
+		np->n_vattr.na_mtime = nap->na_mtime;
+		np->n_vattr.na_ctime = nap->na_ctime;
+		np->n_vattr.na_fsid = nap->na_fsid;
+		np->n_vattr.na_mode = nap->na_mode;
+	} else {
+		force_fid_err = 0;
+		KFAIL_POINT_ERROR(DEBUG_FP, nfscl_force_fileid_warning,
+		    force_fid_err);
+		/*
+		 * BROKEN NFS SERVER OR MIDDLEWARE
+		 *
+		 * Certain NFS servers (certain old proprietary filers ca.
+		 * 2006) or broken middleboxes (e.g. WAN accelerator products)
+		 * will respond to GETATTR requests with results for a
+		 * different fileid.
+		 *
+		 * The WAN accelerator we've observed not only serves stale
+		 * cache results for a given file, it also occasionally serves
+		 * results for wholly different files.  This causes surprising
+		 * problems; for example the cached size attribute of a file
+		 * may truncate down and then back up, resulting in zero
+		 * regions in file contents read by applications.  We observed
+		 * this reliably with Clang and .c files during parallel build.
+		 * A pcap revealed packet fragmentation and GETATTR RPC
+		 * responses with wholly wrong fileids.
+		 */
+		if ((np->n_vattr.na_fileid != 0 &&
+		     np->n_vattr.na_fileid != nap->na_fileid) ||
+		    force_fid_err) {
+			nfscl_warn_fileid(nmp, &np->n_vattr, nap);
+			error = EIDRM;
+			goto out;
+		}
+		NFSBCOPY((caddr_t)nap, (caddr_t)&np->n_vattr,
+		    sizeof (struct nfsvattr));
+	}
+
+	/*
+	 * For NFSv4, if the node's fsid is not equal to the mount point's
+	 * fsid, return the low order 32bits of the node's fsid. This
+	 * allows getcwd(3) to work. There is a chance that the fsid might
+	 * be the same as a local fs, but since this is in an NFS mount
+	 * point, I don't think that will cause any problems?
+	 */
+	if (NFSHASNFSV4(nmp) && NFSHASHASSETFSID(nmp) &&
+	    (nmp->nm_fsid[0] != np->n_vattr.na_filesid[0] ||
+	     nmp->nm_fsid[1] != np->n_vattr.na_filesid[1])) {
+		/*
+		 * va_fsid needs to be set to some value derived from
+		 * np->n_vattr.na_filesid that is not equal
+		 * vp->v_mount->mnt_stat.f_fsid[0], so that it changes
+		 * from the value used for the top level server volume
+		 * in the mounted subtree.
+		 */
+		vn_fsid(vp, vap);
+		if ((uint32_t)vap->va_fsid == np->n_vattr.na_filesid[0])
+			vap->va_fsid = hash32_buf(
+			    np->n_vattr.na_filesid, 2 * sizeof(uint64_t), 0);
+	} else
+		vn_fsid(vp, vap);
+	np->n_attrstamp = time_second;
+	if (vap->va_size != np->n_size) {
+		if (vap->va_type == VREG) {
+			if (dontshrink && vap->va_size < np->n_size) {
+				/*
+				 * We've been told not to shrink the file;
+				 * zero np->n_attrstamp to indicate that
+				 * the attributes are stale.
+				 */
+				vap->va_size = np->n_size;
+				np->n_attrstamp = 0;
+				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+			} else if (np->n_flag & NMODIFIED) {
+				/*
+				 * We've modified the file: Use the larger
+				 * of our size, and the server's size.
+				 */
+				if (vap->va_size < np->n_size) {
+					vap->va_size = np->n_size;
+				} else {
+					np->n_size = vap->va_size;
+					np->n_flag |= NSIZECHANGED;
+				}
+			} else {
+				np->n_size = vap->va_size;
+				np->n_flag |= NSIZECHANGED;
+			}
+		} else {
+			np->n_size = vap->va_size;
+		}
+	}
+	/*
+	 * The following checks are added to prevent a race between (say)
+	 * a READDIR+ and a WRITE. 
+	 * READDIR+, WRITE requests sent out.
+	 * READDIR+ resp, WRITE resp received on client.
+	 * However, the WRITE resp was handled before the READDIR+ resp
+	 * causing the post op attrs from the write to be loaded first
+	 * and the attrs from the READDIR+ to be loaded later. If this 
+	 * happens, we have stale attrs loaded into the attrcache.
+	 * We detect this by for the mtime moving back. We invalidate the 
+	 * We detect this by the mtime moving back; we invalidate the
+	 */
+	if (timespeccmp(&mtime_save, &vap->va_mtime, >)) {
+		/* The mtime went backwards; invalidate the attrcache. */
+		np->n_attrstamp = 0;
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+	}
+	if (vaper != NULL) {
+		NFSBCOPY((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
+		if (np->n_flag & NCHG) {
+			if (np->n_flag & NACC)
+				vaper->va_atime = np->n_atim;
+			if (np->n_flag & NUPD)
+				vaper->va_mtime = np->n_mtim;
+		}
+	}
+
+out:
+#ifdef KDTRACE_HOOKS
+	if (np->n_attrstamp != 0)
+		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error);
+#endif
+	(void)ncl_pager_setsize(vp, NULL);
+	return (error);
+}
+
+/*
+ * Call vnode_pager_setsize() if the size of the node changed, as
+ * recorded in nfsnode vs. v_object, or delay the call if notifying
+ * the pager is not possible at the moment.
+ *
+ * If nsizep is non-NULL, the call is delayed and the new node size is
+ * provided.  Caller should itself call vnode_pager_setsize() if
+ * function returned true.  If nsizep is NULL, function tries to call
+ * vnode_pager_setsize() itself if needed and possible, and the nfs
+ * node is unlocked unconditionally, the return value is not useful.
+ */
+bool
+ncl_pager_setsize(struct vnode *vp, u_quad_t *nsizep)
+{
+	struct nfsnode *np;
+	vm_object_t object;
+	struct vattr *vap;
+	u_quad_t nsize;
+	bool setnsize;
+
+	np = VTONFS(vp);
+	NFSASSERTNODE(np);
+
+	vap = &np->n_vattr.na_vattr;
+	nsize = vap->va_size;
+	object = vp->v_object;
+	setnsize = false;
+
+	if (object != NULL && nsize != object->un_pager.vnp.vnp_size) {
+		if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
+			setnsize = true;
+		else
+			np->n_flag |= NVNSETSZSKIP;
+	}
+	if (nsizep == NULL) {
+		NFSUNLOCKNODE(np);
+		if (setnsize)
+			vnode_pager_setsize(vp, nsize);
+		setnsize = false;
+	} else {
+		*nsizep = nsize;
+	}
+	return (setnsize);
+}
+
+/*
+ * Fill in the client id name. For these bytes:
+ * 1 - they must be unique
+ * 2 - they should be persistent across client reboots
+ * 1 is more critical than 2
+ * Use the mount point's unique id plus either the uuid or, if that
+ * isn't set, random junk.
+ */
+void
+nfscl_fillclid(u_int64_t clval, char *uuid, u_int8_t *cp, u_int16_t idlen)
+{
+	int uuidlen;
+
+	/*
+	 * First, put in the 64bit mount point identifier.
+	 */
+	if (idlen >= sizeof (u_int64_t)) {
+		NFSBCOPY((caddr_t)&clval, cp, sizeof (u_int64_t));
+		cp += sizeof (u_int64_t);
+		idlen -= sizeof (u_int64_t);
+	}
+
+	/*
+	 * If uuid is non-zero length, use it.
+	 */
+	uuidlen = strlen(uuid);
+	if (uuidlen > 0 && idlen >= uuidlen) {
+		NFSBCOPY(uuid, cp, uuidlen);
+		cp += uuidlen;
+		idlen -= uuidlen;
+	}
+
+	/*
+	 * This only normally happens if the uuid isn't set.
+	 */
+	while (idlen > 0) {
+		*cp++ = (u_int8_t)(arc4random() % 256);
+		idlen--;
+	}
+}
+
+/*
+ * Fill in a lock owner name. For now, pid + the process's creation time.
+ */
+void
+nfscl_filllockowner(void *id, u_int8_t *cp, int flags)
+{
+	union {
+		u_int32_t	lval;
+		u_int8_t	cval[4];
+	} tl;
+	struct proc *p;
+
+	if (id == NULL) {
+		/* Return the single open_owner of all 0 bytes. */
+		bzero(cp, NFSV4CL_LOCKNAMELEN);
+		return;
+	}
+	if ((flags & F_POSIX) != 0) {
+		p = (struct proc *)id;
+		tl.lval = p->p_pid;
+		*cp++ = tl.cval[0];
+		*cp++ = tl.cval[1];
+		*cp++ = tl.cval[2];
+		*cp++ = tl.cval[3];
+		tl.lval = p->p_stats->p_start.tv_sec;
+		*cp++ = tl.cval[0];
+		*cp++ = tl.cval[1];
+		*cp++ = tl.cval[2];
+		*cp++ = tl.cval[3];
+		tl.lval = p->p_stats->p_start.tv_usec;
+		*cp++ = tl.cval[0];
+		*cp++ = tl.cval[1];
+		*cp++ = tl.cval[2];
+		*cp = tl.cval[3];
+	} else if ((flags & F_FLOCK) != 0) {
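+		/* Use the opaque id pointer value itself as the lock owner. */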
+		bcopy(&id, cp, sizeof(id));
+		bzero(&cp[sizeof(id)], NFSV4CL_LOCKNAMELEN - sizeof(id));
+	} else {
+		printf("nfscl_filllockowner: not F_POSIX or F_FLOCK\n");
+		bzero(cp, NFSV4CL_LOCKNAMELEN);
+	}
+}
+
+/*
+ * Find the parent process for the thread passed in as an argument.
+ * If none exists, return NULL, otherwise return a thread for the parent.
+ * (Can be any of the threads, since it is only used for td->td_proc.)
+ */
+NFSPROC_T *
+nfscl_getparent(struct thread *td)
+{
+	struct proc *p;
+	struct thread *ptd;
+
+	if (td == NULL)
+		return (NULL);
+	p = td->td_proc;
+	if (p->p_pid == 0)
+		return (NULL);
+	p = p->p_pptr;
+	if (p == NULL)
+		return (NULL);
+	ptd = TAILQ_FIRST(&p->p_threads);
+	return (ptd);
+}
+
+/*
+ * Start up the renew kernel thread.
+ */
+static void
+start_nfscl(void *arg)
+{
+	struct nfsclclient *clp;
+	struct thread *td;
+
+	clp = (struct nfsclclient *)arg;
+	td = TAILQ_FIRST(&clp->nfsc_renewthread->p_threads);
+	nfscl_renewthread(clp, td);
+	kproc_exit(0);
+}
+
+void
+nfscl_start_renewthread(struct nfsclclient *clp)
+{
+
+	kproc_create(start_nfscl, (void *)clp, &clp->nfsc_renewthread, 0, 0,
+	    "nfscl");
+}
+
+/*
+ * Handle wcc_data.
+ * For NFSv4, it assumes that nfsv4_wccattr() was used to set up the getattr
+ * as the first Op after PutFH.
+ * (For NFSv4, the postop attributes are after the Op, so they can't be
+ *  parsed here. A separate call to nfscl_postop_attr() is required.)
+ */
+int
+nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
+    struct nfsvattr *nap, int *flagp, int *wccflagp, void *stuff)
+{
+	u_int32_t *tl;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsvattr nfsva;
+	int error = 0;
+
+	if (wccflagp != NULL)
+		*wccflagp = 0;
+	if (nd->nd_flag & ND_NFSV3) {
+		*flagp = 0;
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		if (*tl == newnfs_true) {
+			NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
+			if (wccflagp != NULL) {
+				NFSLOCKNODE(np);
+				*wccflagp = (np->n_mtime.tv_sec ==
+				    fxdr_unsigned(u_int32_t, *(tl + 2)) &&
+				    np->n_mtime.tv_nsec ==
+				    fxdr_unsigned(u_int32_t, *(tl + 3)));
+				NFSUNLOCKNODE(np);
+			}
+		}
+		error = nfscl_postop_attr(nd, nap, flagp, stuff);
+		if (wccflagp != NULL && *flagp == 0)
+			*wccflagp = 0;
+	} else if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR))
+	    == (ND_NFSV4 | ND_V4WCCATTR)) {
+		error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
+		    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+		    NULL, NULL, NULL, NULL, NULL);
+		if (error)
+			return (error);
+		/*
+		 * Get rid of Op# and status for next op.
+		 */
+		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		if (*++tl)
+			nd->nd_flag |= ND_NOMOREDATA;
+		if (wccflagp != NULL &&
+		    nfsva.na_vattr.va_mtime.tv_sec != 0) {
+			NFSLOCKNODE(np);
+			*wccflagp = (np->n_mtime.tv_sec ==
+			    nfsva.na_vattr.va_mtime.tv_sec &&
+			    np->n_mtime.tv_nsec ==
+			    nfsva.na_vattr.va_mtime.tv_nsec);
+			NFSUNLOCKNODE(np);
+		}
+	}
+nfsmout:
+	return (error);
+}
+
+/*
+ * Get postop attributes.
+ */
+int
+nfscl_postop_attr(struct nfsrv_descript *nd, struct nfsvattr *nap, int *retp,
+    void *stuff)
+{
+	u_int32_t *tl;
+	int error = 0;
+
+	*retp = 0;
+	if (nd->nd_flag & ND_NOMOREDATA)
+		return (error);
+	if (nd->nd_flag & ND_NFSV3) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		*retp = fxdr_unsigned(int, *tl);
+	} else if (nd->nd_flag & ND_NFSV4) {
+		/*
+		 * For NFSv4, the postop attr are at the end, so no point
+		 * in looking if nd_repstat != 0.
+		 */
+		if (!nd->nd_repstat) {
+			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			if (*(tl + 1))
+				/* should never happen, since nd_repstat == 0 here */
+				nd->nd_flag |= ND_NOMOREDATA;
+			else
+				*retp = 1;
+		}
+	} else if (!nd->nd_repstat) {
+		/* For NFSv2, the attributes are here iff nd_repstat == 0 */
+		*retp = 1;
+	}
+	if (*retp) {
+		error = nfsm_loadattr(nd, nap);
+		if (error)
+			*retp = 0;
+	}
+nfsmout:
+	return (error);
+}
+
+/*
+ * nfscl_request() - mostly a wrapper for newnfs_request().
+ */
+int
+nfscl_request(struct nfsrv_descript *nd, struct vnode *vp, NFSPROC_T *p,
+    struct ucred *cred, void *stuff)
+{
+	int ret, vers;
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(vp->v_mount);
+	if (nd->nd_flag & ND_NFSV4)
+		vers = NFS_VER4;
+	else if (nd->nd_flag & ND_NFSV3)
+		vers = NFS_VER3;
+	else
+		vers = NFS_VER2;
+	ret = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
+		NFS_PROG, vers, NULL, 1, NULL, NULL);
+	return (ret);
+}
+
+/*
+ * Fill in this BSD's variant of statfs using nfsstatfs.
+ */
+void
+nfscl_loadsbinfo(struct nfsmount *nmp, struct nfsstatfs *sfp, void *statfs)
+{
+	struct statfs *sbp = (struct statfs *)statfs;
+
+	if (nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) {
+		sbp->f_bsize = NFS_FABLKSIZE;
+		sbp->f_blocks = sfp->sf_tbytes / NFS_FABLKSIZE;
+		sbp->f_bfree = sfp->sf_fbytes / NFS_FABLKSIZE;
+		/*
+		 * Although sf_abytes is uint64_t and f_bavail is int64_t,
+		 * the value after dividing by NFS_FABLKSIZE is small
+		 * enough that it will fit in 63bits, so it is ok to
+		 * assign it to f_bavail without fear that it will become
+		 * negative.
+		 */
+		sbp->f_bavail = sfp->sf_abytes / NFS_FABLKSIZE;
+		sbp->f_files = sfp->sf_tfiles;
+		/* Since f_ffree is int64_t, clip it to 63bits. */
+		if (sfp->sf_ffiles > INT64_MAX)
+			sbp->f_ffree = INT64_MAX;
+		else
+			sbp->f_ffree = sfp->sf_ffiles;
+	} else if ((nmp->nm_flag & NFSMNT_NFSV4) == 0) {
+		/*
+		 * The type casts to (int32_t) ensure that this code is
+		 * compatible with the old NFS client, in that it will
+		 * propagate bit31 to the high order bits. This may or may
+		 * not be correct for NFSv2, but since it is a legacy
+		 * environment, I'd rather retain backwards compatibility.
+		 */
+		sbp->f_bsize = (int32_t)sfp->sf_bsize;
+		sbp->f_blocks = (int32_t)sfp->sf_blocks;
+		sbp->f_bfree = (int32_t)sfp->sf_bfree;
+		sbp->f_bavail = (int32_t)sfp->sf_bavail;
+		sbp->f_files = 0;
+		sbp->f_ffree = 0;
+	}
+}
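
Editor's note (illustrative sketch, not part of the imported file): for NFSv3/v4, nfscl_loadsbinfo() above converts the server's 64-bit byte counts into NFS_FABLKSIZE blocks and clips values that would not fit the signed statfs fields. A standalone sketch of the same arithmetic; the names and the 512-byte block size are assumptions of the sketch:

    #include <stdint.h>

    #define FABLKSIZE	512	/* assumed stand-in for NFS_FABLKSIZE */

    /* Convert a byte count into blocks for a signed 64-bit statfs field. */
    static int64_t
    bytes_to_blocks(uint64_t nbytes)
    {
    	uint64_t blocks = nbytes / FABLKSIZE;

    	/* Clip so the result cannot go negative when stored as int64_t. */
    	return (blocks > INT64_MAX ? INT64_MAX : (int64_t)blocks);
    }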
+
+/*
+ * Use the fsinfo stuff to update the mount point.
+ */
+void
+nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp)
+{
+
+	if ((nmp->nm_wsize == 0 || fsp->fs_wtpref < nmp->nm_wsize) &&
+	    fsp->fs_wtpref >= NFS_FABLKSIZE)
+		nmp->nm_wsize = (fsp->fs_wtpref + NFS_FABLKSIZE - 1) &
+		    ~(NFS_FABLKSIZE - 1);
+	if (fsp->fs_wtmax < nmp->nm_wsize && fsp->fs_wtmax > 0) {
+		nmp->nm_wsize = fsp->fs_wtmax & ~(NFS_FABLKSIZE - 1);
+		if (nmp->nm_wsize == 0)
+			nmp->nm_wsize = fsp->fs_wtmax;
+	}
+	if (nmp->nm_wsize < NFS_FABLKSIZE)
+		nmp->nm_wsize = NFS_FABLKSIZE;
+	if ((nmp->nm_rsize == 0 || fsp->fs_rtpref < nmp->nm_rsize) &&
+	    fsp->fs_rtpref >= NFS_FABLKSIZE)
+		nmp->nm_rsize = (fsp->fs_rtpref + NFS_FABLKSIZE - 1) &
+		    ~(NFS_FABLKSIZE - 1);
+	if (fsp->fs_rtmax < nmp->nm_rsize && fsp->fs_rtmax > 0) {
+		nmp->nm_rsize = fsp->fs_rtmax & ~(NFS_FABLKSIZE - 1);
+		if (nmp->nm_rsize == 0)
+			nmp->nm_rsize = fsp->fs_rtmax;
+	}
+	if (nmp->nm_rsize < NFS_FABLKSIZE)
+		nmp->nm_rsize = NFS_FABLKSIZE;
+	if ((nmp->nm_readdirsize == 0 || fsp->fs_dtpref < nmp->nm_readdirsize)
+	    && fsp->fs_dtpref >= NFS_DIRBLKSIZ)
+		nmp->nm_readdirsize = (fsp->fs_dtpref + NFS_DIRBLKSIZ - 1) &
+		    ~(NFS_DIRBLKSIZ - 1);
+	if (fsp->fs_rtmax < nmp->nm_readdirsize && fsp->fs_rtmax > 0) {
+		nmp->nm_readdirsize = fsp->fs_rtmax & ~(NFS_DIRBLKSIZ - 1);
+		if (nmp->nm_readdirsize == 0)
+			nmp->nm_readdirsize = fsp->fs_rtmax;
+	}
+	if (nmp->nm_readdirsize < NFS_DIRBLKSIZ)
+		nmp->nm_readdirsize = NFS_DIRBLKSIZ;
+	if (fsp->fs_maxfilesize > 0 &&
+	    fsp->fs_maxfilesize < nmp->nm_maxfilesize)
+		nmp->nm_maxfilesize = fsp->fs_maxfilesize;
+	nmp->nm_mountp->mnt_stat.f_iosize = newnfs_iosize(nmp);
+	nmp->nm_state |= NFSSTA_GOTFSINFO;
+}
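
Editor's note (illustrative sketch, not part of the imported file): the clamping above repeatedly rounds transfer sizes to a multiple of the block size with the mask "& ~(blk - 1)", which is only valid because NFS_FABLKSIZE and NFS_DIRBLKSIZ are powers of two. A tiny sketch of the two rounding helpers this pattern corresponds to (hypothetical names):

    #include <stdint.h>

    /* Round val down to a multiple of blk; blk must be a power of two. */
    static uint32_t
    round_down_blk(uint32_t val, uint32_t blk)
    {
    	return (val & ~(blk - 1));
    }

    /* Round val up to a multiple of blk, as done for the preferred sizes. */
    static uint32_t
    round_up_blk(uint32_t val, uint32_t blk)
    {
    	return ((val + blk - 1) & ~(blk - 1));
    }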
+
+/*
+ * Look up the source address that should be used to communicate with
+ * @nmp and store it in @paddr.
+ *
+ * Returns a pointer to the address on success, or NULL on failure.
+ */
+u_int8_t *
+nfscl_getmyip(struct nfsmount *nmp, struct in6_addr *paddr, int *isinet6p)
+{
+#if defined(INET6) || defined(INET)
+	int error, fibnum;
+
+	fibnum = curthread->td_proc->p_fibnum;
+#endif
+#ifdef INET
+	if (nmp->nm_nam->sa_family == AF_INET) {
+		struct sockaddr_in *sin;
+		struct nhop4_extended nh_ext;
+
+		sin = (struct sockaddr_in *)nmp->nm_nam;
+		CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
+		error = fib4_lookup_nh_ext(fibnum, sin->sin_addr, 0, 0,
+		    &nh_ext);
+		CURVNET_RESTORE();
+		if (error != 0)
+			return (NULL);
+
+		if (IN_LOOPBACK(ntohl(nh_ext.nh_src.s_addr))) {
+			/* Ignore loopback addresses */
+			return (NULL);
+		}
+
+		*isinet6p = 0;
+		*((struct in_addr *)paddr) = nh_ext.nh_src;
+
+		return (u_int8_t *)paddr;
+	}
+#endif
+#ifdef INET6
+	if (nmp->nm_nam->sa_family == AF_INET6) {
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)nmp->nm_nam;
+
+		CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
+		error = in6_selectsrc_addr(fibnum, &sin6->sin6_addr,
+		    sin6->sin6_scope_id, NULL, paddr, NULL);
+		CURVNET_RESTORE();
+		if (error != 0)
+			return (NULL);
+
+		if (IN6_IS_ADDR_LOOPBACK(paddr))
+			return (NULL);
+
+		/* Scope is embedded in the address. */
+		*isinet6p = 1;
+
+		return (u_int8_t *)paddr;
+	}
+#endif
+	return (NULL);
+}
+
+/*
+ * Copy NFS uid, gids from the cred structure.
+ */
+void
+newnfs_copyincred(struct ucred *cr, struct nfscred *nfscr)
+{
+	int i;
+
+	KASSERT(cr->cr_ngroups >= 0,
+	    ("newnfs_copyincred: negative cr_ngroups"));
+	nfscr->nfsc_uid = cr->cr_uid;
+	nfscr->nfsc_ngroups = MIN(cr->cr_ngroups, NFS_MAXGRPS + 1);
+	for (i = 0; i < nfscr->nfsc_ngroups; i++)
+		nfscr->nfsc_groups[i] = cr->cr_groups[i];
+}
+
+
+/*
+ * Do any client specific initialization.
+ */
+void
+nfscl_init(void)
+{
+	static int inited = 0;
+
+	if (inited)
+		return;
+	inited = 1;
+	nfscl_inited = 1;
+	ncl_pbuf_freecnt = nswbuf / 2 + 1;
+}
+
+/*
+ * Check each of the attributes to be set, to ensure they aren't already
+ * the correct value. Disable setting ones already correct.
+ */
+int
+nfscl_checksattr(struct vattr *vap, struct nfsvattr *nvap)
+{
+
+	if (vap->va_mode != (mode_t)VNOVAL) {
+		if (vap->va_mode == nvap->na_mode)
+			vap->va_mode = (mode_t)VNOVAL;
+	}
+	if (vap->va_uid != (uid_t)VNOVAL) {
+		if (vap->va_uid == nvap->na_uid)
+			vap->va_uid = (uid_t)VNOVAL;
+	}
+	if (vap->va_gid != (gid_t)VNOVAL) {
+		if (vap->va_gid == nvap->na_gid)
+			vap->va_gid = (gid_t)VNOVAL;
+	}
+	if (vap->va_size != VNOVAL) {
+		if (vap->va_size == nvap->na_size)
+			vap->va_size = VNOVAL;
+	}
+
+	/*
+	 * We are normally called with only a partially initialized
+	 * VAP.  Since the NFSv3 spec says that a server may use the
+	 * file attributes to store the verifier, the spec requires
+	 * us to do a SETATTR RPC. FreeBSD servers store the verifier
+	 * in atime, but we can't really assume that all servers will,
+	 * so we ensure that our SETATTR sets both atime and mtime.
+	 * Set the VA_UTIMES_NULL flag for this case, so that
+	 * the server's time will be used.  This is needed to
+	 * work around a bug in some Solaris servers, where
+	 * setting the time TOCLIENT causes the Setattr RPC
+	 * to return NFS_OK, but not set va_mode.
+	 */
+	if (vap->va_mtime.tv_sec == VNOVAL) {
+		vfs_timestamp(&vap->va_mtime);
+		vap->va_vaflags |= VA_UTIMES_NULL;
+	}
+	if (vap->va_atime.tv_sec == VNOVAL)
+		vap->va_atime = vap->va_mtime;
+	return (1);
+}
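
Editor's note (illustrative sketch, not part of the imported file): the idea above is to avoid sending attributes that already have the desired value by resetting them to VNOVAL, so the Setattr carries only real changes. A minimal sketch of that pattern for a single field; NOVAL and the helper name are inventions of the sketch:

    #include <sys/types.h>

    #define NOVAL	((mode_t)-1)	/* stands in for VNOVAL here */

    /* Drop the requested mode if it already matches the cached mode. */
    static void
    drop_unchanged_mode(mode_t *want_mode, mode_t cur_mode)
    {
    	if (*want_mode != NOVAL && *want_mode == cur_mode)
    		*want_mode = NOVAL;
    }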
+
+/*
+ * Map nfsv4 errors to errno.h errors.
+ * The uid and gid arguments are only used for NFSERR_BADOWNER and that
+ * error should only be returned for the Open, Create and Setattr Ops.
+ * As such, most calls can just pass in 0 for those arguments.
+ */
+APPLESTATIC int
+nfscl_maperr(struct thread *td, int error, uid_t uid, gid_t gid)
+{
+	struct proc *p;
+
+	if (error < 10000 || error >= NFSERR_STALEWRITEVERF)
+		return (error);
+	if (td != NULL)
+		p = td->td_proc;
+	else
+		p = NULL;
+	switch (error) {
+	case NFSERR_BADOWNER:
+		tprintf(p, LOG_INFO,
+		    "No name and/or group mapping for uid,gid:(%d,%d)\n",
+		    uid, gid);
+		return (EPERM);
+	case NFSERR_BADNAME:
+	case NFSERR_BADCHAR:
+		printf("nfsv4 char/name not handled by server\n");
+		return (ENOENT);
+	case NFSERR_STALECLIENTID:
+	case NFSERR_STALESTATEID:
+	case NFSERR_EXPIRED:
+	case NFSERR_BADSTATEID:
+	case NFSERR_BADSESSION:
+		printf("nfsv4 recover err returned %d\n", error);
+		return (EIO);
+	case NFSERR_BADHANDLE:
+	case NFSERR_SERVERFAULT:
+	case NFSERR_BADTYPE:
+	case NFSERR_FHEXPIRED:
+	case NFSERR_RESOURCE:
+	case NFSERR_MOVED:
+	case NFSERR_NOFILEHANDLE:
+	case NFSERR_MINORVERMISMATCH:
+	case NFSERR_OLDSTATEID:
+	case NFSERR_BADSEQID:
+	case NFSERR_LEASEMOVED:
+	case NFSERR_RECLAIMBAD:
+	case NFSERR_BADXDR:
+	case NFSERR_OPILLEGAL:
+		printf("nfsv4 client/server protocol prob err=%d\n",
+		    error);
+		return (EIO);
+	default:
+		tprintf(p, LOG_INFO, "nfsv4 err=%d\n", error);
+		return (EIO);
+	}
+}
+
+/*
+ * Check to see if the process for this owner exists. Return 1 if it doesn't
+ * and 0 otherwise.
+ */
+int
+nfscl_procdoesntexist(u_int8_t *own)
+{
+	union {
+		u_int32_t	lval;
+		u_int8_t	cval[4];
+	} tl;
+	struct proc *p;
+	pid_t pid;
+	int i, ret = 0;
+
+	/* For the single open_owner of all 0 bytes, just return 0. */
+	for (i = 0; i < NFSV4CL_LOCKNAMELEN; i++)
+		if (own[i] != 0)
+			break;
+	if (i == NFSV4CL_LOCKNAMELEN)
+		return (0);
+
+	tl.cval[0] = *own++;
+	tl.cval[1] = *own++;
+	tl.cval[2] = *own++;
+	tl.cval[3] = *own++;
+	pid = tl.lval;
+	p = pfind_locked(pid);
+	if (p == NULL)
+		return (1);
+	if (p->p_stats == NULL) {
+		PROC_UNLOCK(p);
+		return (0);
+	}
+	tl.cval[0] = *own++;
+	tl.cval[1] = *own++;
+	tl.cval[2] = *own++;
+	tl.cval[3] = *own++;
+	if (tl.lval != p->p_stats->p_start.tv_sec) {
+		ret = 1;
+	} else {
+		tl.cval[0] = *own++;
+		tl.cval[1] = *own++;
+		tl.cval[2] = *own++;
+		tl.cval[3] = *own;
+		if (tl.lval != p->p_stats->p_start.tv_usec)
+			ret = 1;
+	}
+	PROC_UNLOCK(p);
+	return (ret);
+}
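
Editor's note (illustrative sketch, not part of the imported file): the byte-by-byte decode above implies the opaque owner name starts with the 4-byte pid followed by the 4-byte tv_sec and 4-byte tv_usec of the process start time, matching what nfscl_filllockowner() packs for the F_POSIX case. A small userland sketch of that encoding; the names are hypothetical and, as in the kernel code, host byte order is assumed:

    #include <stdint.h>
    #include <string.h>
    #include <sys/time.h>
    #include <sys/types.h>

    /* Pack a pid and its start time into an opaque lock-owner prefix. */
    static void
    fill_owner_name(uint8_t *own, pid_t pid, const struct timeval *start)
    {
    	uint32_t v;

    	v = (uint32_t)pid;
    	memcpy(own, &v, sizeof(v));		/* bytes 0-3: pid */
    	v = (uint32_t)start->tv_sec;
    	memcpy(own + 4, &v, sizeof(v));		/* bytes 4-7: start tv_sec */
    	v = (uint32_t)start->tv_usec;
    	memcpy(own + 8, &v, sizeof(v));		/* bytes 8-11: start tv_usec */
    }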
+
+/*
+ * nfs pseudo system call for the client
+ *
+ * MPSAFE
+ */
+static int
+nfssvc_nfscl(struct thread *td, struct nfssvc_args *uap)
+{
+	struct file *fp;
+	struct nfscbd_args nfscbdarg;
+	struct nfsd_nfscbd_args nfscbdarg2;
+	struct nameidata nd;
+	struct nfscl_dumpmntopts dumpmntopts;
+	cap_rights_t rights;
+	char *buf;
+	int error;
+	struct mount *mp;
+	struct nfsmount *nmp;
+
+	if (uap->flag & NFSSVC_CBADDSOCK) {
+		error = copyin(uap->argp, (caddr_t)&nfscbdarg, sizeof(nfscbdarg));
+		if (error)
+			return (error);
+		/*
+		 * Since we don't know what rights might be required,
+		 * pretend that we need them all. It is better to be too
+		 * careful than too reckless.
+		 */
+		error = fget(td, nfscbdarg.sock,
+		    cap_rights_init(&rights, CAP_SOCK_CLIENT), &fp);
+		if (error)
+			return (error);
+		if (fp->f_type != DTYPE_SOCKET) {
+			fdrop(fp, td);
+			return (EPERM);
+		}
+		error = nfscbd_addsock(fp);
+		fdrop(fp, td);
+		if (!error && nfscl_enablecallb == 0) {
+			nfsv4_cbport = nfscbdarg.port;
+			nfscl_enablecallb = 1;
+		}
+	} else if (uap->flag & NFSSVC_NFSCBD) {
+		if (uap->argp == NULL) 
+			return (EINVAL);
+		error = copyin(uap->argp, (caddr_t)&nfscbdarg2,
+		    sizeof(nfscbdarg2));
+		if (error)
+			return (error);
+		error = nfscbd_nfsd(td, &nfscbdarg2);
+	} else if (uap->flag & NFSSVC_DUMPMNTOPTS) {
+		error = copyin(uap->argp, &dumpmntopts, sizeof(dumpmntopts));
+		if (error == 0 && (dumpmntopts.ndmnt_blen < 256 ||
+		    dumpmntopts.ndmnt_blen > 1024))
+			error = EINVAL;
+		if (error == 0)
+			error = nfsrv_lookupfilename(&nd,
+			    dumpmntopts.ndmnt_fname, td);
+		if (error == 0 && strcmp(nd.ni_vp->v_mount->mnt_vfc->vfc_name,
+		    "nfs") != 0) {
+			vput(nd.ni_vp);
+			error = EINVAL;
+		}
+		if (error == 0) {
+			buf = malloc(dumpmntopts.ndmnt_blen, M_TEMP, M_WAITOK);
+			nfscl_retopts(VFSTONFS(nd.ni_vp->v_mount), buf,
+			    dumpmntopts.ndmnt_blen);
+			vput(nd.ni_vp);
+			error = copyout(buf, dumpmntopts.ndmnt_buf,
+			    dumpmntopts.ndmnt_blen);
+			free(buf, M_TEMP);
+		}
+	} else if (uap->flag & NFSSVC_FORCEDISM) {
+		buf = malloc(MNAMELEN + 1, M_TEMP, M_WAITOK);
+		error = copyinstr(uap->argp, buf, MNAMELEN + 1, NULL);
+		if (error == 0) {
+			nmp = NULL;
+			mtx_lock(&mountlist_mtx);
+			TAILQ_FOREACH(mp, &mountlist, mnt_list) {
+				if (strcmp(mp->mnt_stat.f_mntonname, buf) ==
+				    0 && strcmp(mp->mnt_stat.f_fstypename,
+				    "nfs") == 0 && mp->mnt_data != NULL) {
+					nmp = VFSTONFS(mp);
+					NFSDDSLOCK();
+					if (nfsv4_findmirror(nmp) != NULL) {
+						NFSDDSUNLOCK();
+						error = ENXIO;
+						nmp = NULL;
+						break;
+					}
+					mtx_lock(&nmp->nm_mtx);
+					if ((nmp->nm_privflag &
+					    NFSMNTP_FORCEDISM) == 0) {
+						nmp->nm_privflag |= 
+						   (NFSMNTP_FORCEDISM |
+						    NFSMNTP_CANCELRPCS);
+						mtx_unlock(&nmp->nm_mtx);
+					} else {
+						mtx_unlock(&nmp->nm_mtx);
+						nmp = NULL;
+					}
+					NFSDDSUNLOCK();
+					break;
+				}
+			}
+			mtx_unlock(&mountlist_mtx);
+
+			if (nmp != NULL) {
+				/*
+				 * Call newnfs_nmcancelreqs() to cause
+				 * any RPCs in progress on the mount point to
+				 * fail.
+				 * This will cause any process waiting for an
+				 * RPC to complete while holding a vnode lock
+				 * on the mounted-on vnode (such as "df" or
+				 * a non-forced "umount") to fail.
+				 * This will unlock the mounted-on vnode so
+				 * a forced dismount can succeed.
+				 * Then clear NFSMNTP_CANCELRPCS and wakeup(),
+				 * so that nfs_unmount() can complete.
+				 */
+				newnfs_nmcancelreqs(nmp);
+				mtx_lock(&nmp->nm_mtx);
+				nmp->nm_privflag &= ~NFSMNTP_CANCELRPCS;
+				wakeup(nmp);
+				mtx_unlock(&nmp->nm_mtx);
+			} else if (error == 0)
+				error = EINVAL;
+		}
+		free(buf, M_TEMP);
+	} else {
+		error = EINVAL;
+	}
+	return (error);
+}
+
+extern int (*nfsd_call_nfscl)(struct thread *, struct nfssvc_args *);
+
+/*
+ * Called once to initialize data structures...
+ */
+static int
+nfscl_modevent(module_t mod, int type, void *data)
+{
+	int error = 0;
+	static int loaded = 0;
+
+	switch (type) {
+	case MOD_LOAD:
+		if (loaded)
+			return (0);
+		newnfs_portinit();
+		mtx_init(&ncl_iod_mutex, "ncl_iod_mutex", NULL, MTX_DEF);
+		nfscl_init();
+		NFSD_LOCK();
+		nfsrvd_cbinit(0);
+		NFSD_UNLOCK();
+		ncl_call_invalcaches = ncl_invalcaches;
+		nfsd_call_nfscl = nfssvc_nfscl;
+		loaded = 1;
+		break;
+
+	case MOD_UNLOAD:
+		if (nfs_numnfscbd != 0) {
+			error = EBUSY;
+			break;
+		}
+
+		/*
+		 * XXX: Unloading of nfscl module is unsupported.
+		 */
+#if 0
+		ncl_call_invalcaches = NULL;
+		nfsd_call_nfscl = NULL;
+		/* and get rid of the mutexes */
+		mtx_destroy(&ncl_iod_mutex);
+		loaded = 0;
+		break;
+#else
+		/* FALLTHROUGH */
+#endif
+	default:
+		error = EOPNOTSUPP;
+		break;
+	}
+	return (error);
+}
+static moduledata_t nfscl_mod = {
+	"nfscl",
+	nfscl_modevent,
+	NULL,
+};
+DECLARE_MODULE(nfscl, nfscl_mod, SI_SUB_VFS, SI_ORDER_FIRST);
+
+/* So that loader and kldload(2) can find us, wherever we are.. */
+MODULE_VERSION(nfscl, 1);
+MODULE_DEPEND(nfscl, nfscommon, 1, 1, 1);
+MODULE_DEPEND(nfscl, krpc, 1, 1, 1);
+MODULE_DEPEND(nfscl, nfssvc, 1, 1, 1);
+MODULE_DEPEND(nfscl, nfslock, 1, 1, 1);
+
diff --git a/freebsd/sys/fs/nfsclient/nfs_clrpcops.c b/freebsd/sys/fs/nfsclient/nfs_clrpcops.c
new file mode 100644
index 0000000..8feb2a8
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clrpcops.c
@@ -0,0 +1,7666 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Rpc op calls, generally called from the vnode op calls or through the
+ * buffer cache, for NFS v2, 3 and 4.
+ * These do not normally make any changes to vnode arguments or use
+ * structures that might change between the VFS variants. The returned
+ * arguments are all at the end, after the NFSPROC_T *p one.
+ */
+
+#ifndef APPLEKEXT
+#include "opt_inet6.h"
+
+#include <fs/nfs/nfsport.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+SYSCTL_DECL(_vfs_nfs);
+
+static int	nfsignore_eexist = 0;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, ignore_eexist, CTLFLAG_RW,
+    &nfsignore_eexist, 0, "NFS ignore EEXIST replies for mkdir/symlink");
+
+static int	nfscl_dssameconn = 0;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, dssameconn, CTLFLAG_RW,
+    &nfscl_dssameconn, 0, "Use same TCP connection to multiple DSs");
+
+/*
+ * Global variables
+ */
+extern int nfs_numnfscbd;
+extern struct timeval nfsboottime;
+extern u_int32_t newnfs_false, newnfs_true;
+extern nfstype nfsv34_type[9];
+extern int nfsrv_useacl;
+extern char nfsv4_callbackaddr[INET6_ADDRSTRLEN];
+extern int nfscl_debuglevel;
+extern int nfs_pnfsiothreads;
+NFSCLSTATEMUTEX;
+int nfstest_outofseq = 0;
+int nfscl_assumeposixlocks = 1;
+int nfscl_enablecallb = 0;
+short nfsv4_cbport = NFSV4_CBPORT;
+int nfstest_openallsetattr = 0;
+#endif	/* !APPLEKEXT */
+
+#define	DIRHDSIZ	offsetof(struct dirent, d_name)
+
+/*
+ * nfscl_getsameserver() can return one of three values:
+ * NFSDSP_USETHISSESSION - Use this session for the DS.
+ * NFSDSP_SEQTHISSESSION - Use the nfsclds_sequence field of this dsp for new
+ *     session.
+ * NFSDSP_NOTFOUND - No matching server was found.
+ */
+enum nfsclds_state {
+	NFSDSP_USETHISSESSION = 0,
+	NFSDSP_SEQTHISSESSION = 1,
+	NFSDSP_NOTFOUND = 2,
+};
+
+/*
+ * Do a write RPC on a DS data file, using this structure for the arguments,
+ * so that this function can be executed by a separate kernel process.
+ */
+struct nfsclwritedsdorpc {
+	int			done;
+	int			inprog;
+	struct task		tsk;
+	struct vnode		*vp;
+	int			iomode;
+	int			must_commit;
+	nfsv4stateid_t		*stateidp;
+	struct nfsclds		*dsp;
+	uint64_t		off;
+	int			len;
+	struct nfsfh		*fhp;
+	struct mbuf		*m;
+	int			vers;
+	int			minorvers;
+	struct ucred		*cred;
+	NFSPROC_T		*p;
+	int			err;
+};
+
+static int nfsrpc_setattrrpc(vnode_t , struct vattr *, nfsv4stateid_t *,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, int *, void *);
+static int nfsrpc_readrpc(vnode_t , struct uio *, struct ucred *,
+    nfsv4stateid_t *, NFSPROC_T *, struct nfsvattr *, int *, void *);
+static int nfsrpc_writerpc(vnode_t , struct uio *, int *, int *,
+    struct ucred *, nfsv4stateid_t *, NFSPROC_T *, struct nfsvattr *, int *,
+    void *);
+static int nfsrpc_createv23(vnode_t , char *, int, struct vattr *,
+    nfsquad_t, int, struct ucred *, NFSPROC_T *, struct nfsvattr *,
+    struct nfsvattr *, struct nfsfh **, int *, int *, void *);
+static int nfsrpc_createv4(vnode_t , char *, int, struct vattr *,
+    nfsquad_t, int, struct nfsclowner *, struct nfscldeleg **, struct ucred *,
+    NFSPROC_T *, struct nfsvattr *, struct nfsvattr *, struct nfsfh **, int *,
+    int *, void *, int *);
+static int nfsrpc_locku(struct nfsrv_descript *, struct nfsmount *,
+    struct nfscllockowner *, u_int64_t, u_int64_t,
+    u_int32_t, struct ucred *, NFSPROC_T *, int);
+static int nfsrpc_setaclrpc(vnode_t, struct ucred *, NFSPROC_T *,
+    struct acl *, nfsv4stateid_t *, void *);
+static int nfsrpc_getlayout(struct nfsmount *, vnode_t, struct nfsfh *, int,
+    uint32_t *, nfsv4stateid_t *, uint64_t, struct nfscllayout **,
+    struct ucred *, NFSPROC_T *);
+static int nfsrpc_fillsa(struct nfsmount *, struct sockaddr_in *,
+    struct sockaddr_in6 *, sa_family_t, int, struct nfsclds **, NFSPROC_T *);
+static void nfscl_initsessionslots(struct nfsclsession *);
+static int nfscl_doflayoutio(vnode_t, struct uio *, int *, int *, int *,
+    nfsv4stateid_t *, int, struct nfscldevinfo *, struct nfscllayout *,
+    struct nfsclflayout *, uint64_t, uint64_t, int, struct ucred *,
+    NFSPROC_T *);
+static int nfscl_dofflayoutio(vnode_t, struct uio *, int *, int *, int *,
+    nfsv4stateid_t *, int, struct nfscldevinfo *, struct nfscllayout *,
+    struct nfsclflayout *, uint64_t, uint64_t, int, int, struct mbuf *,
+    struct nfsclwritedsdorpc *, struct ucred *, NFSPROC_T *);
+static struct mbuf *nfsm_copym(struct mbuf *, int, int);
+static int nfsrpc_readds(vnode_t, struct uio *, nfsv4stateid_t *, int *,
+    struct nfsclds *, uint64_t, int, struct nfsfh *, int, int, int,
+    struct ucred *, NFSPROC_T *);
+static int nfsrpc_writeds(vnode_t, struct uio *, int *, int *,
+    nfsv4stateid_t *, struct nfsclds *, uint64_t, int,
+    struct nfsfh *, int, int, int, int, struct ucred *, NFSPROC_T *);
+static int nfsio_writedsmir(vnode_t, int *, int *, nfsv4stateid_t *,
+    struct nfsclds *, uint64_t, int, struct nfsfh *, struct mbuf *, int, int,
+    struct nfsclwritedsdorpc *, struct ucred *, NFSPROC_T *);
+static int nfsrpc_writedsmir(vnode_t, int *, int *, nfsv4stateid_t *,
+    struct nfsclds *, uint64_t, int, struct nfsfh *, struct mbuf *, int, int,
+    struct ucred *, NFSPROC_T *);
+static enum nfsclds_state nfscl_getsameserver(struct nfsmount *,
+    struct nfsclds *, struct nfsclds **, uint32_t *);
+static int nfsio_commitds(vnode_t, uint64_t, int, struct nfsclds *,
+    struct nfsfh *, int, int, struct nfsclwritedsdorpc *, struct ucred *,
+    NFSPROC_T *);
+static int nfsrpc_commitds(vnode_t, uint64_t, int, struct nfsclds *,
+    struct nfsfh *, int, int, struct ucred *, NFSPROC_T *);
+static void nfsrv_setuplayoutget(struct nfsrv_descript *, int, uint64_t,
+    uint64_t, uint64_t, nfsv4stateid_t *, int, int, int);
+static int nfsrv_parseug(struct nfsrv_descript *, int, uid_t *, gid_t *,
+    NFSPROC_T *);
+static int nfsrv_parselayoutget(struct nfsrv_descript *, nfsv4stateid_t *,
+    int *, struct nfsclflayouthead *);
+static int nfsrpc_getopenlayout(struct nfsmount *, vnode_t, u_int8_t *,
+    int, uint8_t *, int, uint32_t, struct nfsclopen *, uint8_t *, int,
+    struct nfscldeleg **, struct ucred *, NFSPROC_T *);
+static int nfsrpc_getcreatelayout(vnode_t, char *, int, struct vattr *,
+    nfsquad_t, int, struct nfsclowner *, struct nfscldeleg **,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, struct nfsvattr *,
+    struct nfsfh **, int *, int *, void *, int *);
+static int nfsrpc_openlayoutrpc(struct nfsmount *, vnode_t, u_int8_t *,
+    int, uint8_t *, int, uint32_t, struct nfsclopen *, uint8_t *, int,
+    struct nfscldeleg **, nfsv4stateid_t *, int, int, int, int *,
+    struct nfsclflayouthead *, int *, struct ucred *, NFSPROC_T *);
+static int nfsrpc_createlayout(vnode_t, char *, int, struct vattr *,
+    nfsquad_t, int, struct nfsclowner *, struct nfscldeleg **,
+    struct ucred *, NFSPROC_T *, struct nfsvattr *, struct nfsvattr *,
+    struct nfsfh **, int *, int *, void *, int *, nfsv4stateid_t *,
+    int, int, int, int *, struct nfsclflayouthead *, int *);
+static int nfsrpc_layoutget(struct nfsmount *, uint8_t *, int, int, uint64_t,
+    uint64_t, uint64_t, int, int, nfsv4stateid_t *, int *,
+    struct nfsclflayouthead *, struct ucred *, NFSPROC_T *, void *);
+static int nfsrpc_layoutgetres(struct nfsmount *, vnode_t, uint8_t *,
+    int, nfsv4stateid_t *, int, uint32_t *, struct nfscllayout **,
+    struct nfsclflayouthead *, int, int, int *, struct ucred *, NFSPROC_T *);
+
+int nfs_pnfsio(task_fn_t *, void *);
+
+/*
+ * nfs null call from vfs.
+ */
+APPLESTATIC int
+nfsrpc_null(vnode_t vp, struct ucred *cred, NFSPROC_T *p)
+{
+	int error;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	
+	NFSCL_REQSTART(nd, NFSPROC_NULL, vp);
+	error = nfscl_request(nd, vp, p, cred, NULL);
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs access rpc op.
+ * For nfs version 3 and 4, use the access rpc to check accessibility. If file
+ * modes are changed on the server, accesses might still fail later.
+ */
+APPLESTATIC int
+nfsrpc_access(vnode_t vp, int acmode, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp)
+{
+	int error;
+	u_int32_t mode, rmode;
+
+	if (acmode & VREAD)
+		mode = NFSACCESS_READ;
+	else
+		mode = 0;
+	if (vnode_vtype(vp) == VDIR) {
+		if (acmode & VWRITE)
+			mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND |
+				 NFSACCESS_DELETE);
+		if (acmode & VEXEC)
+			mode |= NFSACCESS_LOOKUP;
+	} else {
+		if (acmode & VWRITE)
+			mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
+		if (acmode & VEXEC)
+			mode |= NFSACCESS_EXECUTE;
+	}
+
+	/*
+	 * Now, just call nfsrpc_accessrpc() to do the actual RPC.
+	 */
+	error = nfsrpc_accessrpc(vp, mode, cred, p, nap, attrflagp, &rmode,
+	    NULL);
+
+	/*
+	 * The NFS V3 spec does not clarify whether or not
+	 * the returned access bits can be a superset of
+	 * the ones requested, so...
+	 */
+	if (!error && (rmode & mode) != mode)
+		error = EACCES;
+	return (error);
+}
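
Editor's note (illustrative sketch, not part of the imported file): because the returned access bits may be a superset of the request, the check above only treats the RPC as a success when every requested bit came back set. The mask test, as a standalone sketch with hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>

    /* True only if every bit in wanted is also present in granted. */
    static bool
    access_granted(uint32_t wanted, uint32_t granted)
    {
    	return ((granted & wanted) == wanted);
    }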
+
+/*
+ * The actual rpc, separated out for Darwin.
+ */
+APPLESTATIC int
+nfsrpc_accessrpc(vnode_t vp, u_int32_t mode, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, u_int32_t *rmodep,
+    void *stuff)
+{
+	u_int32_t *tl;
+	u_int32_t supported, rmode;
+	int error;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	nfsattrbit_t attrbits;
+
+	*attrflagp = 0;
+	supported = mode;
+	NFSCL_REQSTART(nd, NFSPROC_ACCESS, vp);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(mode);
+	if (nd->nd_flag & ND_NFSV4) {
+		/*
+		 * And do a Getattr op.
+		 */
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		NFSGETATTR_ATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & ND_NFSV3) {
+		error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+		if (error)
+			goto nfsmout;
+	}
+	if (!nd->nd_repstat) {
+		if (nd->nd_flag & ND_NFSV4) {
+			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			supported = fxdr_unsigned(u_int32_t, *tl++);
+		} else {
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		}
+		rmode = fxdr_unsigned(u_int32_t, *tl);
+		if (nd->nd_flag & ND_NFSV4)
+			error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+
+		/*
+		 * It's not obvious what should be done about
+		 * unsupported access modes. For now, be paranoid
+		 * and clear the unsupported ones.
+		 */
+		rmode &= supported;
+		*rmodep = rmode;
+	} else
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs open rpc
+ */
+APPLESTATIC int
+nfsrpc_open(vnode_t vp, int amode, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclopen *op;
+	struct nfscldeleg *dp;
+	struct nfsfh *nfhp;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	u_int32_t mode, clidrev;
+	int ret, newone, error, expireret = 0, retrycnt;
+
+	/*
+	 * For NFSv4, Open Ops are only done on Regular Files.
+	 */
+	if (vnode_vtype(vp) != VREG)
+		return (0);
+	mode = 0;
+	if (amode & FREAD)
+		mode |= NFSV4OPEN_ACCESSREAD;
+	if (amode & FWRITE)
+		mode |= NFSV4OPEN_ACCESSWRITE;
+	nfhp = np->n_fhp;
+
+	retrycnt = 0;
+#ifdef notdef
+{ char name[100]; int namel;
+namel = (np->n_v4->n4_namelen < 100) ? np->n_v4->n4_namelen : 99;
+bcopy(NFS4NODENAME(np->n_v4), name, namel);
+name[namel] = '\0';
+printf("rpcopen p=0x%x name=%s",p->p_pid,name);
+if (nfhp->nfh_len > 0) printf(" fh=0x%x\n",nfhp->nfh_fh[12]);
+else printf(" fhl=0\n");
+}
+#endif
+	do {
+	    dp = NULL;
+	    error = nfscl_open(vp, nfhp->nfh_fh, nfhp->nfh_len, mode, 1,
+		cred, p, NULL, &op, &newone, &ret, 1);
+	    if (error) {
+		return (error);
+	    }
+	    if (nmp->nm_clp != NULL)
+		clidrev = nmp->nm_clp->nfsc_clientidrev;
+	    else
+		clidrev = 0;
+	    if (ret == NFSCLOPEN_DOOPEN) {
+		if (np->n_v4 != NULL) {
+			/*
+			 * For the first attempt, try and get a layout, if
+			 * pNFS is enabled for the mount.
+			 */
+			if (!NFSHASPNFS(nmp) || nfscl_enablecallb == 0 ||
+			    nfs_numnfscbd == 0 ||
+			    (np->n_flag & NNOLAYOUT) != 0 || retrycnt > 0)
+				error = nfsrpc_openrpc(nmp, vp,
+				    np->n_v4->n4_data,
+				    np->n_v4->n4_fhlen, np->n_fhp->nfh_fh,
+				    np->n_fhp->nfh_len, mode, op,
+				    NFS4NODENAME(np->n_v4),
+				    np->n_v4->n4_namelen,
+				    &dp, 0, 0x0, cred, p, 0, 0);
+			else
+				error = nfsrpc_getopenlayout(nmp, vp,
+				    np->n_v4->n4_data,
+				    np->n_v4->n4_fhlen, np->n_fhp->nfh_fh,
+				    np->n_fhp->nfh_len, mode, op,
+				    NFS4NODENAME(np->n_v4),
+				    np->n_v4->n4_namelen, &dp, cred, p);
+			if (dp != NULL) {
+#ifdef APPLE
+				OSBitAndAtomic((int32_t)~NDELEGMOD, (UInt32 *)&np->n_flag);
+#else
+				NFSLOCKNODE(np);
+				np->n_flag &= ~NDELEGMOD;
+				/*
+				 * Invalidate the attribute cache, so that
+				 * attributes that pre-date the issue of a
+				 * delegation are not cached, since the
+				 * cached attributes will remain valid while
+				 * the delegation is held.
+				 */
+				NFSINVALATTRCACHE(np);
+				NFSUNLOCKNODE(np);
+#endif
+				(void) nfscl_deleg(nmp->nm_mountp,
+				    op->nfso_own->nfsow_clp,
+				    nfhp->nfh_fh, nfhp->nfh_len, cred, p, &dp);
+			}
+		} else {
+			error = EIO;
+		}
+		newnfs_copyincred(cred, &op->nfso_cred);
+	    } else if (ret == NFSCLOPEN_SETCRED)
+		/*
+		 * This is a new local open on a delegation. It needs
+		 * to have credentials so that an open can be done
+		 * against the server during recovery.
+		 */
+		newnfs_copyincred(cred, &op->nfso_cred);
+
+	    /*
+	     * nfso_opencnt is the count of how many VOP_OPEN()s have
+	     * been done on this Open successfully and a VOP_CLOSE()
+	     * is expected for each of these.
+	     * If error is non-zero, don't increment it, since the Open
+	     * hasn't succeeded yet.
+	     */
+	    if (!error)
+		op->nfso_opencnt++;
+	    nfscl_openrelease(nmp, op, error, newone);
+	    if (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID ||
+		error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+		error == NFSERR_BADSESSION) {
+		(void) nfs_catnap(PZERO, error, "nfs_open");
+	    } else if ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID)
+		&& clidrev != 0) {
+		expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p);
+		retrycnt++;
+	    }
+	} while (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID ||
+	    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+	    error == NFSERR_BADSESSION ||
+	    ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) &&
+	     expireret == 0 && clidrev != 0 && retrycnt < 4));
+	if (error && retrycnt >= 4)
+		error = EIO;
+	return (error);
+}
+
+/*
+ * the actual open rpc
+ */
+APPLESTATIC int
+nfsrpc_openrpc(struct nfsmount *nmp, vnode_t vp, u_int8_t *nfhp, int fhlen,
+    u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
+    u_int8_t *name, int namelen, struct nfscldeleg **dpp,
+    int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p,
+    int syscred, int recursed)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfscldeleg *dp, *ndp = NULL;
+	struct nfsvattr nfsva;
+	u_int32_t rflags, deleg;
+	nfsattrbit_t attrbits;
+	int error, ret, acesize, limitby;
+	struct nfsclsession *tsep;
+
+	dp = *dpp;
+	*dpp = NULL;
+	nfscl_reqstart(nd, NFSPROC_OPEN, nmp, nfhp, fhlen, NULL, NULL, 0, 0);
+	NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(op->nfso_own->nfsow_seqid);
+	*tl++ = txdr_unsigned(mode & NFSV4OPEN_ACCESSBOTH);
+	*tl++ = txdr_unsigned((mode >> NFSLCK_SHIFT) & NFSV4OPEN_DENYBOTH);
+	tsep = nfsmnt_mdssession(nmp);
+	*tl++ = tsep->nfsess_clientid.lval[0];
+	*tl = tsep->nfsess_clientid.lval[1];
+	(void) nfsm_strtom(nd, op->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN);
+	NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFSV4OPEN_NOCREATE);
+	if (reclaim) {
+		*tl = txdr_unsigned(NFSV4OPEN_CLAIMPREVIOUS);
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(delegtype);
+	} else {
+		if (dp != NULL) {
+			*tl = txdr_unsigned(NFSV4OPEN_CLAIMDELEGATECUR);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID);
+			if (NFSHASNFSV4N(nmp))
+				*tl++ = 0;
+			else
+				*tl++ = dp->nfsdl_stateid.seqid;
+			*tl++ = dp->nfsdl_stateid.other[0];
+			*tl++ = dp->nfsdl_stateid.other[1];
+			*tl = dp->nfsdl_stateid.other[2];
+		} else {
+			*tl = txdr_unsigned(NFSV4OPEN_CLAIMNULL);
+		}
+		(void) nfsm_strtom(nd, name, namelen);
+	}
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_GETATTR);
+	NFSZERO_ATTRBIT(&attrbits);
+	NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_CHANGE);
+	NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFY);
+	(void) nfsrv_putattrbit(nd, &attrbits);
+	if (syscred)
+		nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error)
+		return (error);
+	NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd);
+	if (!nd->nd_repstat) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+		    6 * NFSX_UNSIGNED);
+		op->nfso_stateid.seqid = *tl++;
+		op->nfso_stateid.other[0] = *tl++;
+		op->nfso_stateid.other[1] = *tl++;
+		op->nfso_stateid.other[2] = *tl;
+		rflags = fxdr_unsigned(u_int32_t, *(tl + 6));
+		error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+		if (error)
+			goto nfsmout;
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		deleg = fxdr_unsigned(u_int32_t, *tl);
+		if (deleg == NFSV4OPEN_DELEGATEREAD ||
+		    deleg == NFSV4OPEN_DELEGATEWRITE) {
+			if (!(op->nfso_own->nfsow_clp->nfsc_flags &
+			      NFSCLFLAGS_FIRSTDELEG))
+				op->nfso_own->nfsow_clp->nfsc_flags |=
+				  (NFSCLFLAGS_FIRSTDELEG | NFSCLFLAGS_GOTDELEG);
+			ndp = malloc(
+			    sizeof (struct nfscldeleg) + newfhlen,
+			    M_NFSCLDELEG, M_WAITOK);
+			LIST_INIT(&ndp->nfsdl_owner);
+			LIST_INIT(&ndp->nfsdl_lock);
+			ndp->nfsdl_clp = op->nfso_own->nfsow_clp;
+			ndp->nfsdl_fhlen = newfhlen;
+			NFSBCOPY(newfhp, ndp->nfsdl_fh, newfhlen);
+			newnfs_copyincred(cred, &ndp->nfsdl_cred);
+			nfscl_lockinit(&ndp->nfsdl_rwlock);
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+			    NFSX_UNSIGNED);
+			ndp->nfsdl_stateid.seqid = *tl++;
+			ndp->nfsdl_stateid.other[0] = *tl++;
+			ndp->nfsdl_stateid.other[1] = *tl++;
+			ndp->nfsdl_stateid.other[2] = *tl++;
+			ret = fxdr_unsigned(int, *tl);
+			if (deleg == NFSV4OPEN_DELEGATEWRITE) {
+				ndp->nfsdl_flags = NFSCLDL_WRITE;
+				/*
+				 * Indicates how much the file can grow.
+				 */
+				NFSM_DISSECT(tl, u_int32_t *,
+				    3 * NFSX_UNSIGNED);
+				limitby = fxdr_unsigned(int, *tl++);
+				switch (limitby) {
+				case NFSV4OPEN_LIMITSIZE:
+					ndp->nfsdl_sizelimit = fxdr_hyper(tl);
+					break;
+				case NFSV4OPEN_LIMITBLOCKS:
+					ndp->nfsdl_sizelimit =
+					    fxdr_unsigned(u_int64_t, *tl++);
+					ndp->nfsdl_sizelimit *=
+					    fxdr_unsigned(u_int64_t, *tl);
+					break;
+				default:
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+			} else {
+				ndp->nfsdl_flags = NFSCLDL_READ;
+			}
+			if (ret)
+				ndp->nfsdl_flags |= NFSCLDL_RECALL;
+			error = nfsrv_dissectace(nd, &ndp->nfsdl_ace, &ret,
+			    &acesize, p);
+			if (error)
+				goto nfsmout;
+		} else if (deleg != NFSV4OPEN_DELEGATENONE) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
+		    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+		    NULL, NULL, NULL, p, cred);
+		if (error)
+			goto nfsmout;
+		if (ndp != NULL) {
+			ndp->nfsdl_change = nfsva.na_filerev;
+			ndp->nfsdl_modtime = nfsva.na_mtime;
+			ndp->nfsdl_flags |= NFSCLDL_MODTIMESET;
+		}
+		if (!reclaim && (rflags & NFSV4OPEN_RESULTCONFIRM)) {
+		    do {
+			ret = nfsrpc_openconfirm(vp, newfhp, newfhlen, op,
+			    cred, p);
+			if (ret == NFSERR_DELAY)
+			    (void) nfs_catnap(PZERO, ret, "nfs_open");
+		    } while (ret == NFSERR_DELAY);
+		    error = ret;
+		}
+		if ((rflags & NFSV4OPEN_LOCKTYPEPOSIX) ||
+		    nfscl_assumeposixlocks)
+		    op->nfso_posixlock = 1;
+		else
+		    op->nfso_posixlock = 0;
+
+		/*
+		 * If the server is handing out delegations, but we didn't
+		 * get one because an OpenConfirm was required, try the
+		 * Open again, to get a delegation. This is a harmless no-op,
+		 * from a server's point of view.
+		 */
+		if (!reclaim && (rflags & NFSV4OPEN_RESULTCONFIRM) &&
+		    (op->nfso_own->nfsow_clp->nfsc_flags & NFSCLFLAGS_GOTDELEG)
+		    && !error && dp == NULL && ndp == NULL && !recursed) {
+		    do {
+			ret = nfsrpc_openrpc(nmp, vp, nfhp, fhlen, newfhp,
+			    newfhlen, mode, op, name, namelen, &ndp, 0, 0x0,
+			    cred, p, syscred, 1);
+			if (ret == NFSERR_DELAY)
+			    (void) nfs_catnap(PZERO, ret, "nfs_open2");
+		    } while (ret == NFSERR_DELAY);
+		    if (ret) {
+			if (ndp != NULL) {
+				free(ndp, M_NFSCLDELEG);
+				ndp = NULL;
+			}
+			if (ret == NFSERR_STALECLIENTID ||
+			    ret == NFSERR_STALEDONTRECOVER ||
+			    ret == NFSERR_BADSESSION)
+				error = ret;
+		    }
+		}
+	}
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+	if (error == NFSERR_STALECLIENTID)
+		nfscl_initiate_recovery(op->nfso_own->nfsow_clp);
+nfsmout:
+	if (!error)
+		*dpp = ndp;
+	else if (ndp != NULL)
+		free(ndp, M_NFSCLDELEG);
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * open downgrade rpc
+ */
+APPLESTATIC int
+nfsrpc_opendowngrade(vnode_t vp, u_int32_t mode, struct nfsclopen *op,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+
+	NFSCL_REQSTART(nd, NFSPROC_OPENDOWNGRADE, vp);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + 3 * NFSX_UNSIGNED);
+	if (NFSHASNFSV4N(VFSTONFS(vnode_mount(vp))))
+		*tl++ = 0;
+	else
+		*tl++ = op->nfso_stateid.seqid;
+	*tl++ = op->nfso_stateid.other[0];
+	*tl++ = op->nfso_stateid.other[1];
+	*tl++ = op->nfso_stateid.other[2];
+	*tl++ = txdr_unsigned(op->nfso_own->nfsow_seqid);
+	*tl++ = txdr_unsigned(mode & NFSV4OPEN_ACCESSBOTH);
+	*tl = txdr_unsigned((mode >> NFSLCK_SHIFT) & NFSV4OPEN_DENYBOTH);
+	error = nfscl_request(nd, vp, p, cred, NULL);
+	if (error)
+		return (error);
+	NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd);
+	if (!nd->nd_repstat) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID);
+		op->nfso_stateid.seqid = *tl++;
+		op->nfso_stateid.other[0] = *tl++;
+		op->nfso_stateid.other[1] = *tl++;
+		op->nfso_stateid.other[2] = *tl;
+	}
+	if (nd->nd_repstat && error == 0)
+		error = nd->nd_repstat;
+	if (error == NFSERR_STALESTATEID)
+		nfscl_initiate_recovery(op->nfso_own->nfsow_clp);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * V4 Close operation.
+ */
+APPLESTATIC int
+nfsrpc_close(vnode_t vp, int doclose, NFSPROC_T *p)
+{
+	struct nfsclclient *clp;
+	int error;
+
+	if (vnode_vtype(vp) != VREG)
+		return (0);
+	if (doclose)
+		error = nfscl_doclose(vp, &clp, p);
+	else
+		error = nfscl_getclose(vp, &clp);
+	if (error)
+		return (error);
+
+	nfscl_clientrelease(clp);
+	return (0);
+}
+
+/*
+ * Close the open.
+ */
+APPLESTATIC void
+nfsrpc_doclose(struct nfsmount *nmp, struct nfsclopen *op, NFSPROC_T *p)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfscllockowner *lp, *nlp;
+	struct nfscllock *lop, *nlop;
+	struct ucred *tcred;
+	u_int64_t off = 0, len = 0;
+	u_int32_t type = NFSV4LOCKT_READ;
+	int error, do_unlock, trycnt;
+
+	tcred = newnfs_getcred();
+	newnfs_copycred(&op->nfso_cred, tcred);
+	/*
+	 * (Theoretically this could be done in the same
+	 *  compound as the close, but having multiple
+	 *  sequenced Ops in the same compound might be
+	 *  too scary for some servers.)
+	 */
+	if (op->nfso_posixlock) {
+		off = 0;
+		len = NFS64BITSSET;
+		type = NFSV4LOCKT_READ;
+	}
+
+	/*
+	 * Since this function is only called from VOP_INACTIVE(), no
+	 * other thread will be manipulating this Open. As such, the
+	 * lock lists are not being changed by other threads, so it should
+	 * be safe to do this without locking.
+	 */
+	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+		do_unlock = 1;
+		LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
+			if (op->nfso_posixlock == 0) {
+				off = lop->nfslo_first;
+				len = lop->nfslo_end - lop->nfslo_first;
+				if (lop->nfslo_type == F_WRLCK)
+					type = NFSV4LOCKT_WRITE;
+				else
+					type = NFSV4LOCKT_READ;
+			}
+			if (do_unlock) {
+				trycnt = 0;
+				do {
+					error = nfsrpc_locku(nd, nmp, lp, off,
+					    len, type, tcred, p, 0);
+					if ((nd->nd_repstat == NFSERR_GRACE ||
+					    nd->nd_repstat == NFSERR_DELAY) &&
+					    error == 0)
+						(void) nfs_catnap(PZERO,
+						    (int)nd->nd_repstat,
+						    "nfs_close");
+				} while ((nd->nd_repstat == NFSERR_GRACE ||
+				    nd->nd_repstat == NFSERR_DELAY) &&
+				    error == 0 && trycnt++ < 5);
+				if (op->nfso_posixlock)
+					do_unlock = 0;
+			}
+			nfscl_freelock(lop, 0);
+		}
+		/*
+		 * Do a ReleaseLockOwner.
+		 * The lock owner name nfsl_owner may be used by other opens for
+		 * other files but the lock_owner4 name that nfsrpc_rellockown()
+		 * puts on the wire has the file handle for this file appended
+		 * to it, so it can be done now.
+		 */
+		(void)nfsrpc_rellockown(nmp, lp, lp->nfsl_open->nfso_fh,
+		    lp->nfsl_open->nfso_fhlen, tcred, p);
+	}
+
+	/*
+	 * There could be other Opens for different files on the same
+	 * OpenOwner, so locking is required.
+	 */
+	NFSLOCKCLSTATE();
+	nfscl_lockexcl(&op->nfso_own->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
+	NFSUNLOCKCLSTATE();
+	do {
+		error = nfscl_tryclose(op, tcred, nmp, p);
+		if (error == NFSERR_GRACE)
+			(void) nfs_catnap(PZERO, error, "nfs_close");
+	} while (error == NFSERR_GRACE);
+	NFSLOCKCLSTATE();
+	nfscl_lockunlock(&op->nfso_own->nfsow_rwlock);
+
+	LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp)
+		nfscl_freelockowner(lp, 0);
+	nfscl_freeopen(op, 0);
+	NFSUNLOCKCLSTATE();
+	NFSFREECRED(tcred);
+}
+
+/*
+ * The actual Close RPC.
+ */
+APPLESTATIC int
+nfsrpc_closerpc(struct nfsrv_descript *nd, struct nfsmount *nmp,
+    struct nfsclopen *op, struct ucred *cred, NFSPROC_T *p,
+    int syscred)
+{
+	u_int32_t *tl;
+	int error;
+
+	nfscl_reqstart(nd, NFSPROC_CLOSE, nmp, op->nfso_fh,
+	    op->nfso_fhlen, NULL, NULL, 0, 0);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED + NFSX_STATEID);
+	*tl++ = txdr_unsigned(op->nfso_own->nfsow_seqid);
+	if (NFSHASNFSV4N(nmp))
+		*tl++ = 0;
+	else
+		*tl++ = op->nfso_stateid.seqid;
+	*tl++ = op->nfso_stateid.other[0];
+	*tl++ = op->nfso_stateid.other[1];
+	*tl = op->nfso_stateid.other[2];
+	if (syscred)
+		nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error)
+		return (error);
+	NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd);
+	if (nd->nd_repstat == 0)
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID);
+	error = nd->nd_repstat;
+	if (error == NFSERR_STALESTATEID)
+		nfscl_initiate_recovery(op->nfso_own->nfsow_clp);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * V4 Open Confirm RPC.
+ */
+APPLESTATIC int
+nfsrpc_openconfirm(vnode_t vp, u_int8_t *nfhp, int fhlen,
+    struct nfsclopen *op, struct ucred *cred, NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp;
+	int error;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (NFSHASNFSV4N(nmp))
+		return (0);		/* No confirmation for NFSv4.1. */
+	nfscl_reqstart(nd, NFSPROC_OPENCONFIRM, nmp, nfhp, fhlen, NULL, NULL,
+	    0, 0);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED + NFSX_STATEID);
+	*tl++ = op->nfso_stateid.seqid;
+	*tl++ = op->nfso_stateid.other[0];
+	*tl++ = op->nfso_stateid.other[1];
+	*tl++ = op->nfso_stateid.other[2];
+	*tl = txdr_unsigned(op->nfso_own->nfsow_seqid);
+	error = nfscl_request(nd, vp, p, cred, NULL);
+	if (error)
+		return (error);
+	NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd);
+	if (!nd->nd_repstat) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID);
+		op->nfso_stateid.seqid = *tl++;
+		op->nfso_stateid.other[0] = *tl++;
+		op->nfso_stateid.other[1] = *tl++;
+		op->nfso_stateid.other[2] = *tl;
+	}
+	error = nd->nd_repstat;
+	if (error == NFSERR_STALESTATEID)
+		nfscl_initiate_recovery(op->nfso_own->nfsow_clp);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do the setclientid and setclientid confirm RPCs. Called from nfs_statfs()
+ * when a mount has just occurred and when the server replies NFSERR_EXPIRED.
+ */
+APPLESTATIC int
+nfsrpc_setclient(struct nfsmount *nmp, struct nfsclclient *clp, int reclaim,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	nfsattrbit_t attrbits;
+	u_int8_t *cp = NULL, *cp2, addr[INET6_ADDRSTRLEN + 9];
+	u_short port;
+	int error, isinet6 = 0, callblen;
+	nfsquad_t confirm;
+	u_int32_t lease;
+	static u_int32_t rev = 0;
+	struct nfsclds *dsp;
+	struct in6_addr a6;
+	struct nfsclsession *tsep;
+
+	if (nfsboottime.tv_sec == 0)
+		NFSSETBOOTTIME(nfsboottime);
+	clp->nfsc_rev = rev++;
+	if (NFSHASNFSV4N(nmp)) {
+		/*
+		 * Either there was no previous session or the
+		 * previous session has failed, so...
+		 * do an ExchangeID followed by the CreateSession.
+		 */
+		error = nfsrpc_exchangeid(nmp, clp, &nmp->nm_sockreq,
+		    NFSV4EXCH_USEPNFSMDS | NFSV4EXCH_USENONPNFS, &dsp, cred, p);
+		NFSCL_DEBUG(1, "aft exch=%d\n", error);
+		if (error == 0)
+			error = nfsrpc_createsession(nmp, &dsp->nfsclds_sess,
+			    &nmp->nm_sockreq,
+			    dsp->nfsclds_sess.nfsess_sequenceid, 1, cred, p);
+		if (error == 0) {
+			NFSLOCKMNT(nmp);
+			/*
+			 * The old sessions cannot be safely free'd
+			 * here, since they may still be used by
+			 * in-progress RPCs.
+			 */
+			tsep = NULL;
+			if (TAILQ_FIRST(&nmp->nm_sess) != NULL)
+				tsep = NFSMNT_MDSSESSION(nmp);
+			TAILQ_INSERT_HEAD(&nmp->nm_sess, dsp,
+			    nfsclds_list);
+			/*
+			 * Wake up RPCs waiting for a slot on the
+			 * old session. These will then fail with
+			 * NFSERR_BADSESSION and be retried with the
+			 * new session by nfsv4_setsequence().
+			 * Also wakeup() processes waiting for the
+			 * new session.
+			 */
+			if (tsep != NULL)
+				wakeup(&tsep->nfsess_slots);
+			wakeup(&nmp->nm_sess);
+			NFSUNLOCKMNT(nmp);
+		} else
+			nfscl_freenfsclds(dsp);
+		NFSCL_DEBUG(1, "aft createsess=%d\n", error);
+		if (error == 0 && reclaim == 0) {
+			error = nfsrpc_reclaimcomplete(nmp, cred, p);
+			NFSCL_DEBUG(1, "aft reclaimcomp=%d\n", error);
+			if (error == NFSERR_COMPLETEALREADY ||
+			    error == NFSERR_NOTSUPP)
+				/* Ignore this error. */
+				error = 0;
+		}
+		return (error);
+	}
+
+	/*
+	 * Allocate a single session structure for NFSv4.0, because some of
+	 * the fields are used by NFSv4.0 although it doesn't do a session.
+	 */
+	dsp = malloc(sizeof(struct nfsclds), M_NFSCLDS, M_WAITOK | M_ZERO);
+	mtx_init(&dsp->nfsclds_mtx, "nfsds", NULL, MTX_DEF);
+	mtx_init(&dsp->nfsclds_sess.nfsess_mtx, "nfssession", NULL, MTX_DEF);
+	NFSLOCKMNT(nmp);
+	TAILQ_INSERT_HEAD(&nmp->nm_sess, dsp, nfsclds_list);
+	tsep = NFSMNT_MDSSESSION(nmp);
+	NFSUNLOCKMNT(nmp);
+
+	nfscl_reqstart(nd, NFSPROC_SETCLIENTID, nmp, NULL, 0, NULL, NULL, 0, 0);
+	NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(nfsboottime.tv_sec);
+	*tl = txdr_unsigned(clp->nfsc_rev);
+	(void) nfsm_strtom(nd, clp->nfsc_id, clp->nfsc_idlen);
+
+	/*
+	 * set up the callback address
+	 */
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFS_CALLBCKPROG);
+	callblen = strlen(nfsv4_callbackaddr);
+	if (callblen == 0)
+		cp = nfscl_getmyip(nmp, &a6, &isinet6);
+	if (nfscl_enablecallb && nfs_numnfscbd > 0 &&
+	    (callblen > 0 || cp != NULL)) {
+		port = htons(nfsv4_cbport);
+		cp2 = (u_int8_t *)&port;
+#ifdef INET6
+		if ((callblen > 0 &&
+		     strchr(nfsv4_callbackaddr, ':')) || isinet6) {
+			char ip6buf[INET6_ADDRSTRLEN], *ip6add;
+
+			(void) nfsm_strtom(nd, "tcp6", 4);
+			if (callblen == 0) {
+				ip6_sprintf(ip6buf, (struct in6_addr *)cp);
+				ip6add = ip6buf;
+			} else {
+				ip6add = nfsv4_callbackaddr;
+			}
+			snprintf(addr, INET6_ADDRSTRLEN + 9, "%s.%d.%d",
+			    ip6add, cp2[0], cp2[1]);
+		} else
+#endif
+		{
+			(void) nfsm_strtom(nd, "tcp", 3);
+			if (callblen == 0)
+				snprintf(addr, INET6_ADDRSTRLEN + 9,
+				    "%d.%d.%d.%d.%d.%d", cp[0], cp[1],
+				    cp[2], cp[3], cp2[0], cp2[1]);
+			else
+				snprintf(addr, INET6_ADDRSTRLEN + 9,
+				    "%s.%d.%d", nfsv4_callbackaddr,
+				    cp2[0], cp2[1]);
+		}
+		(void) nfsm_strtom(nd, addr, strlen(addr));
+	} else {
+		(void) nfsm_strtom(nd, "tcp", 3);
+		(void) nfsm_strtom(nd, "0.0.0.0.0.0", 11);
+	}
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(clp->nfsc_cbident);
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+		NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error)
+		return (error);
+	if (nd->nd_repstat == 0) {
+	    NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+	    tsep->nfsess_clientid.lval[0] = *tl++;
+	    tsep->nfsess_clientid.lval[1] = *tl++;
+	    confirm.lval[0] = *tl++;
+	    confirm.lval[1] = *tl;
+	    mbuf_freem(nd->nd_mrep);
+	    nd->nd_mrep = NULL;
+
+	    /*
+	     * and confirm it.
+	     */
+	    nfscl_reqstart(nd, NFSPROC_SETCLIENTIDCFRM, nmp, NULL, 0, NULL,
+		NULL, 0, 0);
+	    NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+	    *tl++ = tsep->nfsess_clientid.lval[0];
+	    *tl++ = tsep->nfsess_clientid.lval[1];
+	    *tl++ = confirm.lval[0];
+	    *tl = confirm.lval[1];
+	    nd->nd_flag |= ND_USEGSSNAME;
+	    error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p,
+		cred, NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	    if (error)
+		return (error);
+	    mbuf_freem(nd->nd_mrep);
+	    nd->nd_mrep = NULL;
+	    if (nd->nd_repstat == 0) {
+		nfscl_reqstart(nd, NFSPROC_GETATTR, nmp, nmp->nm_fh,
+		    nmp->nm_fhsize, NULL, NULL, 0, 0);
+		NFSZERO_ATTRBIT(&attrbits);
+		NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_LEASETIME);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+		nd->nd_flag |= ND_USEGSSNAME;
+		error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p,
+		    cred, NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+		if (error)
+		    return (error);
+		if (nd->nd_repstat == 0) {
+		    error = nfsv4_loadattr(nd, NULL, NULL, NULL, NULL, 0, NULL,
+			NULL, NULL, NULL, NULL, 0, NULL, &lease, NULL, p, cred);
+		    if (error)
+			goto nfsmout;
+		    clp->nfsc_renew = NFSCL_RENEW(lease);
+		    clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
+		    clp->nfsc_clientidrev++;
+		    if (clp->nfsc_clientidrev == 0)
+			clp->nfsc_clientidrev++;
+		}
+	    }
+	}
+	error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
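
Editor's note (illustrative sketch, not part of the imported file): the callback address built above is an RPC "universal address": the dotted IP followed by the two bytes of the port, i.e. "h1.h2.h3.h4.p1.p2". A standalone IPv4 sketch of that formatting; the function name is hypothetical:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Format an IPv4 address and port as a universal address, e.g.
     * port 832 becomes ".3.64" (3 * 256 + 64) after the dotted quad.
     */
    static void
    format_uaddr(char *buf, size_t buflen, const struct sockaddr_in *sin)
    {
    	const uint8_t *a = (const uint8_t *)&sin->sin_addr.s_addr;
    	uint16_t port = ntohs(sin->sin_port);

    	snprintf(buf, buflen, "%u.%u.%u.%u.%u.%u",
    	    a[0], a[1], a[2], a[3], port >> 8, port & 0xff);
    }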
+
+/*
+ * nfs getattr call.
+ */
+APPLESTATIC int
+nfsrpc_getattr(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
+    struct nfsvattr *nap, void *stuff)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+	nfsattrbit_t attrbits;
+	
+	NFSCL_REQSTART(nd, NFSPROC_GETATTR, vp);
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSGETATTR_ATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	if (!nd->nd_repstat)
+		error = nfsm_loadattr(nd, nap);
+	else
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs getattr call with non-vnode arguments.
+ */
+APPLESTATIC int
+nfsrpc_getattrnovp(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, int syscred,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, u_int64_t *xidp,
+    uint32_t *leasep)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error, vers = NFS_VER2;
+	nfsattrbit_t attrbits;
+	
+	nfscl_reqstart(nd, NFSPROC_GETATTR, nmp, fhp, fhlen, NULL, NULL, 0, 0);
+	if (nd->nd_flag & ND_NFSV4) {
+		vers = NFS_VER4;
+		NFSGETATTR_ATTRBIT(&attrbits);
+		NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_LEASETIME);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	} else if (nd->nd_flag & ND_NFSV3) {
+		vers = NFS_VER3;
+	}
+	if (syscred)
+		nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, vers, NULL, 1, xidp, NULL);
+	if (error)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		if ((nd->nd_flag & ND_NFSV4) != 0)
+			error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
+			    NULL, NULL, NULL, NULL, NULL, 0, NULL, leasep, NULL,
+			    NULL, NULL);
+		else
+			error = nfsm_loadattr(nd, nap);
+	} else
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do an nfs setattr operation.
+ */
+APPLESTATIC int
+nfsrpc_setattr(vnode_t vp, struct vattr *vap, NFSACL_T *aclp,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *rnap, int *attrflagp,
+    void *stuff)
+{
+	int error, expireret = 0, openerr, retrycnt;
+	u_int32_t clidrev = 0, mode;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsfh *nfhp;
+	nfsv4stateid_t stateid;
+	void *lckp;
+
+	if (nmp->nm_clp != NULL)
+		clidrev = nmp->nm_clp->nfsc_clientidrev;
+	if (vap != NULL && NFSATTRISSET(u_quad_t, vap, va_size))
+		mode = NFSV4OPEN_ACCESSWRITE;
+	else
+		mode = NFSV4OPEN_ACCESSREAD;
+	retrycnt = 0;
+	do {
+		lckp = NULL;
+		openerr = 1;
+		if (NFSHASNFSV4(nmp)) {
+			nfhp = VTONFS(vp)->n_fhp;
+			error = nfscl_getstateid(vp, nfhp->nfh_fh,
+			    nfhp->nfh_len, mode, 0, cred, p, &stateid, &lckp);
+			if (error && vnode_vtype(vp) == VREG &&
+			    (mode == NFSV4OPEN_ACCESSWRITE ||
+			     nfstest_openallsetattr)) {
+				/*
+				 * No Open stateid, so try and open the file
+				 * now.
+				 */
+				if (mode == NFSV4OPEN_ACCESSWRITE)
+					openerr = nfsrpc_open(vp, FWRITE, cred,
+					    p);
+				else
+					openerr = nfsrpc_open(vp, FREAD, cred,
+					    p);
+				if (!openerr)
+					(void) nfscl_getstateid(vp,
+					    nfhp->nfh_fh, nfhp->nfh_len,
+					    mode, 0, cred, p, &stateid, &lckp);
+			}
+		}
+		if (vap != NULL)
+			error = nfsrpc_setattrrpc(vp, vap, &stateid, cred, p,
+			    rnap, attrflagp, stuff);
+		else
+			error = nfsrpc_setaclrpc(vp, cred, p, aclp, &stateid,
+			    stuff);
+		if (error == NFSERR_OPENMODE && mode == NFSV4OPEN_ACCESSREAD) {
+			NFSLOCKMNT(nmp);
+			nmp->nm_state |= NFSSTA_OPENMODE;
+			NFSUNLOCKMNT(nmp);
+		}
+		if (error == NFSERR_STALESTATEID)
+			nfscl_initiate_recovery(nmp->nm_clp);
+		if (lckp != NULL)
+			nfscl_lockderef(lckp);
+		if (!openerr)
+			(void) nfsrpc_close(vp, 0, p);
+		if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+		    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+		    error == NFSERR_OLDSTATEID || error == NFSERR_BADSESSION) {
+			(void) nfs_catnap(PZERO, error, "nfs_setattr");
+		} else if ((error == NFSERR_EXPIRED ||
+		    error == NFSERR_BADSTATEID) && clidrev != 0) {
+			expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p);
+		}
+		retrycnt++;
+	} while (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+	    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+	    error == NFSERR_BADSESSION ||
+	    (error == NFSERR_OLDSTATEID && retrycnt < 20) ||
+	    ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) &&
+	     expireret == 0 && clidrev != 0 && retrycnt < 4) ||
+	    (error == NFSERR_OPENMODE && mode == NFSV4OPEN_ACCESSREAD &&
+	     retrycnt < 4));
+	if (error && retrycnt >= 4)
+		error = EIO;
+	return (error);
+}
+
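+/*
+ * A minimal sketch of the transient-error test behind the retry loop in
+ * nfsrpc_setattr() above; the same classification recurs in nfsrpc_read()
+ * and nfsrpc_write() below.  The helper name is illustrative only and the
+ * NFSERR_* values are assumed to be the ones from fs/nfs/nfsproto.h.
+ * Errors in this set are retried after a short nfs_catnap(), while the
+ * stateid and lease errors are additionally bounded by a retry budget.
+ */
+static __inline int
+sketch_transient_nfserr(int error)
+{
+	return (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+	    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+	    error == NFSERR_OLDSTATEID || error == NFSERR_BADSESSION);
+}
+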
+static int
+nfsrpc_setattrrpc(vnode_t vp, struct vattr *vap,
+    nfsv4stateid_t *stateidp, struct ucred *cred, NFSPROC_T *p,
+    struct nfsvattr *rnap, int *attrflagp, void *stuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+	nfsattrbit_t attrbits;
+
+	*attrflagp = 0;
+	NFSCL_REQSTART(nd, NFSPROC_SETATTR, vp);
+	if (nd->nd_flag & ND_NFSV4)
+		nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID);
+	vap->va_type = vnode_vtype(vp);
+	nfscl_fillsattr(nd, vap, vp, NFSSATTR_FULL, 0);
+	if (nd->nd_flag & ND_NFSV3) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = newnfs_false;
+	} else if (nd->nd_flag & ND_NFSV4) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		NFSGETATTR_ATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4))
+		error = nfscl_wcc_data(nd, vp, rnap, attrflagp, NULL, stuff);
+	if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4 && !error)
+		error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+	if (!(nd->nd_flag & ND_NFSV3) && !nd->nd_repstat && !error)
+		error = nfscl_postop_attr(nd, rnap, attrflagp, stuff);
+	mbuf_freem(nd->nd_mrep);
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+	return (error);
+}
+
+/*
+ * nfs lookup rpc
+ */
+APPLESTATIC int
+nfsrpc_lookup(vnode_t dvp, char *name, int len, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *dnap, struct nfsvattr *nap,
+    struct nfsfh **nfhpp, int *attrflagp, int *dattrflagp, void *stuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp;
+	struct nfsnode *np;
+	struct nfsfh *nfhp;
+	nfsattrbit_t attrbits;
+	int error = 0, lookupp = 0;
+
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	if (vnode_vtype(dvp) != VDIR)
+		return (ENOTDIR);
+	nmp = VFSTONFS(vnode_mount(dvp));
+	if (len > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	if (NFSHASNFSV4(nmp) && len == 1 &&
+		name[0] == '.') {
+		/*
+		 * Just return the current dir's fh.
+		 */
+		np = VTONFS(dvp);
+		nfhp = malloc(sizeof (struct nfsfh) +
+			np->n_fhp->nfh_len, M_NFSFH, M_WAITOK);
+		nfhp->nfh_len = np->n_fhp->nfh_len;
+		NFSBCOPY(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len);
+		*nfhpp = nfhp;
+		return (0);
+	}
+	if (NFSHASNFSV4(nmp) && len == 2 &&
+		name[0] == '.' && name[1] == '.') {
+		lookupp = 1;
+		NFSCL_REQSTART(nd, NFSPROC_LOOKUPP, dvp);
+	} else {
+		NFSCL_REQSTART(nd, NFSPROC_LOOKUP, dvp);
+		(void) nfsm_strtom(nd, name, len);
+	}
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSGETATTR_ATTRBIT(&attrbits);
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		*tl++ = txdr_unsigned(NFSV4OP_GETFH);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	error = nfscl_request(nd, dvp, p, cred, stuff);
+	if (error)
+		return (error);
+	if (nd->nd_repstat) {
+		/*
+		 * When an NFSv4 Lookupp returns ENOENT, it means that
+		 * the lookup is at the root of an fs, so return this dir.
+		 */
+		if (nd->nd_repstat == NFSERR_NOENT && lookupp) {
+		    np = VTONFS(dvp);
+		    nfhp = malloc(sizeof (struct nfsfh) +
+			np->n_fhp->nfh_len, M_NFSFH, M_WAITOK);
+		    nfhp->nfh_len = np->n_fhp->nfh_len;
+		    NFSBCOPY(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len);
+		    *nfhpp = nfhp;
+		    mbuf_freem(nd->nd_mrep);
+		    return (0);
+		}
+		if (nd->nd_flag & ND_NFSV3)
+		    error = nfscl_postop_attr(nd, dnap, dattrflagp, stuff);
+		else if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) ==
+		    ND_NFSV4) {
+			/* Load the directory attributes. */
+			error = nfsm_loadattr(nd, dnap);
+			if (error == 0)
+				*dattrflagp = 1;
+		}
+		goto nfsmout;
+	}
+	if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4) {
+		/* Load the directory attributes. */
+		error = nfsm_loadattr(nd, dnap);
+		if (error != 0)
+			goto nfsmout;
+		*dattrflagp = 1;
+		/* Skip over the Lookup and GetFH operation status values. */
+		NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+	}
+	error = nfsm_getfh(nd, nfhpp);
+	if (error)
+		goto nfsmout;
+
+	error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+	if ((nd->nd_flag & ND_NFSV3) && !error)
+		error = nfscl_postop_attr(nd, dnap, dattrflagp, stuff);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	if (!error && nd->nd_repstat)
+		error = nd->nd_repstat;
+	return (error);
+}
+
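+/*
+ * A minimal sketch of the NFSv4 compound that nfsrpc_lookup() above builds
+ * (NFSCL_REQSTART() is expected to contribute the leading PutFH and the
+ * Lookup/Lookupp op itself): the extra GetFH and GetAttr ops let the new
+ * file handle and its attributes come back in a single round trip.  The
+ * array below is purely illustrative.
+ */
+static const u_int32_t sketch_lookup_ops[] = {
+	NFSV4OP_PUTFH, NFSV4OP_LOOKUP,	/* or NFSV4OP_LOOKUPP for ".." */
+	NFSV4OP_GETFH, NFSV4OP_GETATTR,
+};
+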
+/*
+ * Do a readlink rpc.
+ */
+APPLESTATIC int
+nfsrpc_readlink(vnode_t vp, struct uio *uiop, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsnode *np = VTONFS(vp);
+	nfsattrbit_t attrbits;
+	int error, len, cangetattr = 1;
+
+	*attrflagp = 0;
+	NFSCL_REQSTART(nd, NFSPROC_READLINK, vp);
+	if (nd->nd_flag & ND_NFSV4) {
+		/*
+		 * And do a Getattr op.
+		 */
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		NFSGETATTR_ATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & ND_NFSV3)
+		error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+	if (!nd->nd_repstat && !error) {
+		NFSM_STRSIZ(len, NFS_MAXPATHLEN);
+		/*
+		 * This seems weird to me, but must have been added to
+		 * FreeBSD for some reason. The only thing I can think of
+		 * is that there was/is some server that replies with
+		 * more link data than it should?
+		 */
+		if (len == NFS_MAXPATHLEN) {
+			NFSLOCKNODE(np);
+			if (np->n_size > 0 && np->n_size < NFS_MAXPATHLEN) {
+				len = np->n_size;
+				cangetattr = 0;
+			}
+			NFSUNLOCKNODE(np);
+		}
+		error = nfsm_mbufuio(nd, uiop, len);
+		if ((nd->nd_flag & ND_NFSV4) && !error && cangetattr)
+			error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+	}
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Read operation.
+ */
+APPLESTATIC int
+nfsrpc_read(vnode_t vp, struct uio *uiop, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff)
+{
+	int error, expireret = 0, retrycnt;
+	u_int32_t clidrev = 0;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsnode *np = VTONFS(vp);
+	struct ucred *newcred;
+	struct nfsfh *nfhp = NULL;
+	nfsv4stateid_t stateid;
+	void *lckp;
+
+	if (nmp->nm_clp != NULL)
+		clidrev = nmp->nm_clp->nfsc_clientidrev;
+	newcred = cred;
+	if (NFSHASNFSV4(nmp)) {
+		nfhp = np->n_fhp;
+		newcred = NFSNEWCRED(cred);
+	}
+	retrycnt = 0;
+	do {
+		lckp = NULL;
+		if (NFSHASNFSV4(nmp))
+			(void)nfscl_getstateid(vp, nfhp->nfh_fh, nfhp->nfh_len,
+			    NFSV4OPEN_ACCESSREAD, 0, newcred, p, &stateid,
+			    &lckp);
+		error = nfsrpc_readrpc(vp, uiop, newcred, &stateid, p, nap,
+		    attrflagp, stuff);
+		if (error == NFSERR_OPENMODE) {
+			NFSLOCKMNT(nmp);
+			nmp->nm_state |= NFSSTA_OPENMODE;
+			NFSUNLOCKMNT(nmp);
+		}
+		if (error == NFSERR_STALESTATEID)
+			nfscl_initiate_recovery(nmp->nm_clp);
+		if (lckp != NULL)
+			nfscl_lockderef(lckp);
+		if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+		    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+		    error == NFSERR_OLDSTATEID || error == NFSERR_BADSESSION) {
+			(void) nfs_catnap(PZERO, error, "nfs_read");
+		} else if ((error == NFSERR_EXPIRED ||
+		    error == NFSERR_BADSTATEID) && clidrev != 0) {
+			expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p);
+		}
+		retrycnt++;
+	} while (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+	    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+	    error == NFSERR_BADSESSION ||
+	    (error == NFSERR_OLDSTATEID && retrycnt < 20) ||
+	    ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) &&
+	     expireret == 0 && clidrev != 0 && retrycnt < 4) ||
+	    (error == NFSERR_OPENMODE && retrycnt < 4));
+	if (error && retrycnt >= 4)
+		error = EIO;
+	if (NFSHASNFSV4(nmp))
+		NFSFREECRED(newcred);
+	return (error);
+}
+
+/*
+ * The actual read RPC.
+ */
+static int
+nfsrpc_readrpc(vnode_t vp, struct uio *uiop, struct ucred *cred,
+    nfsv4stateid_t *stateidp, NFSPROC_T *p, struct nfsvattr *nap,
+    int *attrflagp, void *stuff)
+{
+	u_int32_t *tl;
+	int error = 0, len, retlen, tsiz, eof = 0;
+	struct nfsrv_descript nfsd;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsrv_descript *nd = &nfsd;
+	int rsize;
+	off_t tmp_off;
+
+	*attrflagp = 0;
+	tsiz = uio_uio_resid(uiop);
+	tmp_off = uiop->uio_offset + tsiz;
+	NFSLOCKMNT(nmp);
+	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uiop->uio_offset) {
+		NFSUNLOCKMNT(nmp);
+		return (EFBIG);
+	}
+	rsize = nmp->nm_rsize;
+	NFSUNLOCKMNT(nmp);
+	nd->nd_mrep = NULL;
+	while (tsiz > 0) {
+		*attrflagp = 0;
+		len = (tsiz > rsize) ? rsize : tsiz;
+		NFSCL_REQSTART(nd, NFSPROC_READ, vp);
+		if (nd->nd_flag & ND_NFSV4)
+			nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID);
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED * 3);
+		if (nd->nd_flag & ND_NFSV2) {
+			*tl++ = txdr_unsigned(uiop->uio_offset);
+			*tl++ = txdr_unsigned(len);
+			*tl = 0;
+		} else {
+			txdr_hyper(uiop->uio_offset, tl);
+			*(tl + 2) = txdr_unsigned(len);
+		}
+		/*
+		 * Since I can't do a Getattr for NFSv4 for Write, there
+		 * doesn't seem to be any point in doing one here, either.
+		 * (See the comment in nfsrpc_writerpc() for more info.)
+		 */
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		if (nd->nd_flag & ND_NFSV3) {
+			error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+		} else if (!nd->nd_repstat && (nd->nd_flag & ND_NFSV2)) {
+			error = nfsm_loadattr(nd, nap);
+			if (!error)
+				*attrflagp = 1;
+		}
+		if (nd->nd_repstat || error) {
+			if (!error)
+				error = nd->nd_repstat;
+			goto nfsmout;
+		}
+		if (nd->nd_flag & ND_NFSV3) {
+			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			eof = fxdr_unsigned(int, *(tl + 1));
+		} else if (nd->nd_flag & ND_NFSV4) {
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			eof = fxdr_unsigned(int, *tl);
+		}
+		NFSM_STRSIZ(retlen, len);
+		error = nfsm_mbufuio(nd, uiop, retlen);
+		if (error)
+			goto nfsmout;
+		mbuf_freem(nd->nd_mrep);
+		nd->nd_mrep = NULL;
+		tsiz -= retlen;
+		if (!(nd->nd_flag & ND_NFSV2)) {
+			if (eof || retlen == 0)
+				tsiz = 0;
+		} else if (retlen < len)
+			tsiz = 0;
+	}
+	return (0);
+nfsmout:
+	if (nd->nd_mrep != NULL)
+		mbuf_freem(nd->nd_mrep);
+	return (error);
+}
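+
+/*
+ * A minimal sketch of what txdr_hyper() does with the 64-bit offsets used
+ * by the request builders above: the value is split into two 32-bit words
+ * in network byte order, most significant word first, which is the XDR
+ * "hyper" encoding.  The real macro lives in fs/nfs/xdr_subs.h; the
+ * function form here is for illustration only.
+ */
+static __inline void
+sketch_txdr_hyper(u_int64_t val, u_int32_t *tl)
+{
+	tl[0] = htonl((u_int32_t)(val >> 32));		/* high order word */
+	tl[1] = htonl((u_int32_t)(val & 0xffffffff));	/* low order word */
+}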
+
+/*
+ * nfs write operation
+ * When called_from_strategy != 0, it should return EIO for an error that
+ * indicates recovery is in progress, so that the buffer will be left
+ * dirty and be written back to the server later. If it loops around,
+ * the recovery thread could get stuck waiting for the buffer and recovery
+ * will then deadlock.
+ */
+APPLESTATIC int
+nfsrpc_write(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp,
+    void *stuff, int called_from_strategy)
+{
+	int error, expireret = 0, retrycnt, nostateid;
+	u_int32_t clidrev = 0;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsnode *np = VTONFS(vp);
+	struct ucred *newcred;
+	struct nfsfh *nfhp = NULL;
+	nfsv4stateid_t stateid;
+	void *lckp;
+
+	*must_commit = 0;
+	if (nmp->nm_clp != NULL)
+		clidrev = nmp->nm_clp->nfsc_clientidrev;
+	newcred = cred;
+	if (NFSHASNFSV4(nmp)) {
+		newcred = NFSNEWCRED(cred);
+		nfhp = np->n_fhp;
+	}
+	retrycnt = 0;
+	do {
+		lckp = NULL;
+		nostateid = 0;
+		if (NFSHASNFSV4(nmp)) {
+			(void)nfscl_getstateid(vp, nfhp->nfh_fh, nfhp->nfh_len,
+			    NFSV4OPEN_ACCESSWRITE, 0, newcred, p, &stateid,
+			    &lckp);
+			if (stateid.other[0] == 0 && stateid.other[1] == 0 &&
+			    stateid.other[2] == 0) {
+				nostateid = 1;
+				NFSCL_DEBUG(1, "stateid0 in write\n");
+			}
+		}
+
+		/*
+		 * If there is no stateid for NFSv4, it means this is an
+		 * extraneous write after close. Basically a poorly
+		 * implemented buffer cache. Just don't do the write.
+		 */
+		if (nostateid)
+			error = 0;
+		else
+			error = nfsrpc_writerpc(vp, uiop, iomode, must_commit,
+			    newcred, &stateid, p, nap, attrflagp, stuff);
+		if (error == NFSERR_STALESTATEID)
+			nfscl_initiate_recovery(nmp->nm_clp);
+		if (lckp != NULL)
+			nfscl_lockderef(lckp);
+		if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+		    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+		    error == NFSERR_OLDSTATEID || error == NFSERR_BADSESSION) {
+			(void) nfs_catnap(PZERO, error, "nfs_write");
+		} else if ((error == NFSERR_EXPIRED ||
+		    error == NFSERR_BADSTATEID) && clidrev != 0) {
+			expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p);
+		}
+		retrycnt++;
+	} while (error == NFSERR_GRACE || error == NFSERR_DELAY ||
+	    ((error == NFSERR_STALESTATEID || error == NFSERR_BADSESSION ||
+	      error == NFSERR_STALEDONTRECOVER) && called_from_strategy == 0) ||
+	    (error == NFSERR_OLDSTATEID && retrycnt < 20) ||
+	    ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) &&
+	     expireret == 0 && clidrev != 0 && retrycnt < 4));
+	if (error != 0 && (retrycnt >= 4 ||
+	    ((error == NFSERR_STALESTATEID || error == NFSERR_BADSESSION ||
+	      error == NFSERR_STALEDONTRECOVER) && called_from_strategy != 0)))
+		error = EIO;
+	if (NFSHASNFSV4(nmp))
+		NFSFREECRED(newcred);
+	return (error);
+}
+
+/*
+ * The actual write RPC.
+ */
+static int
+nfsrpc_writerpc(vnode_t vp, struct uio *uiop, int *iomode,
+    int *must_commit, struct ucred *cred, nfsv4stateid_t *stateidp,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff)
+{
+	u_int32_t *tl;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsnode *np = VTONFS(vp);
+	int error = 0, len, tsiz, rlen, commit, committed = NFSWRITE_FILESYNC;
+	int wccflag = 0, wsize;
+	int32_t backup;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	nfsattrbit_t attrbits;
+	off_t tmp_off;
+
+	KASSERT(uiop->uio_iovcnt == 1, ("nfs: writerpc iovcnt > 1"));
+	*attrflagp = 0;
+	tsiz = uio_uio_resid(uiop);
+	tmp_off = uiop->uio_offset + tsiz;
+	NFSLOCKMNT(nmp);
+	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uiop->uio_offset) {
+		NFSUNLOCKMNT(nmp);
+		return (EFBIG);
+	}
+	wsize = nmp->nm_wsize;
+	NFSUNLOCKMNT(nmp);
+	nd->nd_mrep = NULL;	/* NFSv2 sometimes does a write with */
+	nd->nd_repstat = 0;	/* uio_resid == 0, so the while is not done */
+	while (tsiz > 0) {
+		*attrflagp = 0;
+		len = (tsiz > wsize) ? wsize : tsiz;
+		NFSCL_REQSTART(nd, NFSPROC_WRITE, vp);
+		if (nd->nd_flag & ND_NFSV4) {
+			nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER+2*NFSX_UNSIGNED);
+			txdr_hyper(uiop->uio_offset, tl);
+			tl += 2;
+			*tl++ = txdr_unsigned(*iomode);
+			*tl = txdr_unsigned(len);
+		} else if (nd->nd_flag & ND_NFSV3) {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER+3*NFSX_UNSIGNED);
+			txdr_hyper(uiop->uio_offset, tl);
+			tl += 2;
+			*tl++ = txdr_unsigned(len);
+			*tl++ = txdr_unsigned(*iomode);
+			*tl = txdr_unsigned(len);
+		} else {
+			u_int32_t x;
+
+			NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+			/*
+			 * Not sure why someone changed this, since the
+			 * RFC clearly states that "beginoffset" and
+			 * "totalcount" are ignored, but it wouldn't
+			 * surprise me if there's a busted server out there.
+			 */
+			/* Set both "begin" and "current" to non-garbage. */
+			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
+			*tl++ = x;      /* "begin offset" */
+			*tl++ = x;      /* "current offset" */
+			x = txdr_unsigned(len);
+			*tl++ = x;      /* total to this offset */
+			*tl = x;        /* size of this write */
+
+		}
+		nfsm_uiombuf(nd, uiop, len);
+		/*
+		 * Although it is tempting to do a normal Getattr Op in the
+		 * NFSv4 compound, the result can be a nearly hung client
+		 * system if the Getattr asks for Owner and/or OwnerGroup.
+		 * It occurs when the client can't map either the Owner or
+		 * Owner_group name in the Getattr reply to a uid/gid. When
+		 * there is a cache miss, the kernel does an upcall to the
+		 * nfsuserd. Then, it can try and read the local /etc/passwd
+		 * or /etc/group file. It can then block in getnewbuf(),
+		 * waiting for dirty writes to be pushed to the NFS server.
+		 * The only reason this doesn't result in a complete
+		 * deadlock, is that the upcall times out and allows
+		 * the write to complete. However, progress is so slow
+		 * that it might just as well be deadlocked.
+		 * As such, we get the rest of the attributes, but not
+		 * Owner or Owner_group.
+		 * nb: nfscl_loadattrcache() needs to be told that these
+		 *     partial attributes from a write rpc are being
+		 *     passed in, via an argument flag.
+		 */
+		if (nd->nd_flag & ND_NFSV4) {
+			NFSWRITEGETATTR_ATTRBIT(&attrbits);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_GETATTR);
+			(void) nfsrv_putattrbit(nd, &attrbits);
+		}
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		if (nd->nd_repstat) {
+			/*
+			 * In case the rpc gets retried, roll
+			 * the uio fields changed by nfsm_uiombuf()
+			 * back.
+			 */
+			uiop->uio_offset -= len;
+			uio_uio_resid_add(uiop, len);
+			uio_iov_base_add(uiop, -len);
+			uio_iov_len_add(uiop, len);
+		}
+		if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) {
+			error = nfscl_wcc_data(nd, vp, nap, attrflagp,
+			    &wccflag, stuff);
+			if (error)
+				goto nfsmout;
+		}
+		if (!nd->nd_repstat) {
+			if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) {
+				NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED
+					+ NFSX_VERF);
+				rlen = fxdr_unsigned(int, *tl++);
+				if (rlen == 0) {
+					error = NFSERR_IO;
+					goto nfsmout;
+				} else if (rlen < len) {
+					backup = len - rlen;
+					uio_iov_base_add(uiop, -(backup));
+					uio_iov_len_add(uiop, backup);
+					uiop->uio_offset -= backup;
+					uio_uio_resid_add(uiop, backup);
+					len = rlen;
+				}
+				commit = fxdr_unsigned(int, *tl++);
+
+				/*
+				 * Return the lowest commitment level
+				 * obtained by any of the RPCs.
+				 */
+				if (committed == NFSWRITE_FILESYNC)
+					committed = commit;
+				else if (committed == NFSWRITE_DATASYNC &&
+					commit == NFSWRITE_UNSTABLE)
+					committed = commit;
+				NFSLOCKMNT(nmp);
+				if (!NFSHASWRITEVERF(nmp)) {
+					NFSBCOPY((caddr_t)tl,
+					    (caddr_t)&nmp->nm_verf[0],
+					    NFSX_VERF);
+					NFSSETWRITEVERF(nmp);
+	    			} else if (NFSBCMP(tl, nmp->nm_verf,
+				    NFSX_VERF)) {
+					*must_commit = 1;
+					NFSBCOPY(tl, nmp->nm_verf, NFSX_VERF);
+				}
+				NFSUNLOCKMNT(nmp);
+			}
+			if (nd->nd_flag & ND_NFSV4)
+				NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			if (nd->nd_flag & (ND_NFSV2 | ND_NFSV4)) {
+				error = nfsm_loadattr(nd, nap);
+				if (!error)
+					*attrflagp = NFS_LATTR_NOSHRINK;
+			}
+		} else {
+			error = nd->nd_repstat;
+		}
+		if (error)
+			goto nfsmout;
+		NFSWRITERPC_SETTIME(wccflag, np, nap, (nd->nd_flag & ND_NFSV4));
+		mbuf_freem(nd->nd_mrep);
+		nd->nd_mrep = NULL;
+		tsiz -= len;
+	}
+nfsmout:
+	if (nd->nd_mrep != NULL)
+		mbuf_freem(nd->nd_mrep);
+	*iomode = committed;
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+	return (error);
+}
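+
+/*
+ * A minimal sketch of the "lowest commitment level" merge performed above:
+ * NFSWRITE_FILESYNC is the strongest guarantee and NFSWRITE_UNSTABLE the
+ * weakest, so the loop keeps the weakest level reported by any of the
+ * write RPCs and returns it via *iomode.  The helper name is illustrative
+ * only.
+ */
+static __inline int
+sketch_merge_commit(int committed, int commit)
+{
+	if (committed == NFSWRITE_FILESYNC)
+		return (commit);
+	if (committed == NFSWRITE_DATASYNC && commit == NFSWRITE_UNSTABLE)
+		return (commit);
+	return (committed);
+}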
+
+/*
+ * nfs mknod rpc
+ * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
+ * mode set to specify the file type and the size field for rdev.
+ */
+APPLESTATIC int
+nfsrpc_mknod(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+    u_int32_t rdev, enum vtype vtyp, struct ucred *cred, NFSPROC_T *p,
+    struct nfsvattr *dnap, struct nfsvattr *nnap, struct nfsfh **nfhpp,
+    int *attrflagp, int *dattrflagp, void *dstuff)
+{
+	u_int32_t *tl;
+	int error = 0;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	nfsattrbit_t attrbits;
+
+	*nfhpp = NULL;
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_MKNOD, dvp);
+	if (nd->nd_flag & ND_NFSV4) {
+		if (vtyp == VBLK || vtyp == VCHR) {
+			NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+			*tl++ = vtonfsv34_type(vtyp);
+			*tl++ = txdr_unsigned(NFSMAJOR(rdev));
+			*tl = txdr_unsigned(NFSMINOR(rdev));
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = vtonfsv34_type(vtyp);
+		}
+	}
+	(void) nfsm_strtom(nd, name, namelen);
+	if (nd->nd_flag & ND_NFSV3) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = vtonfsv34_type(vtyp);
+	}
+	if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4))
+		nfscl_fillsattr(nd, vap, dvp, 0, 0);
+	if ((nd->nd_flag & ND_NFSV3) &&
+	    (vtyp == VCHR || vtyp == VBLK)) {
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		*tl++ = txdr_unsigned(NFSMAJOR(rdev));
+		*tl = txdr_unsigned(NFSMINOR(rdev));
+	}
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSGETATTR_ATTRBIT(&attrbits);
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		*tl++ = txdr_unsigned(NFSV4OP_GETFH);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	if (nd->nd_flag & ND_NFSV2)
+		nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZERDEV, rdev);
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & ND_NFSV4)
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	if (!nd->nd_repstat) {
+		if (nd->nd_flag & ND_NFSV4) {
+			NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+			error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+			if (error)
+				goto nfsmout;
+		}
+		error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+		if (error)
+			goto nfsmout;
+	}
+	if (nd->nd_flag & ND_NFSV3)
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	if (!error && nd->nd_repstat)
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
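+
+/*
+ * A minimal sketch of the NFSv2 wire sattr that the mknod kludge above
+ * relies on: v2 CREATE has no explicit type or rdev argument, so the IFMT
+ * bits travel in the mode word and the device number in the size word,
+ * which appears to be what the NFSSATTR_SIZERDEV flag asks
+ * nfscl_fillsattr() to encode.  The structure is illustrative only and is
+ * not one of the imported wire definitions.
+ */
+struct sketch_nfsv2_sattr {
+	u_int32_t sa_mode;	/* permission bits plus IFCHR/IFBLK type bits */
+	u_int32_t sa_uid;
+	u_int32_t sa_gid;
+	u_int32_t sa_size;	/* rdev, for the mknod kludge */
+	u_int32_t sa_atime[2];	/* seconds, microseconds */
+	u_int32_t sa_mtime[2];	/* seconds, microseconds */
+};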
+
+/*
+ * nfs file create call
+ * Mostly just call the appropriate routine. (I separated out v4, so that
+ * error recovery wouldn't be as difficult.)
+ */
+APPLESTATIC int
+nfsrpc_create(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+    nfsquad_t cverf, int fmode, struct ucred *cred, NFSPROC_T *p,
+    struct nfsvattr *dnap, struct nfsvattr *nnap, struct nfsfh **nfhpp,
+    int *attrflagp, int *dattrflagp, void *dstuff)
+{
+	int error = 0, newone, expireret = 0, retrycnt, unlocked;
+	struct nfsclowner *owp;
+	struct nfscldeleg *dp;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(dvp));
+	u_int32_t clidrev;
+
+	if (NFSHASNFSV4(nmp)) {
+	    retrycnt = 0;
+	    do {
+		dp = NULL;
+		error = nfscl_open(dvp, NULL, 0, (NFSV4OPEN_ACCESSWRITE |
+		    NFSV4OPEN_ACCESSREAD), 0, cred, p, &owp, NULL, &newone,
+		    NULL, 1);
+		if (error)
+			return (error);
+		if (nmp->nm_clp != NULL)
+			clidrev = nmp->nm_clp->nfsc_clientidrev;
+		else
+			clidrev = 0;
+		if (!NFSHASPNFS(nmp) || nfscl_enablecallb == 0 ||
+		    nfs_numnfscbd == 0 || retrycnt > 0)
+			error = nfsrpc_createv4(dvp, name, namelen, vap, cverf,
+			  fmode, owp, &dp, cred, p, dnap, nnap, nfhpp,
+			  attrflagp, dattrflagp, dstuff, &unlocked);
+		else
+			error = nfsrpc_getcreatelayout(dvp, name, namelen, vap,
+			  cverf, fmode, owp, &dp, cred, p, dnap, nnap, nfhpp,
+			  attrflagp, dattrflagp, dstuff, &unlocked);
+		/*
+		 * There is no need to invalidate cached attributes here,
+		 * since new post-delegation issue attributes are always
+		 * returned by nfsrpc_createv4() and these will update the
+		 * attribute cache.
+		 */
+		if (dp != NULL)
+			(void) nfscl_deleg(nmp->nm_mountp, owp->nfsow_clp,
+			    (*nfhpp)->nfh_fh, (*nfhpp)->nfh_len, cred, p, &dp);
+		nfscl_ownerrelease(nmp, owp, error, newone, unlocked);
+		if (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID ||
+		    error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+		    error == NFSERR_BADSESSION) {
+			(void) nfs_catnap(PZERO, error, "nfs_open");
+		} else if ((error == NFSERR_EXPIRED ||
+		    error == NFSERR_BADSTATEID) && clidrev != 0) {
+			expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p);
+			retrycnt++;
+		}
+	    } while (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID ||
+		error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+		error == NFSERR_BADSESSION ||
+		((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) &&
+		 expireret == 0 && clidrev != 0 && retrycnt < 4));
+	    if (error && retrycnt >= 4)
+		    error = EIO;
+	} else {
+		error = nfsrpc_createv23(dvp, name, namelen, vap, cverf,
+		    fmode, cred, p, dnap, nnap, nfhpp, attrflagp, dattrflagp,
+		    dstuff);
+	}
+	return (error);
+}
+
+/*
+ * The create rpc for v2 and 3.
+ */
+static int
+nfsrpc_createv23(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+    nfsquad_t cverf, int fmode, struct ucred *cred, NFSPROC_T *p,
+    struct nfsvattr *dnap, struct nfsvattr *nnap, struct nfsfh **nfhpp,
+    int *attrflagp, int *dattrflagp, void *dstuff)
+{
+	u_int32_t *tl;
+	int error = 0;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+
+	*nfhpp = NULL;
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_CREATE, dvp);
+	(void) nfsm_strtom(nd, name, namelen);
+	if (nd->nd_flag & ND_NFSV3) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		if (fmode & O_EXCL) {
+			*tl = txdr_unsigned(NFSCREATE_EXCLUSIVE);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_VERF);
+			*tl++ = cverf.lval[0];
+			*tl = cverf.lval[1];
+		} else {
+			*tl = txdr_unsigned(NFSCREATE_UNCHECKED);
+			nfscl_fillsattr(nd, vap, dvp, 0, 0);
+		}
+	} else {
+		nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZE0, 0);
+	}
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+		if (error)
+			goto nfsmout;
+	}
+	if (nd->nd_flag & ND_NFSV3)
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+static int
+nfsrpc_createv4(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+    nfsquad_t cverf, int fmode, struct nfsclowner *owp, struct nfscldeleg **dpp,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap,
+    struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp,
+    int *dattrflagp, void *dstuff, int *unlockedp)
+{
+	u_int32_t *tl;
+	int error = 0, deleg, newone, ret, acesize, limitby;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsclopen *op;
+	struct nfscldeleg *dp = NULL;
+	struct nfsnode *np;
+	struct nfsfh *nfhp;
+	nfsattrbit_t attrbits;
+	nfsv4stateid_t stateid;
+	u_int32_t rflags;
+	struct nfsmount *nmp;
+	struct nfsclsession *tsep;
+
+	nmp = VFSTONFS(dvp->v_mount);
+	np = VTONFS(dvp);
+	*unlockedp = 0;
+	*nfhpp = NULL;
+	*dpp = NULL;
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_CREATE, dvp);
+	/*
+	 * For V4, this is actually an Open op.
+	 */
+	NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(owp->nfsow_seqid);
+	*tl++ = txdr_unsigned(NFSV4OPEN_ACCESSWRITE |
+	    NFSV4OPEN_ACCESSREAD);
+	*tl++ = txdr_unsigned(NFSV4OPEN_DENYNONE);
+	tsep = nfsmnt_mdssession(nmp);
+	*tl++ = tsep->nfsess_clientid.lval[0];
+	*tl = tsep->nfsess_clientid.lval[1];
+	(void) nfsm_strtom(nd, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
+	NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFSV4OPEN_CREATE);
+	if (fmode & O_EXCL) {
+		if (NFSHASNFSV4N(nmp)) {
+			if (NFSHASSESSPERSIST(nmp)) {
+				/* Use GUARDED for persistent sessions. */
+				*tl = txdr_unsigned(NFSCREATE_GUARDED);
+				nfscl_fillsattr(nd, vap, dvp, 0, 0);
+			} else {
+				/* Otherwise, use EXCLUSIVE4_1. */
+				*tl = txdr_unsigned(NFSCREATE_EXCLUSIVE41);
+				NFSM_BUILD(tl, u_int32_t *, NFSX_VERF);
+				*tl++ = cverf.lval[0];
+				*tl = cverf.lval[1];
+				nfscl_fillsattr(nd, vap, dvp, 0, 0);
+			}
+		} else {
+			/* NFSv4.0 */
+			*tl = txdr_unsigned(NFSCREATE_EXCLUSIVE);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_VERF);
+			*tl++ = cverf.lval[0];
+			*tl = cverf.lval[1];
+		}
+	} else {
+		*tl = txdr_unsigned(NFSCREATE_UNCHECKED);
+		nfscl_fillsattr(nd, vap, dvp, 0, 0);
+	}
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OPEN_CLAIMNULL);
+	(void) nfsm_strtom(nd, name, namelen);
+	/* Get the new file's handle and attributes. */
+	NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFSV4OP_GETFH);
+	*tl = txdr_unsigned(NFSV4OP_GETATTR);
+	NFSGETATTR_ATTRBIT(&attrbits);
+	(void) nfsrv_putattrbit(nd, &attrbits);
+	/* Get the directory's post-op attributes. */
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_PUTFH);
+	(void) nfsm_fhtom(nd, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, 0);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_GETATTR);
+	(void) nfsrv_putattrbit(nd, &attrbits);
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error)
+		return (error);
+	NFSCL_INCRSEQID(owp->nfsow_seqid, nd);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+		    6 * NFSX_UNSIGNED);
+		stateid.seqid = *tl++;
+		stateid.other[0] = *tl++;
+		stateid.other[1] = *tl++;
+		stateid.other[2] = *tl;
+		rflags = fxdr_unsigned(u_int32_t, *(tl + 6));
+		(void) nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		deleg = fxdr_unsigned(int, *tl);
+		if (deleg == NFSV4OPEN_DELEGATEREAD ||
+		    deleg == NFSV4OPEN_DELEGATEWRITE) {
+			if (!(owp->nfsow_clp->nfsc_flags &
+			      NFSCLFLAGS_FIRSTDELEG))
+				owp->nfsow_clp->nfsc_flags |=
+				  (NFSCLFLAGS_FIRSTDELEG | NFSCLFLAGS_GOTDELEG);
+			dp = malloc(
+			    sizeof (struct nfscldeleg) + NFSX_V4FHMAX,
+			    M_NFSCLDELEG, M_WAITOK);
+			LIST_INIT(&dp->nfsdl_owner);
+			LIST_INIT(&dp->nfsdl_lock);
+			dp->nfsdl_clp = owp->nfsow_clp;
+			newnfs_copyincred(cred, &dp->nfsdl_cred);
+			nfscl_lockinit(&dp->nfsdl_rwlock);
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+			    NFSX_UNSIGNED);
+			dp->nfsdl_stateid.seqid = *tl++;
+			dp->nfsdl_stateid.other[0] = *tl++;
+			dp->nfsdl_stateid.other[1] = *tl++;
+			dp->nfsdl_stateid.other[2] = *tl++;
+			ret = fxdr_unsigned(int, *tl);
+			if (deleg == NFSV4OPEN_DELEGATEWRITE) {
+				dp->nfsdl_flags = NFSCLDL_WRITE;
+				/*
+				 * Indicates how much the file can grow.
+				 */
+				NFSM_DISSECT(tl, u_int32_t *,
+				    3 * NFSX_UNSIGNED);
+				limitby = fxdr_unsigned(int, *tl++);
+				switch (limitby) {
+				case NFSV4OPEN_LIMITSIZE:
+					dp->nfsdl_sizelimit = fxdr_hyper(tl);
+					break;
+				case NFSV4OPEN_LIMITBLOCKS:
+					dp->nfsdl_sizelimit =
+					    fxdr_unsigned(u_int64_t, *tl++);
+					dp->nfsdl_sizelimit *=
+					    fxdr_unsigned(u_int64_t, *tl);
+					break;
+				default:
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+			} else {
+				dp->nfsdl_flags = NFSCLDL_READ;
+			}
+			if (ret)
+				dp->nfsdl_flags |= NFSCLDL_RECALL;
+			error = nfsrv_dissectace(nd, &dp->nfsdl_ace, &ret,
+			    &acesize, p);
+			if (error)
+				goto nfsmout;
+		} else if (deleg != NFSV4OPEN_DELEGATENONE) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+		if (error)
+			goto nfsmout;
+		/* Get rid of the PutFH and Getattr status values. */
+		NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+		/* Load the directory attributes. */
+		error = nfsm_loadattr(nd, dnap);
+		if (error)
+			goto nfsmout;
+		*dattrflagp = 1;
+		if (dp != NULL && *attrflagp) {
+			dp->nfsdl_change = nnap->na_filerev;
+			dp->nfsdl_modtime = nnap->na_mtime;
+			dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
+		}
+		/*
+		 * We can now complete the Open state.
+		 */
+		nfhp = *nfhpp;
+		if (dp != NULL) {
+			dp->nfsdl_fhlen = nfhp->nfh_len;
+			NFSBCOPY(nfhp->nfh_fh, dp->nfsdl_fh, nfhp->nfh_len);
+		}
+		/*
+		 * Get an Open structure that will be
+		 * attached to the OpenOwner, acquired already.
+		 */
+		error = nfscl_open(dvp, nfhp->nfh_fh, nfhp->nfh_len, 
+		    (NFSV4OPEN_ACCESSWRITE | NFSV4OPEN_ACCESSREAD), 0,
+		    cred, p, NULL, &op, &newone, NULL, 0);
+		if (error)
+			goto nfsmout;
+		op->nfso_stateid = stateid;
+		newnfs_copyincred(cred, &op->nfso_cred);
+		if ((rflags & NFSV4OPEN_RESULTCONFIRM)) {
+		    do {
+			ret = nfsrpc_openconfirm(dvp, nfhp->nfh_fh,
+			    nfhp->nfh_len, op, cred, p);
+			if (ret == NFSERR_DELAY)
+			    (void) nfs_catnap(PZERO, ret, "nfs_create");
+		    } while (ret == NFSERR_DELAY);
+		    error = ret;
+		}
+
+		/*
+		 * If the server is handing out delegations, but we didn't
+		 * get one because an OpenConfirm was required, try the
+		 * Open again, to get a delegation. This is a harmless no-op,
+		 * from a server's point of view.
+		 */
+		if ((rflags & NFSV4OPEN_RESULTCONFIRM) &&
+		    (owp->nfsow_clp->nfsc_flags & NFSCLFLAGS_GOTDELEG) &&
+		    !error && dp == NULL) {
+		    do {
+			ret = nfsrpc_openrpc(VFSTONFS(vnode_mount(dvp)), dvp,
+			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
+			    nfhp->nfh_fh, nfhp->nfh_len,
+			    (NFSV4OPEN_ACCESSWRITE | NFSV4OPEN_ACCESSREAD), op,
+			    name, namelen, &dp, 0, 0x0, cred, p, 0, 1);
+			if (ret == NFSERR_DELAY)
+			    (void) nfs_catnap(PZERO, ret, "nfs_crt2");
+		    } while (ret == NFSERR_DELAY);
+		    if (ret) {
+			if (dp != NULL) {
+				free(dp, M_NFSCLDELEG);
+				dp = NULL;
+			}
+			if (ret == NFSERR_STALECLIENTID ||
+			    ret == NFSERR_STALEDONTRECOVER ||
+			    ret == NFSERR_BADSESSION)
+				error = ret;
+		    }
+		}
+		nfscl_openrelease(nmp, op, error, newone);
+		*unlockedp = 1;
+	}
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+	if (error == NFSERR_STALECLIENTID)
+		nfscl_initiate_recovery(owp->nfsow_clp);
+nfsmout:
+	if (!error)
+		*dpp = dp;
+	else if (dp != NULL)
+		free(dp, M_NFSCLDELEG);
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
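+/*
+ * A minimal sketch of the write-delegation space_limit decode done in
+ * nfsrpc_createv4() above: NFSV4OPEN_LIMITSIZE carries a byte count as an
+ * XDR hyper, while NFSV4OPEN_LIMITBLOCKS carries a block count and a
+ * bytes-per-block value whose product bounds how far the file may grow.
+ * The helper name is illustrative; the two word arguments stand for the
+ * already host-order values following the limitby discriminator.
+ */
+static __inline u_int64_t
+sketch_deleg_sizelimit(int limitby, u_int32_t w1, u_int32_t w2)
+{
+	if (limitby == NFSV4OPEN_LIMITSIZE)
+		return (((u_int64_t)w1 << 32) | w2);	/* hyper: high, low */
+	if (limitby == NFSV4OPEN_LIMITBLOCKS)
+		return ((u_int64_t)w1 * w2);	/* nblocks * bytes per block */
+	return (0);	/* anything else is treated as NFSERR_BADXDR above */
+}
+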
+/*
+ * Nfs remove rpc
+ */
+APPLESTATIC int
+nfsrpc_remove(vnode_t dvp, char *name, int namelen, vnode_t vp,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap, int *dattrflagp,
+    void *dstuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsnode *np;
+	struct nfsmount *nmp;
+	nfsv4stateid_t dstateid;
+	int error, ret = 0, i;
+
+	*dattrflagp = 0;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	nmp = VFSTONFS(vnode_mount(dvp));
+tryagain:
+	if (NFSHASNFSV4(nmp) && ret == 0) {
+		ret = nfscl_removedeleg(vp, p, &dstateid);
+		if (ret == 1) {
+			NFSCL_REQSTART(nd, NFSPROC_RETDELEGREMOVE, vp);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID +
+			    NFSX_UNSIGNED);
+			if (NFSHASNFSV4N(nmp))
+				*tl++ = 0;
+			else
+				*tl++ = dstateid.seqid;
+			*tl++ = dstateid.other[0];
+			*tl++ = dstateid.other[1];
+			*tl++ = dstateid.other[2];
+			*tl = txdr_unsigned(NFSV4OP_PUTFH);
+			np = VTONFS(dvp);
+			(void) nfsm_fhtom(nd, np->n_fhp->nfh_fh,
+			    np->n_fhp->nfh_len, 0);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_REMOVE);
+		}
+	} else {
+		ret = 0;
+	}
+	if (ret == 0)
+		NFSCL_REQSTART(nd, NFSPROC_REMOVE, dvp);
+	(void) nfsm_strtom(nd, name, namelen);
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) {
+		/* For NFSv4, parse out any Delegreturn replies. */
+		if (ret > 0 && nd->nd_repstat != 0 &&
+		    (nd->nd_flag & ND_NOMOREDATA)) {
+			/*
+			 * If the Delegreturn failed, try again without
+			 * it. The server will Recall, as required.
+			 */
+			mbuf_freem(nd->nd_mrep);
+			goto tryagain;
+		}
+		for (i = 0; i < (ret * 2); i++) {
+			if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) ==
+			    ND_NFSV4) {
+			    NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			    if (*(tl + 1))
+				nd->nd_flag |= ND_NOMOREDATA;
+			}
+		}
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	}
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do an nfs rename rpc.
+ */
+APPLESTATIC int
+nfsrpc_rename(vnode_t fdvp, vnode_t fvp, char *fnameptr, int fnamelen,
+    vnode_t tdvp, vnode_t tvp, char *tnameptr, int tnamelen, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *fnap, struct nfsvattr *tnap,
+    int *fattrflagp, int *tattrflagp, void *fstuff, void *tstuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp;
+	struct nfsnode *np;
+	nfsattrbit_t attrbits;
+	nfsv4stateid_t fdstateid, tdstateid;
+	int error = 0, ret = 0, gottd = 0, gotfd = 0, i;
+
+	*fattrflagp = 0;
+	*tattrflagp = 0;
+	nmp = VFSTONFS(vnode_mount(fdvp));
+	if (fnamelen > NFS_MAXNAMLEN || tnamelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+tryagain:
+	if (NFSHASNFSV4(nmp) && ret == 0) {
+		ret = nfscl_renamedeleg(fvp, &fdstateid, &gotfd, tvp,
+		    &tdstateid, &gottd, p);
+		if (gotfd && gottd) {
+			NFSCL_REQSTART(nd, NFSPROC_RETDELEGRENAME2, fvp);
+		} else if (gotfd) {
+			NFSCL_REQSTART(nd, NFSPROC_RETDELEGRENAME1, fvp);
+		} else if (gottd) {
+			NFSCL_REQSTART(nd, NFSPROC_RETDELEGRENAME1, tvp);
+		}
+		if (gotfd) {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID);
+			if (NFSHASNFSV4N(nmp))
+				*tl++ = 0;
+			else
+				*tl++ = fdstateid.seqid;
+			*tl++ = fdstateid.other[0];
+			*tl++ = fdstateid.other[1];
+			*tl = fdstateid.other[2];
+			if (gottd) {
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV4OP_PUTFH);
+				np = VTONFS(tvp);
+				(void) nfsm_fhtom(nd, np->n_fhp->nfh_fh,
+				    np->n_fhp->nfh_len, 0);
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV4OP_DELEGRETURN);
+			}
+		}
+		if (gottd) {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID);
+			if (NFSHASNFSV4N(nmp))
+				*tl++ = 0;
+			else
+				*tl++ = tdstateid.seqid;
+			*tl++ = tdstateid.other[0];
+			*tl++ = tdstateid.other[1];
+			*tl = tdstateid.other[2];
+		}
+		if (ret > 0) {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_PUTFH);
+			np = VTONFS(fdvp);
+			(void) nfsm_fhtom(nd, np->n_fhp->nfh_fh,
+			    np->n_fhp->nfh_len, 0);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_SAVEFH);
+		}
+	} else {
+		ret = 0;
+	}
+	if (ret == 0)
+		NFSCL_REQSTART(nd, NFSPROC_RENAME, fdvp);
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		NFSWCCATTR_ATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_PUTFH);
+		(void) nfsm_fhtom(nd, VTONFS(tdvp)->n_fhp->nfh_fh,
+		    VTONFS(tdvp)->n_fhp->nfh_len, 0);
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+		nd->nd_flag |= ND_V4WCCATTR;
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_RENAME);
+	}
+	(void) nfsm_strtom(nd, fnameptr, fnamelen);
+	if (!(nd->nd_flag & ND_NFSV4))
+		(void) nfsm_fhtom(nd, VTONFS(tdvp)->n_fhp->nfh_fh,
+			VTONFS(tdvp)->n_fhp->nfh_len, 0);
+	(void) nfsm_strtom(nd, tnameptr, tnamelen);
+	error = nfscl_request(nd, fdvp, p, cred, fstuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) {
+		/* For NFSv4, parse out any Delegreturn replies. */
+		if (ret > 0 && nd->nd_repstat != 0 &&
+		    (nd->nd_flag & ND_NOMOREDATA)) {
+			/*
+			 * If the Delegreturn failed, try again without
+			 * it. The server will Recall, as required.
+			 */
+			mbuf_freem(nd->nd_mrep);
+			goto tryagain;
+		}
+		for (i = 0; i < (ret * 2); i++) {
+			if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) ==
+			    ND_NFSV4) {
+			    NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			    if (*(tl + 1)) {
+				if (i == 0 && ret > 1) {
+				    /*
+				     * If the Delegreturn failed, try again
+				     * without it. The server will Recall, as
+				     * required.
+				     * If ret > 1, the first iteration of this
+				     * loop is the second DelegReturn result.
+				     */
+				    mbuf_freem(nd->nd_mrep);
+				    goto tryagain;
+				} else {
+				    nd->nd_flag |= ND_NOMOREDATA;
+				}
+			    }
+			}
+		}
+		/* Now, the first wcc attribute reply. */
+		if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4) {
+			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			if (*(tl + 1))
+				nd->nd_flag |= ND_NOMOREDATA;
+		}
+		error = nfscl_wcc_data(nd, fdvp, fnap, fattrflagp, NULL,
+		    fstuff);
+		/* and the second wcc attribute reply. */
+		if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4 &&
+		    !error) {
+			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			if (*(tl + 1))
+				nd->nd_flag |= ND_NOMOREDATA;
+		}
+		if (!error)
+			error = nfscl_wcc_data(nd, tdvp, tnap, tattrflagp,
+			    NULL, tstuff);
+	}
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs hard link create rpc
+ */
+APPLESTATIC int
+nfsrpc_link(vnode_t dvp, vnode_t vp, char *name, int namelen,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap,
+    struct nfsvattr *nap, int *attrflagp, int *dattrflagp, void *dstuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	nfsattrbit_t attrbits;
+	int error = 0;
+
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_LINK, vp);
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_PUTFH);
+	}
+	(void) nfsm_fhtom(nd, VTONFS(dvp)->n_fhp->nfh_fh,
+		VTONFS(dvp)->n_fhp->nfh_len, 0);
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		NFSWCCATTR_ATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+		nd->nd_flag |= ND_V4WCCATTR;
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_LINK);
+	}
+	(void) nfsm_strtom(nd, name, namelen);
+	error = nfscl_request(nd, vp, p, cred, dstuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & ND_NFSV3) {
+		error = nfscl_postop_attr(nd, nap, attrflagp, dstuff);
+		if (!error)
+			error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp,
+			    NULL, dstuff);
+	} else if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4) {
+		/*
+		 * First, parse out the PutFH and Getattr result.
+		 */
+		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		if (!(*(tl + 1)))
+			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		if (*(tl + 1))
+			nd->nd_flag |= ND_NOMOREDATA;
+		/*
+		 * Get the pre-op attributes.
+		 */
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	}
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs symbolic link create rpc
+ */
+APPLESTATIC int
+nfsrpc_symlink(vnode_t dvp, char *name, int namelen, char *target,
+    struct vattr *vap, struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap,
+    struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp,
+    int *dattrflagp, void *dstuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp;
+	int slen, error = 0;
+
+	*nfhpp = NULL;
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	nmp = VFSTONFS(vnode_mount(dvp));
+	slen = strlen(target);
+	if (slen > NFS_MAXPATHLEN || namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_SYMLINK, dvp);
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFLNK);
+		(void) nfsm_strtom(nd, target, slen);
+	}
+	(void) nfsm_strtom(nd, name, namelen);
+	if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4))
+		nfscl_fillsattr(nd, vap, dvp, 0, 0);
+	if (!(nd->nd_flag & ND_NFSV4))
+		(void) nfsm_strtom(nd, target, slen);
+	if (nd->nd_flag & ND_NFSV2)
+		nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZENEG1, 0);
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & ND_NFSV4)
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	if ((nd->nd_flag & ND_NFSV3) && !error) {
+		if (!nd->nd_repstat)
+			error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+		if (!error)
+			error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp,
+			    NULL, dstuff);
+	}
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	/*
+	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
+	 * Only do this if vfs.nfs.ignore_eexist is set.
+	 * Never do this for NFSv4.1 or later minor versions, since sessions
+	 * should guarantee "exactly once" RPC semantics.
+	 */
+	if (error == EEXIST && nfsignore_eexist != 0 && (!NFSHASNFSV4(nmp) ||
+	    nmp->nm_minorvers == 0))
+		error = 0;
+	return (error);
+}
+
+/*
+ * nfs make dir rpc
+ */
+APPLESTATIC int
+nfsrpc_mkdir(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap,
+    struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp,
+    int *dattrflagp, void *dstuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	nfsattrbit_t attrbits;
+	int error = 0;
+	struct nfsfh *fhp;
+	struct nfsmount *nmp;
+
+	*nfhpp = NULL;
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	nmp = VFSTONFS(vnode_mount(dvp));
+	fhp = VTONFS(dvp)->n_fhp;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_MKDIR, dvp);
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFDIR);
+	}
+	(void) nfsm_strtom(nd, name, namelen);
+	nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZENEG1, 0);
+	if (nd->nd_flag & ND_NFSV4) {
+		NFSGETATTR_ATTRBIT(&attrbits);
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		*tl++ = txdr_unsigned(NFSV4OP_GETFH);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_PUTFH);
+		(void) nfsm_fhtom(nd, fhp->nfh_fh, fhp->nfh_len, 0);
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & ND_NFSV4)
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	if (!nd->nd_repstat && !error) {
+		if (nd->nd_flag & ND_NFSV4) {
+			NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+			error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+		}
+		if (!error)
+			error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+		if (error == 0 && (nd->nd_flag & ND_NFSV4) != 0) {
+			/* Get rid of the PutFH and Getattr status values. */
+			NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+			/* Load the directory attributes. */
+			error = nfsm_loadattr(nd, dnap);
+			if (error == 0)
+				*dattrflagp = 1;
+		}
+	}
+	if ((nd->nd_flag & ND_NFSV3) && !error)
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	/*
+	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
+	 * Only do this if vfs.nfs.ignore_eexist is set.
+	 * Never do this for NFSv4.1 or later minor versions, since sessions
+	 * should guarantee "exactly once" RPC semantics.
+	 */
+	if (error == EEXIST && nfsignore_eexist != 0 && (!NFSHASNFSV4(nmp) ||
+	    nmp->nm_minorvers == 0))
+		error = 0;
+	return (error);
+}
+
+/*
+ * nfs remove directory call
+ */
+APPLESTATIC int
+nfsrpc_rmdir(vnode_t dvp, char *name, int namelen, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *dnap, int *dattrflagp, void *dstuff)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error = 0;
+
+	*dattrflagp = 0;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_RMDIR, dvp);
+	(void) nfsm_strtom(nd, name, namelen);
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error)
+		return (error);
+	if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4))
+		error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	/*
+	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
+	 */
+	if (error == ENOENT)
+		error = 0;
+	return (error);
+}
+
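+/*
+ * A minimal sketch of how the directory offset cookie that
+ * nfsrpc_readdir() below hides after d_name could be read back: each
+ * entry's name is nul-padded out to an 8 byte boundary (always leaving at
+ * least one nul) and the two raw XDR cookie words follow immediately
+ * after that padding.  The helper name is illustrative only.
+ */
+static __inline void
+sketch_dirent_cookie(struct dirent *dp, u_int32_t cookie[2])
+{
+	int pad;
+
+	pad = roundup2(dp->d_namlen, 8);
+	if (pad == dp->d_namlen)
+		pad += 8;	/* room for the terminating nul */
+	NFSBCOPY(&dp->d_name[pad], cookie, NFSX_HYPER);
+}
+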
+/*
+ * Readdir rpc.
+ * Always returns with either uio_resid unchanged, if you are at the
+ * end of the directory, or uio_resid == 0, with all DIRBLKSIZ chunks
+ * filled in.
+ * I felt this would allow caching of directory blocks more easily
+ * than returning a partially filled block.
+ * Directory offset cookies:
+ * Oh my, what to do with them...
+ * I can think of three ways to deal with them:
+ * 1 - have the layer above these RPCs maintain a map between logical
+ *     directory byte offsets and the NFS directory offset cookies
+ * 2 - pass the opaque directory offset cookies up into userland
+ *     and let the libc functions deal with them, via the system call
+ * 3 - return them to userland in the "struct dirent", so future versions
+ *     of libc can use them and do whatever is necessary to make things work
+ *     above these rpc calls, in the meantime
+ * For now, I do #3 by "hiding" the directory offset cookies after the
+ * d_name field in struct dirent. This is space inside d_reclen that
+ * will be ignored by anything that doesn't know about them.
+ * The directory offset cookies are filled in as the last 8 bytes of
+ * each directory entry, after d_name. Someday, the userland libc
+ * functions may be able to use these. In the meantime, it satisfies
+ * OpenBSD's requirements for cookies being returned.
+ * It expects the directory offset cookie for the read to be in uio_offset
+ * and returns the one for the next entry after this directory block in
+ * there, as well.
+ */
+APPLESTATIC int
+nfsrpc_readdir(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp,
+    int *eofp, void *stuff)
+{
+	int len, left;
+	struct dirent *dp = NULL;
+	u_int32_t *tl;
+	nfsquad_t cookie, ncookie;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsnode *dnp = VTONFS(vp);
+	struct nfsvattr nfsva;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
+	int reqsize, tryformoredirs = 1, readsize, eof = 0, gotmnton = 0;
+	u_int64_t dotfileid, dotdotfileid = 0, fakefileno = UINT64_MAX;
+	char *cp;
+	nfsattrbit_t attrbits, dattrbits;
+	u_int32_t rderr, *tl2 = NULL;
+	size_t tresid;
+
+	KASSERT(uiop->uio_iovcnt == 1 &&
+	    (uio_uio_resid(uiop) & (DIRBLKSIZ - 1)) == 0,
+	    ("nfs readdirrpc bad uio"));
+	ncookie.lval[0] = ncookie.lval[1] = 0;
+	/*
+	 * There is no point in reading a lot more than uio_resid, however
+	 * adding one additional DIRBLKSIZ makes sense. Since uio_resid
+	 * and nm_readdirsize are both exact multiples of DIRBLKSIZ, this
+	 * will never make readsize > nm_readdirsize.
+	 */
+	readsize = nmp->nm_readdirsize;
+	if (readsize > uio_uio_resid(uiop))
+		readsize = uio_uio_resid(uiop) + DIRBLKSIZ;
+
+	*attrflagp = 0;
+	if (eofp)
+		*eofp = 0;
+	tresid = uio_uio_resid(uiop);
+	cookie.lval[0] = cookiep->nfsuquad[0];
+	cookie.lval[1] = cookiep->nfsuquad[1];
+	nd->nd_mrep = NULL;
+
+	/*
+	 * For NFSv4, first create the "." and ".." entries.
+	 */
+	if (NFSHASNFSV4(nmp)) {
+		reqsize = 6 * NFSX_UNSIGNED;
+		NFSGETATTR_ATTRBIT(&dattrbits);
+		NFSZERO_ATTRBIT(&attrbits);
+		NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FILEID);
+		NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TYPE);
+		if (NFSISSET_ATTRBIT(&dnp->n_vattr.na_suppattr,
+		    NFSATTRBIT_MOUNTEDONFILEID)) {
+			NFSSETBIT_ATTRBIT(&attrbits,
+			    NFSATTRBIT_MOUNTEDONFILEID);
+			gotmnton = 1;
+		} else {
+			/*
+			 * fsid differs from that of the directory. For that
+			 * fsid is != to that of the directory. For that
+			 * case, generate a fake fileno that is not the same.
+			 */
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FSID);
+			gotmnton = 0;
+		}
+
+		/*
+		 * Joy, oh joy. For V4 we get to hand craft '.' and '..'.
+		 */
+		if (uiop->uio_offset == 0) {
+			NFSCL_REQSTART(nd, NFSPROC_LOOKUPP, vp);
+			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			*tl++ = txdr_unsigned(NFSV4OP_GETFH);
+			*tl = txdr_unsigned(NFSV4OP_GETATTR);
+			(void) nfsrv_putattrbit(nd, &attrbits);
+			error = nfscl_request(nd, vp, p, cred, stuff);
+			if (error)
+			    return (error);
+			dotfileid = 0;	/* Fake out the compiler. */
+			if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+			    error = nfsm_loadattr(nd, &nfsva);
+			    if (error != 0)
+				goto nfsmout;
+			    dotfileid = nfsva.na_fileid;
+			}
+			if (nd->nd_repstat == 0) {
+			    NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+			    len = fxdr_unsigned(int, *(tl + 4));
+			    if (len > 0 && len <= NFSX_V4FHMAX)
+				error = nfsm_advance(nd, NFSM_RNDUP(len), -1);
+			    else
+				error = EPERM;
+			    if (!error) {
+				NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED);
+				nfsva.na_mntonfileno = UINT64_MAX;
+				error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
+				    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+				    NULL, NULL, NULL, p, cred);
+				if (error) {
+				    dotdotfileid = dotfileid;
+				} else if (gotmnton) {
+				    if (nfsva.na_mntonfileno != UINT64_MAX)
+					dotdotfileid = nfsva.na_mntonfileno;
+				    else
+					dotdotfileid = nfsva.na_fileid;
+				} else if (nfsva.na_filesid[0] ==
+				    dnp->n_vattr.na_filesid[0] &&
+				    nfsva.na_filesid[1] ==
+				    dnp->n_vattr.na_filesid[1]) {
+				    dotdotfileid = nfsva.na_fileid;
+				} else {
+				    do {
+					fakefileno--;
+				    } while (fakefileno ==
+					nfsva.na_fileid);
+				    dotdotfileid = fakefileno;
+				}
+			    }
+			} else if (nd->nd_repstat == NFSERR_NOENT) {
+			    /*
+			     * Lookupp returns NFSERR_NOENT when we are
+			     * at the root, so just use the current dir.
+			     */
+			    nd->nd_repstat = 0;
+			    dotdotfileid = dotfileid;
+			} else {
+			    error = nd->nd_repstat;
+			}
+			mbuf_freem(nd->nd_mrep);
+			if (error)
+			    return (error);
+			nd->nd_mrep = NULL;
+			dp = (struct dirent *)uio_iov_base(uiop);
+			dp->d_pad0 = dp->d_pad1 = 0;
+			dp->d_off = 0;
+			dp->d_type = DT_DIR;
+			dp->d_fileno = dotfileid;
+			dp->d_namlen = 1;
+			*((uint64_t *)dp->d_name) = 0;	/* Zero pad it. */
+			dp->d_name[0] = '.';
+			dp->d_reclen = _GENERIC_DIRSIZ(dp) + NFSX_HYPER;
+			/*
+			 * Just make these offset cookies 0.
+			 */
+			tl = (u_int32_t *)&dp->d_name[8];
+			*tl++ = 0;
+			*tl = 0;
+			blksiz += dp->d_reclen;
+			uio_uio_resid_add(uiop, -(dp->d_reclen));
+			uiop->uio_offset += dp->d_reclen;
+			uio_iov_base_add(uiop, dp->d_reclen);
+			uio_iov_len_add(uiop, -(dp->d_reclen));
+			dp = (struct dirent *)uio_iov_base(uiop);
+			dp->d_pad0 = dp->d_pad1 = 0;
+			dp->d_off = 0;
+			dp->d_type = DT_DIR;
+			dp->d_fileno = dotdotfileid;
+			dp->d_namlen = 2;
+			*((uint64_t *)dp->d_name) = 0;
+			dp->d_name[0] = '.';
+			dp->d_name[1] = '.';
+			dp->d_reclen = _GENERIC_DIRSIZ(dp) + NFSX_HYPER;
+			/*
+			 * Just make these offset cookies 0.
+			 */
+			tl = (u_int32_t *)&dp->d_name[8];
+			*tl++ = 0;
+			*tl = 0;
+			blksiz += dp->d_reclen;
+			uio_uio_resid_add(uiop, -(dp->d_reclen));
+			uiop->uio_offset += dp->d_reclen;
+			uio_iov_base_add(uiop, dp->d_reclen);
+			uio_iov_len_add(uiop, -(dp->d_reclen));
+		}
+		NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_RDATTRERROR);
+	} else {
+		reqsize = 5 * NFSX_UNSIGNED;
+	}
+
+	/*
+	 * Loop around doing readdir rpc's of size readsize.
+	 * The stopping criteria is EOF or buffer full.
+	 */
+	while (more_dirs && bigenough) {
+		*attrflagp = 0;
+		NFSCL_REQSTART(nd, NFSPROC_READDIR, vp);
+		if (nd->nd_flag & ND_NFSV2) {
+			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			*tl++ = cookie.lval[1];
+			*tl = txdr_unsigned(readsize);
+		} else {
+			NFSM_BUILD(tl, u_int32_t *, reqsize);
+			*tl++ = cookie.lval[0];
+			*tl++ = cookie.lval[1];
+			if (cookie.qval == 0) {
+				*tl++ = 0;
+				*tl++ = 0;
+			} else {
+				NFSLOCKNODE(dnp);
+				*tl++ = dnp->n_cookieverf.nfsuquad[0];
+				*tl++ = dnp->n_cookieverf.nfsuquad[1];
+				NFSUNLOCKNODE(dnp);
+			}
+			if (nd->nd_flag & ND_NFSV4) {
+				*tl++ = txdr_unsigned(readsize);
+				*tl = txdr_unsigned(readsize);
+				(void) nfsrv_putattrbit(nd, &attrbits);
+				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+				*tl = txdr_unsigned(NFSV4OP_GETATTR);
+				(void) nfsrv_putattrbit(nd, &dattrbits);
+			} else {
+				*tl = txdr_unsigned(readsize);
+			}
+		}
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		if (!(nd->nd_flag & ND_NFSV2)) {
+			if (nd->nd_flag & ND_NFSV3)
+				error = nfscl_postop_attr(nd, nap, attrflagp,
+				    stuff);
+			if (!nd->nd_repstat && !error) {
+				NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER);
+				NFSLOCKNODE(dnp);
+				dnp->n_cookieverf.nfsuquad[0] = *tl++;
+				dnp->n_cookieverf.nfsuquad[1] = *tl;
+				NFSUNLOCKNODE(dnp);
+			}
+		}
+		if (nd->nd_repstat || error) {
+			if (!error)
+				error = nd->nd_repstat;
+			goto nfsmout;
+		}
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		more_dirs = fxdr_unsigned(int, *tl);
+		if (!more_dirs)
+			tryformoredirs = 0;
+	
+		/* loop through the dir entries, doctoring them to 4bsd form */
+		while (more_dirs && bigenough) {
+			if (nd->nd_flag & ND_NFSV4) {
+				NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED);
+				ncookie.lval[0] = *tl++;
+				ncookie.lval[1] = *tl++;
+				len = fxdr_unsigned(int, *tl);
+			} else if (nd->nd_flag & ND_NFSV3) {
+				NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED);
+				nfsva.na_fileid = fxdr_hyper(tl);
+				tl += 2;
+				len = fxdr_unsigned(int, *tl);
+			} else {
+				NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED);
+				nfsva.na_fileid = fxdr_unsigned(uint64_t,
+				    *tl++);
+				len = fxdr_unsigned(int, *tl);
+			}
+			if (len <= 0 || len > NFS_MAXNAMLEN) {
+				error = EBADRPC;
+				goto nfsmout;
+			}
+			tlen = roundup2(len, 8);
+			if (tlen == len)
+				tlen += 8;  /* To ensure null termination. */
+			left = DIRBLKSIZ - blksiz;
+			if (_GENERIC_DIRLEN(len) + NFSX_HYPER > left) {
+				NFSBZERO(uio_iov_base(uiop), left);
+				dp->d_reclen += left;
+				uio_iov_base_add(uiop, left);
+				uio_iov_len_add(uiop, -(left));
+				uio_uio_resid_add(uiop, -(left));
+				uiop->uio_offset += left;
+				blksiz = 0;
+			}
+			if (_GENERIC_DIRLEN(len) + NFSX_HYPER >
+			    uio_uio_resid(uiop))
+				bigenough = 0;
+			if (bigenough) {
+				dp = (struct dirent *)uio_iov_base(uiop);
+				dp->d_pad0 = dp->d_pad1 = 0;
+				dp->d_off = 0;
+				dp->d_namlen = len;
+				dp->d_reclen = _GENERIC_DIRLEN(len) +
+				    NFSX_HYPER;
+				dp->d_type = DT_UNKNOWN;
+				blksiz += dp->d_reclen;
+				if (blksiz == DIRBLKSIZ)
+					blksiz = 0;
+				uio_uio_resid_add(uiop, -(DIRHDSIZ));
+				uiop->uio_offset += DIRHDSIZ;
+				uio_iov_base_add(uiop, DIRHDSIZ);
+				uio_iov_len_add(uiop, -(DIRHDSIZ));
+				error = nfsm_mbufuio(nd, uiop, len);
+				if (error)
+					goto nfsmout;
+				cp = uio_iov_base(uiop);
+				tlen -= len;
+				NFSBZERO(cp, tlen);
+				cp += tlen;	/* points to cookie storage */
+				tl2 = (u_int32_t *)cp;
+				uio_iov_base_add(uiop, (tlen + NFSX_HYPER));
+				uio_iov_len_add(uiop, -(tlen + NFSX_HYPER));
+				uio_uio_resid_add(uiop, -(tlen + NFSX_HYPER));
+				uiop->uio_offset += (tlen + NFSX_HYPER);
+			} else {
+				error = nfsm_advance(nd, NFSM_RNDUP(len), -1);
+				if (error)
+					goto nfsmout;
+			}
+			if (nd->nd_flag & ND_NFSV4) {
+				rderr = 0;
+				nfsva.na_mntonfileno = UINT64_MAX;
+				error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
+				    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+				    NULL, NULL, &rderr, p, cred);
+				if (error)
+					goto nfsmout;
+				NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			} else if (nd->nd_flag & ND_NFSV3) {
+				NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED);
+				ncookie.lval[0] = *tl++;
+				ncookie.lval[1] = *tl++;
+			} else {
+				NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED);
+				ncookie.lval[0] = 0;
+				ncookie.lval[1] = *tl++;
+			}
+			if (bigenough) {
+			    if (nd->nd_flag & ND_NFSV4) {
+				if (rderr) {
+				    dp->d_fileno = 0;
+				} else {
+				    if (gotmnton) {
+					if (nfsva.na_mntonfileno != UINT64_MAX)
+					    dp->d_fileno = nfsva.na_mntonfileno;
+					else
+					    dp->d_fileno = nfsva.na_fileid;
+				    } else if (nfsva.na_filesid[0] ==
+					dnp->n_vattr.na_filesid[0] &&
+					nfsva.na_filesid[1] ==
+					dnp->n_vattr.na_filesid[1]) {
+					dp->d_fileno = nfsva.na_fileid;
+				    } else {
+					do {
+					    fakefileno--;
+					} while (fakefileno ==
+					    nfsva.na_fileid);
+					dp->d_fileno = fakefileno;
+				    }
+				    dp->d_type = vtonfs_dtype(nfsva.na_type);
+				}
+			    } else {
+				dp->d_fileno = nfsva.na_fileid;
+			    }
+			    *tl2++ = cookiep->nfsuquad[0] = cookie.lval[0] =
+				ncookie.lval[0];
+			    *tl2 = cookiep->nfsuquad[1] = cookie.lval[1] =
+				ncookie.lval[1];
+			}
+			more_dirs = fxdr_unsigned(int, *tl);
+		}
+		/*
+		 * If at end of rpc data, get the eof boolean
+		 */
+		if (!more_dirs) {
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			eof = fxdr_unsigned(int, *tl);
+			if (tryformoredirs)
+				more_dirs = !eof;
+			if (nd->nd_flag & ND_NFSV4) {
+				error = nfscl_postop_attr(nd, nap, attrflagp,
+				    stuff);
+				if (error)
+					goto nfsmout;
+			}
+		}
+		mbuf_freem(nd->nd_mrep);
+		nd->nd_mrep = NULL;
+	}
+	/*
+	 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
+	 * by increasing d_reclen for the last record.
+	 */
+	if (blksiz > 0) {
+		left = DIRBLKSIZ - blksiz;
+		NFSBZERO(uio_iov_base(uiop), left);
+		dp->d_reclen += left;
+		uio_iov_base_add(uiop, left);
+		uio_iov_len_add(uiop, -(left));
+		uio_uio_resid_add(uiop, -(left));
+		uiop->uio_offset += left;
+	}
+
+	/*
+	 * If returning no data, assume end of file.
+	 * If not bigenough, return not end of file, since you aren't
+	 *    returning all the data.
+	 * Otherwise, return the eof flag from the server.
+	 */
+	if (eofp) {
+		if (tresid == ((size_t)(uio_uio_resid(uiop))))
+			*eofp = 1;
+		else if (!bigenough)
+			*eofp = 0;
+		else
+			*eofp = eof;
+	}
+
+	/*
+	 * Add extra empty records to any remaining DIRBLKSIZ chunks.
+	 */
+	while (uio_uio_resid(uiop) > 0 && uio_uio_resid(uiop) != tresid) {
+		dp = (struct dirent *)uio_iov_base(uiop);
+		NFSBZERO(dp, DIRBLKSIZ);
+		dp->d_type = DT_UNKNOWN;
+		tl = (u_int32_t *)&dp->d_name[4];
+		*tl++ = cookie.lval[0];
+		*tl = cookie.lval[1];
+		dp->d_reclen = DIRBLKSIZ;
+		uio_iov_base_add(uiop, DIRBLKSIZ);
+		uio_iov_len_add(uiop, -(DIRBLKSIZ));
+		uio_uio_resid_add(uiop, -(DIRBLKSIZ));
+		uiop->uio_offset += DIRBLKSIZ;
+	}
+
+nfsmout:
+	if (nd->nd_mrep != NULL)
+		mbuf_freem(nd->nd_mrep);
+	return (error);
+}
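+
+/*
+ * Illustrative sketch (kept under #if 0, not compiled): how the readdir
+ * code above sizes each record it copies into the uio buffer.  Every name
+ * is padded out to an 8 byte boundary with at least one NUL byte, and
+ * NFSX_HYPER (8) extra bytes are appended to hold the directory offset
+ * cookie; the last record of each DIRBLKSIZ block then absorbs any
+ * leftover space via d_reclen.  The helper name readdir_reclen() is
+ * hypothetical and only mirrors the _GENERIC_DIRLEN(len) + NFSX_HYPER
+ * arithmetic used above.
+ */
+#if 0
+#include <stddef.h>
+#include <dirent.h>
+
+static size_t
+readdir_reclen(size_t namlen)
+{
+	size_t tlen;
+
+	/* Name rounded up to 8 bytes, always leaving room for a NUL. */
+	tlen = (namlen + 8) & ~(size_t)7;
+	/* Fixed dirent header + padded name + 8 byte NFS cookie. */
+	return (offsetof(struct dirent, d_name) + tlen + 8);
+}
+#endif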
+
+#ifndef APPLE
+/*
+ * NFS V3 readdir plus RPC. Used in place of nfsrpc_readdir().
+ * (Also used for NFS V4 when mount flag set.)
+ * (ditto above w.r.t. multiple of DIRBLKSIZ, etc.)
+ */
+APPLESTATIC int
+nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp,
+    int *eofp, void *stuff)
+{
+	int len, left;
+	struct dirent *dp = NULL;
+	u_int32_t *tl;
+	vnode_t newvp = NULLVP;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nameidata nami, *ndp = &nami;
+	struct componentname *cnp = &ndp->ni_cnd;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsnode *dnp = VTONFS(vp), *np;
+	struct nfsvattr nfsva;
+	struct nfsfh *nfhp;
+	nfsquad_t cookie, ncookie;
+	int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
+	int attrflag, tryformoredirs = 1, eof = 0, gotmnton = 0;
+	int isdotdot = 0, unlocknewvp = 0;
+	u_int64_t dotfileid, dotdotfileid = 0, fakefileno = UINT64_MAX;
+	u_int64_t fileno = 0;
+	char *cp;
+	nfsattrbit_t attrbits, dattrbits;
+	size_t tresid;
+	u_int32_t *tl2 = NULL, rderr;
+	struct timespec dctime;
+
+	KASSERT(uiop->uio_iovcnt == 1 &&
+	    (uio_uio_resid(uiop) & (DIRBLKSIZ - 1)) == 0,
+	    ("nfs readdirplusrpc bad uio"));
+	ncookie.lval[0] = ncookie.lval[1] = 0;
+	timespecclear(&dctime);
+	*attrflagp = 0;
+	if (eofp != NULL)
+		*eofp = 0;
+	ndp->ni_dvp = vp;
+	nd->nd_mrep = NULL;
+	cookie.lval[0] = cookiep->nfsuquad[0];
+	cookie.lval[1] = cookiep->nfsuquad[1];
+	tresid = uio_uio_resid(uiop);
+
+	/*
+	 * For NFSv4, first create the "." and ".." entries.
+	 */
+	if (NFSHASNFSV4(nmp)) {
+		NFSGETATTR_ATTRBIT(&dattrbits);
+		NFSZERO_ATTRBIT(&attrbits);
+		NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FILEID);
+		if (NFSISSET_ATTRBIT(&dnp->n_vattr.na_suppattr,
+		    NFSATTRBIT_MOUNTEDONFILEID)) {
+			NFSSETBIT_ATTRBIT(&attrbits,
+			    NFSATTRBIT_MOUNTEDONFILEID);
+			gotmnton = 1;
+		} else {
+			/*
+			 * Must fake it. Use the fileno, except when the
+			 * fsid differs from that of the directory. For that
+			 * case, generate a fake fileno that is not the same.
+			 */
+			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FSID);
+			gotmnton = 0;
+		}
+
+		/*
+		 * Joy, oh joy. For V4 we get to hand craft '.' and '..'.
+		 */
+		if (uiop->uio_offset == 0) {
+			NFSCL_REQSTART(nd, NFSPROC_LOOKUPP, vp);
+			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+			*tl++ = txdr_unsigned(NFSV4OP_GETFH);
+			*tl = txdr_unsigned(NFSV4OP_GETATTR);
+			(void) nfsrv_putattrbit(nd, &attrbits);
+			error = nfscl_request(nd, vp, p, cred, stuff);
+			if (error)
+			    return (error);
+			dotfileid = 0;	/* Fake out the compiler. */
+			if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+			    error = nfsm_loadattr(nd, &nfsva);
+			    if (error != 0)
+				goto nfsmout;
+			    dctime = nfsva.na_ctime;
+			    dotfileid = nfsva.na_fileid;
+			}
+			if (nd->nd_repstat == 0) {
+			    NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+			    len = fxdr_unsigned(int, *(tl + 4));
+			    if (len > 0 && len <= NFSX_V4FHMAX)
+				error = nfsm_advance(nd, NFSM_RNDUP(len), -1);
+			    else
+				error = EPERM;
+			    if (!error) {
+				NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED);
+				nfsva.na_mntonfileno = UINT64_MAX;
+				error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
+				    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+				    NULL, NULL, NULL, p, cred);
+				if (error) {
+				    dotdotfileid = dotfileid;
+				} else if (gotmnton) {
+				    if (nfsva.na_mntonfileno != UINT64_MAX)
+					dotdotfileid = nfsva.na_mntonfileno;
+				    else
+					dotdotfileid = nfsva.na_fileid;
+				} else if (nfsva.na_filesid[0] ==
+				    dnp->n_vattr.na_filesid[0] &&
+				    nfsva.na_filesid[1] ==
+				    dnp->n_vattr.na_filesid[1]) {
+				    dotdotfileid = nfsva.na_fileid;
+				} else {
+				    do {
+					fakefileno--;
+				    } while (fakefileno ==
+					nfsva.na_fileid);
+				    dotdotfileid = fakefileno;
+				}
+			    }
+			} else if (nd->nd_repstat == NFSERR_NOENT) {
+			    /*
+			     * Lookupp returns NFSERR_NOENT when we are
+			     * at the root, so just use the current dir.
+			     */
+			    nd->nd_repstat = 0;
+			    dotdotfileid = dotfileid;
+			} else {
+			    error = nd->nd_repstat;
+			}
+			mbuf_freem(nd->nd_mrep);
+			if (error)
+			    return (error);
+			nd->nd_mrep = NULL;
+			dp = (struct dirent *)uio_iov_base(uiop);
+			dp->d_pad0 = dp->d_pad1 = 0;
+			dp->d_off = 0;
+			dp->d_type = DT_DIR;
+			dp->d_fileno = dotfileid;
+			dp->d_namlen = 1;
+			*((uint64_t *)dp->d_name) = 0;	/* Zero pad it. */
+			dp->d_name[0] = '.';
+			dp->d_reclen = _GENERIC_DIRSIZ(dp) + NFSX_HYPER;
+			/*
+			 * Just make these offset cookies 0.
+			 */
+			tl = (u_int32_t *)&dp->d_name[8];
+			*tl++ = 0;
+			*tl = 0;
+			blksiz += dp->d_reclen;
+			uio_uio_resid_add(uiop, -(dp->d_reclen));
+			uiop->uio_offset += dp->d_reclen;
+			uio_iov_base_add(uiop, dp->d_reclen);
+			uio_iov_len_add(uiop, -(dp->d_reclen));
+			dp = (struct dirent *)uio_iov_base(uiop);
+			dp->d_pad0 = dp->d_pad1 = 0;
+			dp->d_off = 0;
+			dp->d_type = DT_DIR;
+			dp->d_fileno = dotdotfileid;
+			dp->d_namlen = 2;
+			*((uint64_t *)dp->d_name) = 0;
+			dp->d_name[0] = '.';
+			dp->d_name[1] = '.';
+			dp->d_reclen = _GENERIC_DIRSIZ(dp) + NFSX_HYPER;
+			/*
+			 * Just make these offset cookies 0.
+			 */
+			tl = (u_int32_t *)&dp->d_name[8];
+			*tl++ = 0;
+			*tl = 0;
+			blksiz += dp->d_reclen;
+			uio_uio_resid_add(uiop, -(dp->d_reclen));
+			uiop->uio_offset += dp->d_reclen;
+			uio_iov_base_add(uiop, dp->d_reclen);
+			uio_iov_len_add(uiop, -(dp->d_reclen));
+		}
+		NFSREADDIRPLUS_ATTRBIT(&attrbits);
+		if (gotmnton)
+			NFSSETBIT_ATTRBIT(&attrbits,
+			    NFSATTRBIT_MOUNTEDONFILEID);
+	}
+
+	/*
+	 * Loop around doing readdir rpc's of size nm_readdirsize.
+	 * The stopping criteria is EOF or buffer full.
+	 */
+	while (more_dirs && bigenough) {
+		*attrflagp = 0;
+		NFSCL_REQSTART(nd, NFSPROC_READDIRPLUS, vp);
+ 		NFSM_BUILD(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
+		*tl++ = cookie.lval[0];
+		*tl++ = cookie.lval[1];
+		if (cookie.qval == 0) {
+			*tl++ = 0;
+			*tl++ = 0;
+		} else {
+			NFSLOCKNODE(dnp);
+			*tl++ = dnp->n_cookieverf.nfsuquad[0];
+			*tl++ = dnp->n_cookieverf.nfsuquad[1];
+			NFSUNLOCKNODE(dnp);
+		}
+		*tl++ = txdr_unsigned(nmp->nm_readdirsize);
+		*tl = txdr_unsigned(nmp->nm_readdirsize);
+		if (nd->nd_flag & ND_NFSV4) {
+			(void) nfsrv_putattrbit(nd, &attrbits);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_GETATTR);
+			(void) nfsrv_putattrbit(nd, &dattrbits);
+		}
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		if (nd->nd_flag & ND_NFSV3)
+			error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+		if (nd->nd_repstat || error) {
+			if (!error)
+				error = nd->nd_repstat;
+			goto nfsmout;
+		}
+		if ((nd->nd_flag & ND_NFSV3) != 0 && *attrflagp != 0)
+			dctime = nap->na_ctime;
+		NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+		NFSLOCKNODE(dnp);
+		dnp->n_cookieverf.nfsuquad[0] = *tl++;
+		dnp->n_cookieverf.nfsuquad[1] = *tl++;
+		NFSUNLOCKNODE(dnp);
+		more_dirs = fxdr_unsigned(int, *tl);
+		if (!more_dirs)
+			tryformoredirs = 0;
+	
+		/* loop through the dir entries, doctoring them to 4bsd form */
+		while (more_dirs && bigenough) {
+			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+			if (nd->nd_flag & ND_NFSV4) {
+				ncookie.lval[0] = *tl++;
+				ncookie.lval[1] = *tl++;
+			} else {
+				fileno = fxdr_hyper(tl);
+				tl += 2;
+			}
+			len = fxdr_unsigned(int, *tl);
+			if (len <= 0 || len > NFS_MAXNAMLEN) {
+				error = EBADRPC;
+				goto nfsmout;
+			}
+			tlen = roundup2(len, 8);
+			if (tlen == len)
+				tlen += 8;  /* To ensure null termination. */
+			left = DIRBLKSIZ - blksiz;
+			if (_GENERIC_DIRLEN(len) + NFSX_HYPER > left) {
+				NFSBZERO(uio_iov_base(uiop), left);
+				dp->d_reclen += left;
+				uio_iov_base_add(uiop, left);
+				uio_iov_len_add(uiop, -(left));
+				uio_uio_resid_add(uiop, -(left));
+				uiop->uio_offset += left;
+				blksiz = 0;
+			}
+			if (_GENERIC_DIRLEN(len) + NFSX_HYPER >
+			    uio_uio_resid(uiop))
+				bigenough = 0;
+			if (bigenough) {
+				dp = (struct dirent *)uio_iov_base(uiop);
+				dp->d_pad0 = dp->d_pad1 = 0;
+				dp->d_off = 0;
+				dp->d_namlen = len;
+				dp->d_reclen = _GENERIC_DIRLEN(len) +
+				    NFSX_HYPER;
+				dp->d_type = DT_UNKNOWN;
+				blksiz += dp->d_reclen;
+				if (blksiz == DIRBLKSIZ)
+					blksiz = 0;
+				uio_uio_resid_add(uiop, -(DIRHDSIZ));
+				uiop->uio_offset += DIRHDSIZ;
+				uio_iov_base_add(uiop, DIRHDSIZ);
+				uio_iov_len_add(uiop, -(DIRHDSIZ));
+				cnp->cn_nameptr = uio_iov_base(uiop);
+				cnp->cn_namelen = len;
+				NFSCNHASHZERO(cnp);
+				error = nfsm_mbufuio(nd, uiop, len);
+				if (error)
+					goto nfsmout;
+				cp = uio_iov_base(uiop);
+				tlen -= len;
+				NFSBZERO(cp, tlen);
+				cp += tlen;	/* points to cookie storage */
+				tl2 = (u_int32_t *)cp;
+				if (len == 2 && cnp->cn_nameptr[0] == '.' &&
+				    cnp->cn_nameptr[1] == '.')
+					isdotdot = 1;
+				else
+					isdotdot = 0;
+				uio_iov_base_add(uiop, (tlen + NFSX_HYPER));
+				uio_iov_len_add(uiop, -(tlen + NFSX_HYPER));
+				uio_uio_resid_add(uiop, -(tlen + NFSX_HYPER));
+				uiop->uio_offset += (tlen + NFSX_HYPER);
+			} else {
+				error = nfsm_advance(nd, NFSM_RNDUP(len), -1);
+				if (error)
+					goto nfsmout;
+			}
+			nfhp = NULL;
+			if (nd->nd_flag & ND_NFSV3) {
+				NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED);
+				ncookie.lval[0] = *tl++;
+				ncookie.lval[1] = *tl++;
+				attrflag = fxdr_unsigned(int, *tl);
+				if (attrflag) {
+				  error = nfsm_loadattr(nd, &nfsva);
+				  if (error)
+					goto nfsmout;
+				}
+				NFSM_DISSECT(tl,u_int32_t *,NFSX_UNSIGNED);
+				if (*tl) {
+					error = nfsm_getfh(nd, &nfhp);
+					if (error)
+					    goto nfsmout;
+				}
+				if (!attrflag && nfhp != NULL) {
+					free(nfhp, M_NFSFH);
+					nfhp = NULL;
+				}
+			} else {
+				rderr = 0;
+				nfsva.na_mntonfileno = 0xffffffff;
+				error = nfsv4_loadattr(nd, NULL, &nfsva, &nfhp,
+				    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+				    NULL, NULL, &rderr, p, cred);
+				if (error)
+					goto nfsmout;
+			}
+
+			if (bigenough) {
+			    if (nd->nd_flag & ND_NFSV4) {
+				if (rderr) {
+				    dp->d_fileno = 0;
+				} else if (gotmnton) {
+				    if (nfsva.na_mntonfileno != 0xffffffff)
+					dp->d_fileno = nfsva.na_mntonfileno;
+				    else
+					dp->d_fileno = nfsva.na_fileid;
+				} else if (nfsva.na_filesid[0] ==
+				    dnp->n_vattr.na_filesid[0] &&
+				    nfsva.na_filesid[1] ==
+				    dnp->n_vattr.na_filesid[1]) {
+				    dp->d_fileno = nfsva.na_fileid;
+				} else {
+				    do {
+					fakefileno--;
+				    } while (fakefileno ==
+					nfsva.na_fileid);
+				    dp->d_fileno = fakefileno;
+				}
+			    } else {
+				dp->d_fileno = fileno;
+			    }
+			    *tl2++ = cookiep->nfsuquad[0] = cookie.lval[0] =
+				ncookie.lval[0];
+			    *tl2 = cookiep->nfsuquad[1] = cookie.lval[1] =
+				ncookie.lval[1];
+
+			    if (nfhp != NULL) {
+				if (NFSRV_CMPFH(nfhp->nfh_fh, nfhp->nfh_len,
+				    dnp->n_fhp->nfh_fh, dnp->n_fhp->nfh_len)) {
+				    VREF(vp);
+				    newvp = vp;
+				    unlocknewvp = 0;
+				    free(nfhp, M_NFSFH);
+				    np = dnp;
+				} else if (isdotdot != 0) {
+				    /*
+				     * Skip doing a nfscl_nget() call for "..".
+				     * There's a race between acquiring the nfs
+				     * node here and lookups that look for the
+				     * directory being read (in the parent).
+				     * It would try to get a lock on ".." here,
+				     * owning the lock on the directory being
+				     * read. Lookup will hold the lock on ".."
+				     * and try to acquire the lock on the
+				     * directory being read.
+				     * If the directory is unlocked/relocked,
+				     * then there is a LOR with the buflock
+				     * when vp is relocked.
+				     */
+				    free(nfhp, M_NFSFH);
+				} else {
+				    error = nfscl_nget(vnode_mount(vp), vp,
+				      nfhp, cnp, p, &np, NULL, LK_EXCLUSIVE);
+				    if (!error) {
+					newvp = NFSTOV(np);
+					unlocknewvp = 1;
+				    }
+				}
+				nfhp = NULL;
+				if (newvp != NULLVP) {
+				    error = nfscl_loadattrcache(&newvp,
+					&nfsva, NULL, NULL, 0, 0);
+				    if (error) {
+					if (unlocknewvp)
+					    vput(newvp);
+					else
+					    vrele(newvp);
+					goto nfsmout;
+				    }
+				    dp->d_type =
+					vtonfs_dtype(np->n_vattr.na_type);
+				    ndp->ni_vp = newvp;
+				    NFSCNHASH(cnp, HASHINIT);
+				    if (cnp->cn_namelen <= NCHNAMLEN &&
+					(newvp->v_type != VDIR ||
+					 dctime.tv_sec != 0)) {
+					cache_enter_time(ndp->ni_dvp,
+					    ndp->ni_vp, cnp,
+					    &nfsva.na_ctime,
+					    newvp->v_type != VDIR ? NULL :
+					    &dctime);
+				    }
+				    if (unlocknewvp)
+					vput(newvp);
+				    else
+					vrele(newvp);
+				    newvp = NULLVP;
+				}
+			    }
+			} else if (nfhp != NULL) {
+			    free(nfhp, M_NFSFH);
+			}
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			more_dirs = fxdr_unsigned(int, *tl);
+		}
+		/*
+		 * If at end of rpc data, get the eof boolean
+		 */
+		if (!more_dirs) {
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+			eof = fxdr_unsigned(int, *tl);
+			if (tryformoredirs)
+				more_dirs = !eof;
+			if (nd->nd_flag & ND_NFSV4) {
+				error = nfscl_postop_attr(nd, nap, attrflagp,
+				    stuff);
+				if (error)
+					goto nfsmout;
+			}
+		}
+		mbuf_freem(nd->nd_mrep);
+		nd->nd_mrep = NULL;
+	}
+	/*
+	 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
+	 * by increasing d_reclen for the last record.
+	 */
+	if (blksiz > 0) {
+		left = DIRBLKSIZ - blksiz;
+		NFSBZERO(uio_iov_base(uiop), left);
+		dp->d_reclen += left;
+		uio_iov_base_add(uiop, left);
+		uio_iov_len_add(uiop, -(left));
+		uio_uio_resid_add(uiop, -(left));
+		uiop->uio_offset += left;
+	}
+
+	/*
+	 * If returning no data, assume end of file.
+	 * If not bigenough, return not end of file, since you aren't
+	 *    returning all the data.
+	 * Otherwise, return the eof flag from the server.
+	 */
+	if (eofp != NULL) {
+		if (tresid == uio_uio_resid(uiop))
+			*eofp = 1;
+		else if (!bigenough)
+			*eofp = 0;
+		else
+			*eofp = eof;
+	}
+
+	/*
+	 * Add extra empty records to any remaining DIRBLKSIZ chunks.
+	 */
+	while (uio_uio_resid(uiop) > 0 && uio_uio_resid(uiop) != tresid) {
+		dp = (struct dirent *)uio_iov_base(uiop);
+		NFSBZERO(dp, DIRBLKSIZ);
+		dp->d_type = DT_UNKNOWN;
+		tl = (u_int32_t *)&dp->d_name[4];
+		*tl++ = cookie.lval[0];
+		*tl = cookie.lval[1];
+		dp->d_reclen = DIRBLKSIZ;
+		uio_iov_base_add(uiop, DIRBLKSIZ);
+		uio_iov_len_add(uiop, -(DIRBLKSIZ));
+		uio_uio_resid_add(uiop, -(DIRBLKSIZ));
+		uiop->uio_offset += DIRBLKSIZ;
+	}
+
+nfsmout:
+	if (nd->nd_mrep != NULL)
+		mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+#endif	/* !APPLE */
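+
+/*
+ * Illustrative sketch (kept under #if 0, not compiled): the fake fileno
+ * scheme shared by nfsrpc_readdir() and nfsrpc_readdirplus() above.  When
+ * the server does not supply a mounted-on fileid and an entry's fsid
+ * differs from the directory's, the client hands out a synthetic fileno
+ * counting down from UINT64_MAX, skipping any value that would collide
+ * with the entry's real fileid.  next_fake_fileno() is a hypothetical
+ * name for that step.
+ */
+#if 0
+#include <stdint.h>
+
+static uint64_t
+next_fake_fileno(uint64_t *fakefileno, uint64_t real_fileid)
+{
+	do {
+		(*fakefileno)--;
+	} while (*fakefileno == real_fileid);
+	return (*fakefileno);
+}
+#endif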
+
+/*
+ * Nfs commit rpc
+ */
+APPLESTATIC int
+nfsrpc_commit(vnode_t vp, u_quad_t offset, int cnt, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	nfsattrbit_t attrbits;
+	int error;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	
+	*attrflagp = 0;
+	NFSCL_REQSTART(nd, NFSPROC_COMMIT, vp);
+	NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+	txdr_hyper(offset, tl);
+	tl += 2;
+	*tl = txdr_unsigned(cnt);
+	if (nd->nd_flag & ND_NFSV4) {
+		/*
+		 * And do a Getattr op.
+		 */
+		NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(NFSV4OP_GETATTR);
+		NFSGETATTR_ATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+	}
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	error = nfscl_wcc_data(nd, vp, nap, attrflagp, NULL, stuff);
+	if (!error && !nd->nd_repstat) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_VERF);
+		NFSLOCKMNT(nmp);
+		if (NFSBCMP(nmp->nm_verf, tl, NFSX_VERF)) {
+			NFSBCOPY(tl, nmp->nm_verf, NFSX_VERF);
+			nd->nd_repstat = NFSERR_STALEWRITEVERF;
+		}
+		NFSUNLOCKMNT(nmp);
+		if (nd->nd_flag & ND_NFSV4)
+			error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+	}
+nfsmout:
+	if (!error && nd->nd_repstat)
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
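+
+/*
+ * Note on the verifier handling above: the Commit reply's write verifier
+ * is compared with the copy cached in the nfsmount.  A mismatch means the
+ * server may have rebooted and discarded uncommitted (UNSTABLE) writes,
+ * so the new verifier is stored and NFSERR_STALEWRITEVERF is returned,
+ * telling the caller those writes must be resent.  A minimal sketch of a
+ * caller's reaction, with hypothetical helper names, kept under #if 0:
+ */
+#if 0
+static int
+commit_and_retry(vnode_t vp, u_quad_t off, int cnt, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff)
+{
+	int error;
+
+	error = nfsrpc_commit(vp, off, cnt, cred, p, nap, attrflagp, stuff);
+	if (error == NFSERR_STALEWRITEVERF)
+		redirty_unstable_writes(vp);	/* hypothetical: force resend */
+	return (error);
+}
+#endif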
+
+/*
+ * NFS byte range lock rpc.
+ * (Mostly just calls one of the three lower level RPC routines.)
+ */
+APPLESTATIC int
+nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl,
+    int reclaim, struct ucred *cred, NFSPROC_T *p, void *id, int flags)
+{
+	struct nfscllockowner *lp;
+	struct nfsclclient *clp;
+	struct nfsfh *nfhp;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	u_int64_t off, len;
+	off_t start, end;
+	u_int32_t clidrev = 0;
+	int error = 0, newone = 0, expireret = 0, retrycnt, donelocally;
+	int callcnt, dorpc;
+
+	/*
+	 * Convert the flock structure into a start and end and do POSIX
+	 * bounds checking.
+	 */
+	switch (fl->l_whence) {
+	case SEEK_SET:
+	case SEEK_CUR:
+		/*
+		 * Caller is responsible for adding any necessary offset
+		 * when SEEK_CUR is used.
+		 */
+		start = fl->l_start;
+		off = fl->l_start;
+		break;
+	case SEEK_END:
+		start = size + fl->l_start;
+		off = size + fl->l_start;
+		break;
+	default:
+		return (EINVAL);
+	}
+	if (start < 0)
+		return (EINVAL);
+	if (fl->l_len != 0) {
+		end = start + fl->l_len - 1;
+		if (end < start)
+			return (EINVAL);
+	}
+
+	len = fl->l_len;
+	if (len == 0)
+		len = NFS64BITSSET;
+	retrycnt = 0;
+	do {
+	    nd->nd_repstat = 0;
+	    if (op == F_GETLK) {
+		error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
+		if (error)
+			return (error);
+		error = nfscl_lockt(vp, clp, off, len, fl, p, id, flags);
+		if (!error) {
+			clidrev = clp->nfsc_clientidrev;
+			error = nfsrpc_lockt(nd, vp, clp, off, len, fl, cred,
+			    p, id, flags);
+		} else if (error == -1) {
+			error = 0;
+		}
+		nfscl_clientrelease(clp);
+	    } else if (op == F_UNLCK && fl->l_type == F_UNLCK) {
+		/*
+		 * We must loop around for all lockowner cases.
+		 */
+		callcnt = 0;
+		error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
+		if (error)
+			return (error);
+		do {
+		    error = nfscl_relbytelock(vp, off, len, cred, p, callcnt,
+			clp, id, flags, &lp, &dorpc);
+		    /*
+		     * If it returns a NULL lp, we're done.
+		     */
+		    if (lp == NULL) {
+			if (callcnt == 0)
+			    nfscl_clientrelease(clp);
+			else
+			    nfscl_releasealllocks(clp, vp, p, id, flags);
+			return (error);
+		    }
+		    if (nmp->nm_clp != NULL)
+			clidrev = nmp->nm_clp->nfsc_clientidrev;
+		    else
+			clidrev = 0;
+		    /*
+		     * If the server doesn't support Posix lock semantics,
+		     * only allow locks on the entire file, since it won't
+		     * handle overlapping byte ranges.
+		     * There might still be a problem when a lock
+		     * upgrade/downgrade (read<->write) occurs, since the
+		     * server "might" expect an unlock first?
+		     */
+		    if (dorpc && (lp->nfsl_open->nfso_posixlock ||
+			(off == 0 && len == NFS64BITSSET))) {
+			/*
+			 * Since the lock records will go away, we must
+			 * wait for grace and delay here.
+			 */
+			do {
+			    error = nfsrpc_locku(nd, nmp, lp, off, len,
+				NFSV4LOCKT_READ, cred, p, 0);
+			    if ((nd->nd_repstat == NFSERR_GRACE ||
+				 nd->nd_repstat == NFSERR_DELAY) &&
+				error == 0)
+				(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
+				    "nfs_advlock");
+			} while ((nd->nd_repstat == NFSERR_GRACE ||
+			    nd->nd_repstat == NFSERR_DELAY) && error == 0);
+		    }
+		    callcnt++;
+		} while (error == 0 && nd->nd_repstat == 0);
+		nfscl_releasealllocks(clp, vp, p, id, flags);
+	    } else if (op == F_SETLK) {
+		error = nfscl_getbytelock(vp, off, len, fl->l_type, cred, p,
+		    NULL, 0, id, flags, NULL, NULL, &lp, &newone, &donelocally);
+		if (error || donelocally) {
+			return (error);
+		}
+		if (nmp->nm_clp != NULL)
+			clidrev = nmp->nm_clp->nfsc_clientidrev;
+		else
+			clidrev = 0;
+		nfhp = VTONFS(vp)->n_fhp;
+		if (!lp->nfsl_open->nfso_posixlock &&
+		    (off != 0 || len != NFS64BITSSET)) {
+			error = EINVAL;
+		} else {
+			error = nfsrpc_lock(nd, nmp, vp, nfhp->nfh_fh,
+			    nfhp->nfh_len, lp, newone, reclaim, off,
+			    len, fl->l_type, cred, p, 0);
+		}
+		if (!error)
+			error = nd->nd_repstat;
+		nfscl_lockrelease(lp, error, newone);
+	    } else {
+		error = EINVAL;
+	    }
+	    if (!error)
+	        error = nd->nd_repstat;
+	    if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+		error == NFSERR_STALEDONTRECOVER ||
+		error == NFSERR_STALECLIENTID || error == NFSERR_DELAY ||
+		error == NFSERR_BADSESSION) {
+		(void) nfs_catnap(PZERO, error, "nfs_advlock");
+	    } else if ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID)
+		&& clidrev != 0) {
+		expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p);
+		retrycnt++;
+	    }
+	} while (error == NFSERR_GRACE ||
+	    error == NFSERR_STALECLIENTID || error == NFSERR_DELAY ||
+	    error == NFSERR_STALEDONTRECOVER || error == NFSERR_STALESTATEID ||
+	    error == NFSERR_BADSESSION ||
+	    ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) &&
+	     expireret == 0 && clidrev != 0 && retrycnt < 4));
+	if (error && retrycnt >= 4)
+		error = EIO;
+	return (error);
+}
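+
+/*
+ * Illustrative sketch (kept under #if 0, not compiled): the flock to NFS
+ * byte range conversion done at the top of nfsrpc_advlock() above,
+ * simplified to the SEEK_SET/SEEK_END cases.  An l_len of 0 means "to end
+ * of file" and is sent as the all-ones length NFS64BITSSET.  The helper
+ * name flock_to_nfsrange() is hypothetical.
+ */
+#if 0
+static int
+flock_to_nfsrange(const struct flock *fl, off_t size, u_int64_t *offp,
+    u_int64_t *lenp)
+{
+	off_t start;
+
+	start = (fl->l_whence == SEEK_END) ? size + fl->l_start : fl->l_start;
+	if (start < 0)
+		return (EINVAL);
+	/* Reject a range whose end wraps around below its start. */
+	if (fl->l_len != 0 && start + fl->l_len - 1 < start)
+		return (EINVAL);
+	*offp = start;
+	*lenp = (fl->l_len == 0) ? NFS64BITSSET : (u_int64_t)fl->l_len;
+	return (0);
+}
+#endif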
+
+/*
+ * The lower level routine for the LockT case.
+ */
+APPLESTATIC int
+nfsrpc_lockt(struct nfsrv_descript *nd, vnode_t vp,
+    struct nfsclclient *clp, u_int64_t off, u_int64_t len, struct flock *fl,
+    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
+{
+	u_int32_t *tl;
+	int error, type, size;
+	uint8_t own[NFSV4CL_LOCKNAMELEN + NFSX_V4FHMAX];
+	struct nfsnode *np;
+	struct nfsmount *nmp;
+	struct nfsclsession *tsep;
+
+	nmp = VFSTONFS(vp->v_mount);
+	NFSCL_REQSTART(nd, NFSPROC_LOCKT, vp);
+	NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED);
+	if (fl->l_type == F_RDLCK)
+		*tl++ = txdr_unsigned(NFSV4LOCKT_READ);
+	else
+		*tl++ = txdr_unsigned(NFSV4LOCKT_WRITE);
+	txdr_hyper(off, tl);
+	tl += 2;
+	txdr_hyper(len, tl);
+	tl += 2;
+	tsep = nfsmnt_mdssession(nmp);
+	*tl++ = tsep->nfsess_clientid.lval[0];
+	*tl = tsep->nfsess_clientid.lval[1];
+	nfscl_filllockowner(id, own, flags);
+	np = VTONFS(vp);
+	NFSBCOPY(np->n_fhp->nfh_fh, &own[NFSV4CL_LOCKNAMELEN],
+	    np->n_fhp->nfh_len);
+	(void)nfsm_strtom(nd, own, NFSV4CL_LOCKNAMELEN + np->n_fhp->nfh_len);
+	error = nfscl_request(nd, vp, p, cred, NULL);
+	if (error)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		fl->l_type = F_UNLCK;
+	} else if (nd->nd_repstat == NFSERR_DENIED) {
+		nd->nd_repstat = 0;
+		fl->l_whence = SEEK_SET;
+		NFSM_DISSECT(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
+		fl->l_start = fxdr_hyper(tl);
+		tl += 2;
+		len = fxdr_hyper(tl);
+		tl += 2;
+		if (len == NFS64BITSSET)
+			fl->l_len = 0;
+		else
+			fl->l_len = len;
+		type = fxdr_unsigned(int, *tl++);
+		if (type == NFSV4LOCKT_WRITE)
+			fl->l_type = F_WRLCK;
+		else
+			fl->l_type = F_RDLCK;
+		/*
+		 * XXX For now, I have no idea what to do with the
+		 * conflicting lock_owner, so I'll just set the pid == 0
+		 * and skip over the lock_owner.
+		 */
+		fl->l_pid = (pid_t)0;
+		tl += 2;
+		size = fxdr_unsigned(int, *tl);
+		if (size < 0 || size > NFSV4_OPAQUELIMIT)
+			error = EBADRPC;
+		if (!error)
+			error = nfsm_advance(nd, NFSM_RNDUP(size), -1);
+	} else if (nd->nd_repstat == NFSERR_STALECLIENTID)
+		nfscl_initiate_recovery(clp);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
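+
+/*
+ * Note on the LockT reply handling above: nd_repstat == 0 means no
+ * conflicting lock exists, which is reported to fcntl(F_GETLK) callers by
+ * setting l_type to F_UNLCK, while NFSERR_DENIED is not treated as an
+ * error; it just means the conflicting lock's range and type were copied
+ * back into *fl, with l_pid set to 0 because the conflicting NFSv4
+ * lock_owner is not mapped to a local pid.
+ */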
+
+/*
+ * Lower level function that performs the LockU RPC.
+ */
+static int
+nfsrpc_locku(struct nfsrv_descript *nd, struct nfsmount *nmp,
+    struct nfscllockowner *lp, u_int64_t off, u_int64_t len,
+    u_int32_t type, struct ucred *cred, NFSPROC_T *p, int syscred)
+{
+	u_int32_t *tl;
+	int error;
+
+	nfscl_reqstart(nd, NFSPROC_LOCKU, nmp, lp->nfsl_open->nfso_fh,
+	    lp->nfsl_open->nfso_fhlen, NULL, NULL, 0, 0);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + 6 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(type);
+	*tl = txdr_unsigned(lp->nfsl_seqid);
+	if (nfstest_outofseq &&
+	    (arc4random() % nfstest_outofseq) == 0)
+		*tl = txdr_unsigned(lp->nfsl_seqid + 1);
+	tl++;
+	if (NFSHASNFSV4N(nmp))
+		*tl++ = 0;
+	else
+		*tl++ = lp->nfsl_stateid.seqid;
+	*tl++ = lp->nfsl_stateid.other[0];
+	*tl++ = lp->nfsl_stateid.other[1];
+	*tl++ = lp->nfsl_stateid.other[2];
+	txdr_hyper(off, tl);
+	tl += 2;
+	txdr_hyper(len, tl);
+	if (syscred)
+		nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	NFSCL_INCRSEQID(lp->nfsl_seqid, nd);
+	if (error)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID);
+		lp->nfsl_stateid.seqid = *tl++;
+		lp->nfsl_stateid.other[0] = *tl++;
+		lp->nfsl_stateid.other[1] = *tl++;
+		lp->nfsl_stateid.other[2] = *tl;
+	} else if (nd->nd_repstat == NFSERR_STALESTATEID)
+		nfscl_initiate_recovery(lp->nfsl_open->nfso_own->nfsow_clp);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * The actual Lock RPC.
+ */
+APPLESTATIC int
+nfsrpc_lock(struct nfsrv_descript *nd, struct nfsmount *nmp, vnode_t vp,
+    u_int8_t *nfhp, int fhlen, struct nfscllockowner *lp, int newone,
+    int reclaim, u_int64_t off, u_int64_t len, short type, struct ucred *cred,
+    NFSPROC_T *p, int syscred)
+{
+	u_int32_t *tl;
+	int error, size;
+	uint8_t own[NFSV4CL_LOCKNAMELEN + NFSX_V4FHMAX];
+	struct nfsclsession *tsep;
+
+	nfscl_reqstart(nd, NFSPROC_LOCK, nmp, nfhp, fhlen, NULL, NULL, 0, 0);
+	NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED);
+	if (type == F_RDLCK)
+		*tl++ = txdr_unsigned(NFSV4LOCKT_READ);
+	else
+		*tl++ = txdr_unsigned(NFSV4LOCKT_WRITE);
+	*tl++ = txdr_unsigned(reclaim);
+	txdr_hyper(off, tl);
+	tl += 2;
+	txdr_hyper(len, tl);
+	tl += 2;
+	if (newone) {
+	    *tl = newnfs_true;
+	    NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID +
+		2 * NFSX_UNSIGNED + NFSX_HYPER);
+	    *tl++ = txdr_unsigned(lp->nfsl_open->nfso_own->nfsow_seqid);
+	    if (NFSHASNFSV4N(nmp))
+		*tl++ = 0;
+	    else
+		*tl++ = lp->nfsl_open->nfso_stateid.seqid;
+	    *tl++ = lp->nfsl_open->nfso_stateid.other[0];
+	    *tl++ = lp->nfsl_open->nfso_stateid.other[1];
+	    *tl++ = lp->nfsl_open->nfso_stateid.other[2];
+	    *tl++ = txdr_unsigned(lp->nfsl_seqid);
+	    tsep = nfsmnt_mdssession(nmp);
+	    *tl++ = tsep->nfsess_clientid.lval[0];
+	    *tl = tsep->nfsess_clientid.lval[1];
+	    NFSBCOPY(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN);
+	    NFSBCOPY(nfhp, &own[NFSV4CL_LOCKNAMELEN], fhlen);
+	    (void)nfsm_strtom(nd, own, NFSV4CL_LOCKNAMELEN + fhlen);
+	} else {
+	    *tl = newnfs_false;
+	    NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + NFSX_UNSIGNED);
+	    if (NFSHASNFSV4N(nmp))
+		*tl++ = 0;
+	    else
+		*tl++ = lp->nfsl_stateid.seqid;
+	    *tl++ = lp->nfsl_stateid.other[0];
+	    *tl++ = lp->nfsl_stateid.other[1];
+	    *tl++ = lp->nfsl_stateid.other[2];
+	    *tl = txdr_unsigned(lp->nfsl_seqid);
+	    if (nfstest_outofseq &&
+		(arc4random() % nfstest_outofseq) == 0)
+		    *tl = txdr_unsigned(lp->nfsl_seqid + 1);
+	}
+	if (syscred)
+		nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error)
+		return (error);
+	if (newone)
+	    NFSCL_INCRSEQID(lp->nfsl_open->nfso_own->nfsow_seqid, nd);
+	NFSCL_INCRSEQID(lp->nfsl_seqid, nd);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID);
+		lp->nfsl_stateid.seqid = *tl++;
+		lp->nfsl_stateid.other[0] = *tl++;
+		lp->nfsl_stateid.other[1] = *tl++;
+		lp->nfsl_stateid.other[2] = *tl;
+	} else if (nd->nd_repstat == NFSERR_DENIED) {
+		NFSM_DISSECT(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
+		size = fxdr_unsigned(int, *(tl + 7));
+		if (size < 0 || size > NFSV4_OPAQUELIMIT)
+			error = EBADRPC;
+		if (!error)
+			error = nfsm_advance(nd, NFSM_RNDUP(size), -1);
+	} else if (nd->nd_repstat == NFSERR_STALESTATEID)
+		nfscl_initiate_recovery(lp->nfsl_open->nfso_own->nfsow_clp);
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
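+
+/*
+ * Note on the seqid handling above: NFSCL_INCRSEQID() advances the
+ * lock-owner sequence number based on the reply status, and for a new
+ * lock owner the open owner's seqid is bumped as well.  For NFSv4.1
+ * mounts (NFSHASNFSV4N) the stateid's seqid field is sent as zero, which
+ * NFSv4.1 defines as matching the current seqid.
+ */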
+
+/*
+ * nfs statfs rpc
+ * (always called with the vp for the mount point)
+ */
+APPLESTATIC int
+nfsrpc_statfs(vnode_t vp, struct nfsstatfs *sbp, struct nfsfsinfo *fsp,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp,
+    void *stuff)
+{
+	u_int32_t *tl = NULL;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp;
+	nfsattrbit_t attrbits;
+	int error;
+
+	*attrflagp = 0;
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (NFSHASNFSV4(nmp)) {
+		/*
+		 * For V4, you actually do a getattr.
+		 */
+		NFSCL_REQSTART(nd, NFSPROC_GETATTR, vp);
+		NFSSTATFS_GETATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+		nd->nd_flag |= ND_USEGSSNAME;
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		if (nd->nd_repstat == 0) {
+			error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
+			    NULL, NULL, sbp, fsp, NULL, 0, NULL, NULL, NULL, p,
+			    cred);
+			if (!error) {
+				nmp->nm_fsid[0] = nap->na_filesid[0];
+				nmp->nm_fsid[1] = nap->na_filesid[1];
+				NFSSETHASSETFSID(nmp);
+				*attrflagp = 1;
+			}
+		} else {
+			error = nd->nd_repstat;
+		}
+		if (error)
+			goto nfsmout;
+	} else {
+		NFSCL_REQSTART(nd, NFSPROC_FSSTAT, vp);
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		if (nd->nd_flag & ND_NFSV3) {
+			error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+			if (error)
+				goto nfsmout;
+		}
+		if (nd->nd_repstat) {
+			error = nd->nd_repstat;
+			goto nfsmout;
+		}
+		NFSM_DISSECT(tl, u_int32_t *,
+		    NFSX_STATFS(nd->nd_flag & ND_NFSV3));
+	}
+	if (NFSHASNFSV3(nmp)) {
+		sbp->sf_tbytes = fxdr_hyper(tl); tl += 2;
+		sbp->sf_fbytes = fxdr_hyper(tl); tl += 2;
+		sbp->sf_abytes = fxdr_hyper(tl); tl += 2;
+		sbp->sf_tfiles = fxdr_hyper(tl); tl += 2;
+		sbp->sf_ffiles = fxdr_hyper(tl); tl += 2;
+		sbp->sf_afiles = fxdr_hyper(tl); tl += 2;
+		sbp->sf_invarsec = fxdr_unsigned(u_int32_t, *tl);
+	} else if (NFSHASNFSV4(nmp) == 0) {
+		sbp->sf_tsize = fxdr_unsigned(u_int32_t, *tl++);
+		sbp->sf_bsize = fxdr_unsigned(u_int32_t, *tl++);
+		sbp->sf_blocks = fxdr_unsigned(u_int32_t, *tl++);
+		sbp->sf_bfree = fxdr_unsigned(u_int32_t, *tl++);
+		sbp->sf_bavail = fxdr_unsigned(u_int32_t, *tl);
+	}
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs pathconf rpc
+ */
+APPLESTATIC int
+nfsrpc_pathconf(vnode_t vp, struct nfsv3_pathconf *pc,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp,
+    void *stuff)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp;
+	u_int32_t *tl;
+	nfsattrbit_t attrbits;
+	int error;
+
+	*attrflagp = 0;
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (NFSHASNFSV4(nmp)) {
+		/*
+		 * For V4, you actually do a getattr.
+		 */
+		NFSCL_REQSTART(nd, NFSPROC_GETATTR, vp);
+		NFSPATHCONF_GETATTRBIT(&attrbits);
+		(void) nfsrv_putattrbit(nd, &attrbits);
+		nd->nd_flag |= ND_USEGSSNAME;
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		if (nd->nd_repstat == 0) {
+			error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
+			    pc, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, p,
+			    cred);
+			if (!error)
+				*attrflagp = 1;
+		} else {
+			error = nd->nd_repstat;
+		}
+	} else {
+		NFSCL_REQSTART(nd, NFSPROC_PATHCONF, vp);
+		error = nfscl_request(nd, vp, p, cred, stuff);
+		if (error)
+			return (error);
+		error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+		if (nd->nd_repstat && !error)
+			error = nd->nd_repstat;
+		if (!error) {
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_V3PATHCONF);
+			pc->pc_linkmax = fxdr_unsigned(u_int32_t, *tl++);
+			pc->pc_namemax = fxdr_unsigned(u_int32_t, *tl++);
+			pc->pc_notrunc = fxdr_unsigned(u_int32_t, *tl++);
+			pc->pc_chownrestricted =
+			    fxdr_unsigned(u_int32_t, *tl++);
+			pc->pc_caseinsensitive =
+			    fxdr_unsigned(u_int32_t, *tl++);
+			pc->pc_casepreserving = fxdr_unsigned(u_int32_t, *tl);
+		}
+	}
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs version 3 fsinfo rpc call
+ */
+APPLESTATIC int
+nfsrpc_fsinfo(vnode_t vp, struct nfsfsinfo *fsp, struct ucred *cred,
+    NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+
+	*attrflagp = 0;
+	NFSCL_REQSTART(nd, NFSPROC_FSINFO, vp);
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	error = nfscl_postop_attr(nd, nap, attrflagp, stuff);
+	if (nd->nd_repstat && !error)
+		error = nd->nd_repstat;
+	if (!error) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_V3FSINFO);
+		fsp->fs_rtmax = fxdr_unsigned(u_int32_t, *tl++);
+		fsp->fs_rtpref = fxdr_unsigned(u_int32_t, *tl++);
+		fsp->fs_rtmult = fxdr_unsigned(u_int32_t, *tl++);
+		fsp->fs_wtmax = fxdr_unsigned(u_int32_t, *tl++);
+		fsp->fs_wtpref = fxdr_unsigned(u_int32_t, *tl++);
+		fsp->fs_wtmult = fxdr_unsigned(u_int32_t, *tl++);
+		fsp->fs_dtpref = fxdr_unsigned(u_int32_t, *tl++);
+		fsp->fs_maxfilesize = fxdr_hyper(tl);
+		tl += 2;
+		fxdr_nfsv3time(tl, &fsp->fs_timedelta);
+		tl += 2;
+		fsp->fs_properties = fxdr_unsigned(u_int32_t, *tl);
+	}
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * This function performs the Renew RPC.
+ */
+APPLESTATIC int
+nfsrpc_renew(struct nfsclclient *clp, struct nfsclds *dsp, struct ucred *cred,
+    NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	struct nfsmount *nmp;
+	int error;
+	struct nfssockreq *nrp;
+	struct nfsclsession *tsep;
+
+	nmp = clp->nfsc_nmp;
+	if (nmp == NULL)
+		return (0);
+	if (dsp == NULL)
+		nfscl_reqstart(nd, NFSPROC_RENEW, nmp, NULL, 0, NULL, NULL, 0,
+		    0);
+	else
+		nfscl_reqstart(nd, NFSPROC_RENEW, nmp, NULL, 0, NULL,
+		    &dsp->nfsclds_sess, 0, 0);
+	if (!NFSHASNFSV4N(nmp)) {
+		/* NFSv4.1 just uses a Sequence Op and not a Renew. */
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		tsep = nfsmnt_mdssession(nmp);
+		*tl++ = tsep->nfsess_clientid.lval[0];
+		*tl = tsep->nfsess_clientid.lval[1];
+	}
+	nrp = NULL;
+	if (dsp != NULL)
+		nrp = dsp->nfsclds_sockp;
+	if (nrp == NULL)
+		/* If NULL, use the MDS socket. */
+		nrp = &nmp->nm_sockreq;
+	nd->nd_flag |= ND_USEGSSNAME;
+	if (dsp == NULL)
+		error = newnfs_request(nd, nmp, NULL, nrp, NULL, p, cred,
+		    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	else {
+		error = newnfs_request(nd, nmp, NULL, nrp, NULL, p, cred,
+		    NFS_PROG, NFS_VER4, NULL, 1, NULL, &dsp->nfsclds_sess);
+		if (error == ENXIO)
+			nfscl_cancelreqs(dsp);
+	}
+	if (error)
+		return (error);
+	error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * This function performs the Releaselockowner RPC.
+ */
+APPLESTATIC int
+nfsrpc_rellockown(struct nfsmount *nmp, struct nfscllockowner *lp,
+    uint8_t *fh, int fhlen, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	u_int32_t *tl;
+	int error;
+	uint8_t own[NFSV4CL_LOCKNAMELEN + NFSX_V4FHMAX];
+	struct nfsclsession *tsep;
+
+	if (NFSHASNFSV4N(nmp)) {
+		/* For NFSv4.1, do a FreeStateID. */
+		nfscl_reqstart(nd, NFSPROC_FREESTATEID, nmp, NULL, 0, NULL,
+		    NULL, 0, 0);
+		nfsm_stateidtom(nd, &lp->nfsl_stateid, NFSSTATEID_PUTSTATEID);
+	} else {
+		nfscl_reqstart(nd, NFSPROC_RELEASELCKOWN, nmp, NULL, 0, NULL,
+		    NULL, 0, 0);
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		tsep = nfsmnt_mdssession(nmp);
+		*tl++ = tsep->nfsess_clientid.lval[0];
+		*tl = tsep->nfsess_clientid.lval[1];
+		NFSBCOPY(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN);
+		NFSBCOPY(fh, &own[NFSV4CL_LOCKNAMELEN], fhlen);
+		(void)nfsm_strtom(nd, own, NFSV4CL_LOCKNAMELEN + fhlen);
+	}
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error)
+		return (error);
+	error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * This function performs the Compound to get the mount pt FH.
+ */
+APPLESTATIC int
+nfsrpc_getdirpath(struct nfsmount *nmp, u_char *dirpath, struct ucred *cred,
+    NFSPROC_T *p)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	u_char *cp, *cp2;
+	int error, cnt, len, setnil;
+	u_int32_t *opcntp;
+
+	nfscl_reqstart(nd, NFSPROC_PUTROOTFH, nmp, NULL, 0, &opcntp, NULL, 0,
+	    0);
+	cp = dirpath;
+	cnt = 0;
+	do {
+		setnil = 0;
+		while (*cp == '/')
+			cp++;
+		cp2 = cp;
+		while (*cp2 != '\0' && *cp2 != '/')
+			cp2++;
+		if (*cp2 == '/') {
+			setnil = 1;
+			*cp2 = '\0';
+		}
+		if (cp2 != cp) {
+			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+			*tl = txdr_unsigned(NFSV4OP_LOOKUP);
+			nfsm_strtom(nd, cp, strlen(cp));
+			cnt++;
+		}
+		if (setnil)
+			*cp2++ = '/';
+		cp = cp2;
+	} while (*cp != '\0');
+	if (NFSHASNFSV4N(nmp))
+		/* Has a Sequence Op done by nfscl_reqstart(). */
+		*opcntp = txdr_unsigned(3 + cnt);
+	else
+		*opcntp = txdr_unsigned(2 + cnt);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_GETFH);
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+		NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, u_int32_t *, (3 + 2 * cnt) * NFSX_UNSIGNED);
+		tl += (2 + 2 * cnt);
+		if ((len = fxdr_unsigned(int, *tl)) <= 0 ||
+			len > NFSX_FHMAX) {
+			nd->nd_repstat = NFSERR_BADXDR;
+		} else {
+			nd->nd_repstat = nfsrv_mtostr(nd, nmp->nm_fh, len);
+			if (nd->nd_repstat == 0)
+				nmp->nm_fhsize = len;
+		}
+	}
+	error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
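+
+/*
+ * Illustrative sketch (kept under #if 0, not compiled): the dirpath walk
+ * above, which emits one Lookup op for every non-empty '/'-separated
+ * component between the PutRootFH and the final GetFH of the compound.
+ * count_components() is a hypothetical stand-in that only counts what the
+ * loop would emit.
+ */
+#if 0
+static int
+count_components(const char *dirpath)
+{
+	const char *cp = dirpath;
+	int cnt = 0;
+
+	while (*cp != '\0') {
+		while (*cp == '/')
+			cp++;		/* skip consecutive separators */
+		if (*cp == '\0')
+			break;
+		cnt++;			/* one Lookup per component */
+		while (*cp != '\0' && *cp != '/')
+			cp++;
+	}
+	return (cnt);
+}
+#endif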
+
+/*
+ * This function performs the Delegreturn RPC.
+ */
+APPLESTATIC int
+nfsrpc_delegreturn(struct nfscldeleg *dp, struct ucred *cred,
+    struct nfsmount *nmp, NFSPROC_T *p, int syscred)
+{
+	u_int32_t *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	int error;
+
+	nfscl_reqstart(nd, NFSPROC_DELEGRETURN, nmp, dp->nfsdl_fh,
+	    dp->nfsdl_fhlen, NULL, NULL, 0, 0);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID);
+	if (NFSHASNFSV4N(nmp))
+		*tl++ = 0;
+	else
+		*tl++ = dp->nfsdl_stateid.seqid;
+	*tl++ = dp->nfsdl_stateid.other[0];
+	*tl++ = dp->nfsdl_stateid.other[1];
+	*tl = dp->nfsdl_stateid.other[2];
+	if (syscred)
+		nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error)
+		return (error);
+	error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs getacl call.
+ */
+APPLESTATIC int
+nfsrpc_getacl(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
+    struct acl *aclp, void *stuff)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+	nfsattrbit_t attrbits;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	
+	if (nfsrv_useacl == 0 || !NFSHASNFSV4(nmp))
+		return (EOPNOTSUPP);
+	NFSCL_REQSTART(nd, NFSPROC_GETACL, vp);
+	NFSZERO_ATTRBIT(&attrbits);
+	NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_ACL);
+	(void) nfsrv_putattrbit(nd, &attrbits);
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	if (!nd->nd_repstat)
+		error = nfsv4_loadattr(nd, vp, NULL, NULL, NULL, 0, NULL,
+		    NULL, NULL, NULL, aclp, 0, NULL, NULL, NULL, p, cred);
+	else
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * nfs setacl call.
+ */
+APPLESTATIC int
+nfsrpc_setacl(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
+    struct acl *aclp, void *stuff)
+{
+	int error;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	
+	if (nfsrv_useacl == 0 || !NFSHASNFSV4(nmp))
+		return (EOPNOTSUPP);
+	error = nfsrpc_setattr(vp, NULL, aclp, cred, p, NULL, NULL, stuff);
+	return (error);
+}
+
+/*
+ * nfs setacl rpc (the lower level routine that does the Setattr of the ACL).
+ */
+static int
+nfsrpc_setaclrpc(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
+    struct acl *aclp, nfsv4stateid_t *stateidp, void *stuff)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+	nfsattrbit_t attrbits;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	
+	if (!NFSHASNFSV4(nmp))
+		return (EOPNOTSUPP);
+	NFSCL_REQSTART(nd, NFSPROC_SETACL, vp);
+	nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID);
+	NFSZERO_ATTRBIT(&attrbits);
+	NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_ACL);
+	(void) nfsv4_fillattr(nd, vnode_mount(vp), vp, aclp, NULL, NULL, 0,
+	    &attrbits, NULL, NULL, 0, 0, 0, 0, (uint64_t)0, NULL);
+	error = nfscl_request(nd, vp, p, cred, stuff);
+	if (error)
+		return (error);
+	/* Don't care about the pre/postop attributes */
+	mbuf_freem(nd->nd_mrep);
+	return (nd->nd_repstat);
+}
+
+/*
+ * Do the NFSv4.1 Exchange ID.
+ */
+int
+nfsrpc_exchangeid(struct nfsmount *nmp, struct nfsclclient *clp,
+    struct nfssockreq *nrp, uint32_t exchflags, struct nfsclds **dspp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl, v41flags;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	struct nfsclds *dsp;
+	struct timespec verstime;
+	int error, len;
+
+	*dspp = NULL;
+	nfscl_reqstart(nd, NFSPROC_EXCHANGEID, nmp, NULL, 0, NULL, NULL, 0, 0);
+	NFSM_BUILD(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(nfsboottime.tv_sec);	/* Client owner */
+	*tl = txdr_unsigned(clp->nfsc_rev);
+	(void) nfsm_strtom(nd, clp->nfsc_id, clp->nfsc_idlen);
+
+	NFSM_BUILD(tl, uint32_t *, 3 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(exchflags);
+	*tl++ = txdr_unsigned(NFSV4EXCH_SP4NONE);
+
+	/* Set the implementation id4 */
+	*tl = txdr_unsigned(1);
+	(void) nfsm_strtom(nd, "freebsd.org", strlen("freebsd.org"));
+	(void) nfsm_strtom(nd, version, strlen(version));
+	NFSM_BUILD(tl, uint32_t *, NFSX_V4TIME);
+	verstime.tv_sec = 1293840000;		/* Jan 1, 2011 */
+	verstime.tv_nsec = 0;
+	txdr_nfsv4time(&verstime, tl);
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, nrp, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	NFSCL_DEBUG(1, "exchangeid err=%d reps=%d\n", error,
+	    (int)nd->nd_repstat);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, uint32_t *, 6 * NFSX_UNSIGNED + NFSX_HYPER);
+		len = fxdr_unsigned(int, *(tl + 7));
+		if (len < 0 || len > NFSV4_OPAQUELIMIT) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		dsp = malloc(sizeof(struct nfsclds) + len + 1, M_NFSCLDS,
+		    M_WAITOK | M_ZERO);
+		dsp->nfsclds_expire = NFSD_MONOSEC + clp->nfsc_renew;
+		dsp->nfsclds_servownlen = len;
+		dsp->nfsclds_sess.nfsess_clientid.lval[0] = *tl++;
+		dsp->nfsclds_sess.nfsess_clientid.lval[1] = *tl++;
+		dsp->nfsclds_sess.nfsess_sequenceid =
+		    fxdr_unsigned(uint32_t, *tl++);
+		v41flags = fxdr_unsigned(uint32_t, *tl);
+		if ((v41flags & NFSV4EXCH_USEPNFSMDS) != 0 &&
+		    NFSHASPNFSOPT(nmp)) {
+			NFSCL_DEBUG(1, "set PNFS\n");
+			NFSLOCKMNT(nmp);
+			nmp->nm_state |= NFSSTA_PNFS;
+			NFSUNLOCKMNT(nmp);
+			dsp->nfsclds_flags |= NFSCLDS_MDS;
+		}
+		if ((v41flags & NFSV4EXCH_USEPNFSDS) != 0)
+			dsp->nfsclds_flags |= NFSCLDS_DS;
+		if (len > 0)
+			nd->nd_repstat = nfsrv_mtostr(nd,
+			    dsp->nfsclds_serverown, len);
+		if (nd->nd_repstat == 0) {
+			mtx_init(&dsp->nfsclds_mtx, "nfsds", NULL, MTX_DEF);
+			mtx_init(&dsp->nfsclds_sess.nfsess_mtx, "nfssession",
+			    NULL, MTX_DEF);
+			nfscl_initsessionslots(&dsp->nfsclds_sess);
+			*dspp = dsp;
+		} else
+			free(dsp, M_NFSCLDS);
+	}
+	error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
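+
+/*
+ * Note on the ExchangeID arguments above: the client owner verifier is
+ * built from nfsboottime plus the client's revision counter, so a client
+ * reboot changes the verifier and lets the server discard the stale
+ * client state, while the exchange flags in the reply are used to mark
+ * the connection as a pNFS MDS and/or DS for this mount.
+ */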
+
+/*
+ * Do the NFSv4.1 Create Session.
+ */
+int
+nfsrpc_createsession(struct nfsmount *nmp, struct nfsclsession *sep,
+    struct nfssockreq *nrp, uint32_t sequenceid, int mds, struct ucred *cred,
+    NFSPROC_T *p)
+{
+	uint32_t crflags, maxval, *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	int error, irdcnt;
+
+	/* Make sure nm_rsize, nm_wsize is set. */
+	if (nmp->nm_rsize > NFS_MAXBSIZE || nmp->nm_rsize == 0)
+		nmp->nm_rsize = NFS_MAXBSIZE;
+	if (nmp->nm_wsize > NFS_MAXBSIZE || nmp->nm_wsize == 0)
+		nmp->nm_wsize = NFS_MAXBSIZE;
+	nfscl_reqstart(nd, NFSPROC_CREATESESSION, nmp, NULL, 0, NULL, NULL, 0,
+	    0);
+	NFSM_BUILD(tl, uint32_t *, 4 * NFSX_UNSIGNED);
+	*tl++ = sep->nfsess_clientid.lval[0];
+	*tl++ = sep->nfsess_clientid.lval[1];
+	*tl++ = txdr_unsigned(sequenceid);
+	crflags = (NFSMNT_RDONLY(nmp->nm_mountp) ? 0 : NFSV4CRSESS_PERSIST);
+	if (nfscl_enablecallb != 0 && nfs_numnfscbd > 0 && mds != 0)
+		crflags |= NFSV4CRSESS_CONNBACKCHAN;
+	*tl = txdr_unsigned(crflags);
+
+	/* Fill in fore channel attributes. */
+	NFSM_BUILD(tl, uint32_t *, 7 * NFSX_UNSIGNED);
+	*tl++ = 0;				/* Header pad size */
+	*tl++ = txdr_unsigned(nmp->nm_wsize + NFS_MAXXDR);/* Max request size */
+	*tl++ = txdr_unsigned(nmp->nm_rsize + NFS_MAXXDR);/* Max reply size */
+	*tl++ = txdr_unsigned(4096);		/* Max response size cached */
+	*tl++ = txdr_unsigned(20);		/* Max operations */
+	*tl++ = txdr_unsigned(64);		/* Max slots */
+	*tl = 0;				/* No rdma ird */
+
+	/* Fill in back channel attributes. */
+	NFSM_BUILD(tl, uint32_t *, 7 * NFSX_UNSIGNED);
+	*tl++ = 0;				/* Header pad size */
+	*tl++ = txdr_unsigned(10000);		/* Max request size */
+	*tl++ = txdr_unsigned(10000);		/* Max response size */
+	*tl++ = txdr_unsigned(4096);		/* Max response size cached */
+	*tl++ = txdr_unsigned(4);		/* Max operations */
+	*tl++ = txdr_unsigned(NFSV4_CBSLOTS);	/* Max slots */
+	*tl = 0;				/* No rdma ird */
+
+	NFSM_BUILD(tl, uint32_t *, 8 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFS_CALLBCKPROG);	/* Call back prog # */
+
+	/* Allow AUTH_SYS callbacks as uid, gid == 0. */
+	*tl++ = txdr_unsigned(1);		/* Auth_sys only */
+	*tl++ = txdr_unsigned(AUTH_SYS);	/* AUTH_SYS type */
+	*tl++ = txdr_unsigned(nfsboottime.tv_sec); /* time stamp */
+	*tl++ = 0;				/* Null machine name */
+	*tl++ = 0;				/* Uid == 0 */
+	*tl++ = 0;				/* Gid == 0 */
+	*tl = 0;				/* No additional gids */
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, nrp, NULL, p, cred, NFS_PROG,
+	    NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
+		    2 * NFSX_UNSIGNED);
+		bcopy(tl, sep->nfsess_sessionid, NFSX_V4SESSIONID);
+		tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
+		sep->nfsess_sequenceid = fxdr_unsigned(uint32_t, *tl++);
+		crflags = fxdr_unsigned(uint32_t, *tl);
+		if ((crflags & NFSV4CRSESS_PERSIST) != 0 && mds != 0) {
+			NFSLOCKMNT(nmp);
+			nmp->nm_state |= NFSSTA_SESSPERSIST;
+			NFSUNLOCKMNT(nmp);
+		}
+
+		/* Get the fore channel slot count. */
+		NFSM_DISSECT(tl, uint32_t *, 7 * NFSX_UNSIGNED);
+		tl++;			/* Skip the header pad size. */
+
+		/* Make sure nm_wsize is small enough. */
+		maxval = fxdr_unsigned(uint32_t, *tl++);
+		while (maxval < nmp->nm_wsize + NFS_MAXXDR) {
+			if (nmp->nm_wsize > 8096)
+				nmp->nm_wsize /= 2;
+			else
+				break;
+		}
+
+		/* Make sure nm_rsize is small enough. */
+		maxval = fxdr_unsigned(uint32_t, *tl++);
+		while (maxval < nmp->nm_rsize + NFS_MAXXDR) {
+			if (nmp->nm_rsize > 8096)
+				nmp->nm_rsize /= 2;
+			else
+				break;
+		}
+
+		sep->nfsess_maxcache = fxdr_unsigned(int, *tl++);
+		tl++;
+		sep->nfsess_foreslots = fxdr_unsigned(uint16_t, *tl++);
+		NFSCL_DEBUG(4, "fore slots=%d\n", (int)sep->nfsess_foreslots);
+		irdcnt = fxdr_unsigned(int, *tl);
+		if (irdcnt > 0)
+			NFSM_DISSECT(tl, uint32_t *, irdcnt * NFSX_UNSIGNED);
+
+		/* and the back channel slot count. */
+		NFSM_DISSECT(tl, uint32_t *, 7 * NFSX_UNSIGNED);
+		tl += 5;
+		sep->nfsess_backslots = fxdr_unsigned(uint16_t, *tl);
+		NFSCL_DEBUG(4, "back slots=%d\n", (int)sep->nfsess_backslots);
+	}
+	error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
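+
+/*
+ * Illustrative sketch (kept under #if 0, not compiled): the transfer size
+ * negotiation done while parsing the fore channel attributes above.  The
+ * mount's rsize/wsize are halved, so long as the current value exceeds
+ * 8096, until the value plus NFS_MAXXDR of RPC/compound overhead fits
+ * within the maximum the server advertised.  clamp_iosize() is a
+ * hypothetical name for that loop.
+ */
+#if 0
+static void
+clamp_iosize(u_int *iosizep, uint32_t srv_max, u_int xdrpad)
+{
+	while (srv_max < *iosizep + xdrpad) {
+		if (*iosizep > 8096)
+			*iosizep /= 2;
+		else
+			break;
+	}
+}
+#endif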
+
+/*
+ * Do the NFSv4.1 Destroy Session.
+ */
+int
+nfsrpc_destroysession(struct nfsmount *nmp, struct nfsclclient *clp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	int error;
+	struct nfsclsession *tsep;
+
+	nfscl_reqstart(nd, NFSPROC_DESTROYSESSION, nmp, NULL, 0, NULL, NULL, 0,
+	    0);
+	NFSM_BUILD(tl, uint32_t *, NFSX_V4SESSIONID);
+	tsep = nfsmnt_mdssession(nmp);
+	bcopy(tsep->nfsess_sessionid, tl, NFSX_V4SESSIONID);
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do the NFSv4.1 Destroy Client.
+ */
+int
+nfsrpc_destroyclient(struct nfsmount *nmp, struct nfsclclient *clp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	int error;
+	struct nfsclsession *tsep;
+
+	nfscl_reqstart(nd, NFSPROC_DESTROYCLIENT, nmp, NULL, 0, NULL, NULL, 0,
+	    0);
+	NFSM_BUILD(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+	tsep = nfsmnt_mdssession(nmp);
+	*tl++ = tsep->nfsess_clientid.lval[0];
+	*tl = tsep->nfsess_clientid.lval[1];
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do the NFSv4.1 LayoutGet.
+ */
+static int
+nfsrpc_layoutget(struct nfsmount *nmp, uint8_t *fhp, int fhlen, int iomode,
+    uint64_t offset, uint64_t len, uint64_t minlen, int layouttype,
+    int layoutlen, nfsv4stateid_t *stateidp, int *retonclosep,
+    struct nfsclflayouthead *flhp, struct ucred *cred, NFSPROC_T *p,
+    void *stuff)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+
+	nfscl_reqstart(nd, NFSPROC_LAYOUTGET, nmp, fhp, fhlen, NULL, NULL, 0,
+	    0);
+	nfsrv_setuplayoutget(nd, iomode, offset, len, minlen, stateidp,
+	    layouttype, layoutlen, 0);
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	NFSCL_DEBUG(4, "layget err=%d st=%d\n", error, nd->nd_repstat);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat == 0)
+		error = nfsrv_parselayoutget(nd, stateidp, retonclosep, flhp);
+	if (error == 0 && nd->nd_repstat != 0)
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do the NFSv4.1 Get Device Info.
+ */
+int
+nfsrpc_getdeviceinfo(struct nfsmount *nmp, uint8_t *deviceid, int layouttype,
+    uint32_t *notifybitsp, struct nfscldevinfo **ndip, struct ucred *cred,
+    NFSPROC_T *p)
+{
+	uint32_t cnt, *tl, vers, minorvers;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	struct sockaddr_in sin, ssin;
+	struct sockaddr_in6 sin6, ssin6;
+	struct nfsclds *dsp = NULL, **dspp, **gotdspp;
+	struct nfscldevinfo *ndi;
+	int addrcnt = 0, bitcnt, error, gotvers, i, isudp, j, stripecnt;
+	uint8_t stripeindex;
+	sa_family_t af, safilled;
+
+	*ndip = NULL;
+	ndi = NULL;
+	gotdspp = NULL;
+	nfscl_reqstart(nd, NFSPROC_GETDEVICEINFO, nmp, NULL, 0, NULL, NULL, 0,
+	    0);
+	NFSM_BUILD(tl, uint32_t *, NFSX_V4DEVICEID + 3 * NFSX_UNSIGNED);
+	NFSBCOPY(deviceid, tl, NFSX_V4DEVICEID);
+	tl += (NFSX_V4DEVICEID / NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(layouttype);
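+	/* gdia_maxcount limits the size of the device info reply. */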
+	*tl++ = txdr_unsigned(100000);
+	if (notifybitsp != NULL && *notifybitsp != 0) {
+		*tl = txdr_unsigned(1);		/* One word of bits. */
+		NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+		*tl = txdr_unsigned(*notifybitsp);
+	} else
+		*tl = txdr_unsigned(0);
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+		if (layouttype != fxdr_unsigned(int, *tl))
+			printf("EEK! devinfo layout type not same!\n");
+		if (layouttype == NFSLAYOUT_NFSV4_1_FILES) {
+			NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+			stripecnt = fxdr_unsigned(int, *tl);
+			NFSCL_DEBUG(4, "stripecnt=%d\n", stripecnt);
+			if (stripecnt < 1 || stripecnt > 4096) {
+				printf("pNFS File layout devinfo stripecnt %d:"
+				    " out of range\n", stripecnt);
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			NFSM_DISSECT(tl, uint32_t *, (stripecnt + 1) *
+			    NFSX_UNSIGNED);
+			addrcnt = fxdr_unsigned(int, *(tl + stripecnt));
+			NFSCL_DEBUG(4, "addrcnt=%d\n", addrcnt);
+			if (addrcnt < 1 || addrcnt > 128) {
+				printf("NFS devinfo addrcnt %d: out of range\n",
+				    addrcnt);
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+
+			/*
+			 * Now we know how many stripe indices and addresses, so
+			 * we can allocate the structure the correct size.
+			 */
+			i = (stripecnt * sizeof(uint8_t)) /
+			    sizeof(struct nfsclds *) + 1;
+			NFSCL_DEBUG(4, "stripeindices=%d\n", i);
+			ndi = malloc(sizeof(*ndi) + (addrcnt + i) *
+			    sizeof(struct nfsclds *), M_NFSDEVINFO, M_WAITOK |
+			    M_ZERO);
+			NFSBCOPY(deviceid, ndi->nfsdi_deviceid,
+			    NFSX_V4DEVICEID);
+			ndi->nfsdi_refcnt = 0;
+			ndi->nfsdi_flags = NFSDI_FILELAYOUT;
+			ndi->nfsdi_stripecnt = stripecnt;
+			ndi->nfsdi_addrcnt = addrcnt;
+			/* Fill in the stripe indices. */
+			for (i = 0; i < stripecnt; i++) {
+				stripeindex = fxdr_unsigned(uint8_t, *tl++);
+				NFSCL_DEBUG(4, "stripeind=%d\n", stripeindex);
+				if (stripeindex >= addrcnt) {
+					printf("pNFS File Layout devinfo"
+					    " stripeindex %d: too big\n",
+					    (int)stripeindex);
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+				nfsfldi_setstripeindex(ndi, i, stripeindex);
+			}
+		} else if (layouttype == NFSLAYOUT_FLEXFILE) {
+			/* For Flex File, we only get one address list. */
+			ndi = malloc(sizeof(*ndi) + sizeof(struct nfsclds *),
+			    M_NFSDEVINFO, M_WAITOK | M_ZERO);
+			NFSBCOPY(deviceid, ndi->nfsdi_deviceid,
+			    NFSX_V4DEVICEID);
+			ndi->nfsdi_refcnt = 0;
+			ndi->nfsdi_flags = NFSDI_FLEXFILE;
+			addrcnt = ndi->nfsdi_addrcnt = 1;
+		}
+
+		/* Now, dissect the server address(es). */
+		safilled = AF_UNSPEC;
+		for (i = 0; i < addrcnt; i++) {
+			NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+			cnt = fxdr_unsigned(uint32_t, *tl);
+			if (cnt == 0) {
+				printf("NFS devinfo 0 len addrlist\n");
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			dspp = nfsfldi_addr(ndi, i);
+			safilled = AF_UNSPEC;
+			for (j = 0; j < cnt; j++) {
+				error = nfsv4_getipaddr(nd, &sin, &sin6, &af,
+				    &isudp);
+				if (error != 0 && error != EPERM) {
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+				if (error == 0 && isudp == 0) {
+					/*
+					 * The priority is:
+					 * - Same address family.
+					 * Save the address and dspp, so that
+					 * the connection can be done after
+					 * parsing is complete.
+					 */
+					if (safilled == AF_UNSPEC ||
+					    (af == nmp->nm_nam->sa_family &&
+					     safilled != nmp->nm_nam->sa_family)
+					   ) {
+						if (af == AF_INET)
+							ssin = sin;
+						else
+							ssin6 = sin6;
+						safilled = af;
+						gotdspp = dspp;
+					}
+				}
+			}
+		}
+
+		gotvers = NFS_VER4;	/* Always NFSv4 for File Layout. */
+		/* For Flex File, we will take one of the versions to use. */
+		if (layouttype == NFSLAYOUT_FLEXFILE) {
+			NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+			j = fxdr_unsigned(int, *tl);
+			if (j < 1 || j > NFSDEV_MAXVERS) {
+				printf("pNFS: too many versions\n");
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			gotvers = 0;
+			for (i = 0; i < j; i++) {
+				NFSM_DISSECT(tl, uint32_t *, 5 * NFSX_UNSIGNED);
+				vers = fxdr_unsigned(uint32_t, *tl++);
+				minorvers = fxdr_unsigned(uint32_t, *tl++);
+				if ((vers == NFS_VER4 && minorvers ==
+				    NFSV41_MINORVERSION) || (vers == NFS_VER3 &&
+				    gotvers == 0)) {
+					gotvers = vers;
+					/* We'll take this one. */
+					ndi->nfsdi_versindex = i;
+					ndi->nfsdi_vers = vers;
+					ndi->nfsdi_minorvers = minorvers;
+					ndi->nfsdi_rsize = fxdr_unsigned(
+					    uint32_t, *tl++);
+					ndi->nfsdi_wsize = fxdr_unsigned(
+					    uint32_t, *tl++);
+					if (*tl == newnfs_true)
+						ndi->nfsdi_flags |=
+						    NFSDI_TIGHTCOUPLED;
+					else
+						ndi->nfsdi_flags &=
+						    ~NFSDI_TIGHTCOUPLED;
+				}
+			}
+			if (gotvers == 0) {
+				printf("pNFS: no NFSv3 or NFSv4.1\n");
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+		}
+
+		/* And the notify bits. */
+		NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+		bitcnt = fxdr_unsigned(int, *tl);
+		if (bitcnt > 0) {
+			NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+			if (notifybitsp != NULL)
+				*notifybitsp =
+				    fxdr_unsigned(uint32_t, *tl);
+		}
+		if (safilled != AF_UNSPEC) {
+			KASSERT(ndi != NULL, ("ndi is NULL"));
+			*ndip = ndi;
+		} else
+			error = EPERM;
+		if (error == 0) {
+			/*
+			 * Now we can do a TCP connection for the correct
+			 * NFS version and IP address.
+			 */
+			error = nfsrpc_fillsa(nmp, &ssin, &ssin6, safilled,
+			    gotvers, &dsp, p);
+		}
+		if (error == 0) {
+			KASSERT(gotdspp != NULL, ("gotdspp is NULL"));
+			*gotdspp = dsp;
+		}
+	}
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+nfsmout:
+	if (error != 0 && ndi != NULL)
+		nfscl_freedevinfo(ndi);
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do the NFSv4.1 LayoutCommit.
+ */
+int
+nfsrpc_layoutcommit(struct nfsmount *nmp, uint8_t *fh, int fhlen, int reclaim,
+    uint64_t off, uint64_t len, uint64_t lastbyte, nfsv4stateid_t *stateidp,
+    int layouttype, struct ucred *cred, NFSPROC_T *p, void *stuff)
+{
+	uint32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+
+	nfscl_reqstart(nd, NFSPROC_LAYOUTCOMMIT, nmp, fh, fhlen, NULL, NULL,
+	    0, 0);
+	NFSM_BUILD(tl, uint32_t *, 5 * NFSX_UNSIGNED + 3 * NFSX_HYPER +
+	    NFSX_STATEID);
+	txdr_hyper(off, tl);
+	tl += 2;
+	txdr_hyper(len, tl);
+	tl += 2;
+	if (reclaim != 0)
+		*tl++ = newnfs_true;
+	else
+		*tl++ = newnfs_false;
+	*tl++ = txdr_unsigned(stateidp->seqid);
+	*tl++ = stateidp->other[0];
+	*tl++ = stateidp->other[1];
+	*tl++ = stateidp->other[2];
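+	/* loca_last_write_offset: TRUE, then the last byte written. */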
+	*tl++ = newnfs_true;
+	if (lastbyte < off)
+		lastbyte = off;
+	else if (lastbyte >= (off + len))
+		lastbyte = off + len - 1;
+	txdr_hyper(lastbyte, tl);
+	tl += 2;
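+	/* No loca_time_modify value is provided. */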
+	*tl++ = newnfs_false;
+	*tl++ = txdr_unsigned(layouttype);
+	/* All supported layouts are 0 length. */
+	*tl = txdr_unsigned(0);
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Do the NFSv4.1 LayoutReturn.
+ */
+int
+nfsrpc_layoutreturn(struct nfsmount *nmp, uint8_t *fh, int fhlen, int reclaim,
+    int layouttype, uint32_t iomode, int layoutreturn, uint64_t offset,
+    uint64_t len, nfsv4stateid_t *stateidp, struct ucred *cred, NFSPROC_T *p,
+    uint32_t stat, uint32_t op, char *devid)
+{
+	uint32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	uint64_t tu64;
+	int error;
+
+	nfscl_reqstart(nd, NFSPROC_LAYOUTRETURN, nmp, fh, fhlen, NULL, NULL,
+	    0, 0);
+	NFSM_BUILD(tl, uint32_t *, 4 * NFSX_UNSIGNED);
+	if (reclaim != 0)
+		*tl++ = newnfs_true;
+	else
+		*tl++ = newnfs_false;
+	*tl++ = txdr_unsigned(layouttype);
+	*tl++ = txdr_unsigned(iomode);
+	*tl = txdr_unsigned(layoutreturn);
+	if (layoutreturn == NFSLAYOUTRETURN_FILE) {
+		NFSM_BUILD(tl, uint32_t *, 2 * NFSX_HYPER + NFSX_STATEID +
+		    NFSX_UNSIGNED);
+		txdr_hyper(offset, tl);
+		tl += 2;
+		txdr_hyper(len, tl);
+		tl += 2;
+		NFSCL_DEBUG(4, "layoutret stseq=%d\n", (int)stateidp->seqid);
+		*tl++ = txdr_unsigned(stateidp->seqid);
+		*tl++ = stateidp->other[0];
+		*tl++ = stateidp->other[1];
+		*tl++ = stateidp->other[2];
+		if (layouttype == NFSLAYOUT_NFSV4_1_FILES)
+			*tl = txdr_unsigned(0);
+		else if (layouttype == NFSLAYOUT_FLEXFILE) {
+			if (stat != 0) {
+				*tl = txdr_unsigned(2 * NFSX_HYPER +
+				    NFSX_STATEID + NFSX_V4DEVICEID + 5 *
+				    NFSX_UNSIGNED);
+				NFSM_BUILD(tl, uint32_t *, 2 * NFSX_HYPER +
+				    NFSX_STATEID + NFSX_V4DEVICEID + 5 *
+				    NFSX_UNSIGNED);
+				*tl++ = txdr_unsigned(1);	/* One error. */
+				tu64 = 0;			/* Offset. */
+				txdr_hyper(tu64, tl); tl += 2;
+				tu64 = UINT64_MAX;		/* Length. */
+				txdr_hyper(tu64, tl); tl += 2;
+				NFSBCOPY(stateidp, tl, NFSX_STATEID);
+				tl += (NFSX_STATEID / NFSX_UNSIGNED);
+				*tl++ = txdr_unsigned(1);	/* One error. */
+				NFSBCOPY(devid, tl, NFSX_V4DEVICEID);
+				tl += (NFSX_V4DEVICEID / NFSX_UNSIGNED);
+				*tl++ = txdr_unsigned(stat);
+				*tl++ = txdr_unsigned(op);
+			} else {
+				*tl = txdr_unsigned(2 * NFSX_UNSIGNED);
+				NFSM_BUILD(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+				/* No ioerrs. */
+				*tl++ = 0;
+			}
+			*tl = 0;	/* No stats yet. */
+		}
+	}
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+		if (*tl != 0) {
+			NFSM_DISSECT(tl, uint32_t *, NFSX_STATEID);
+			stateidp->seqid = fxdr_unsigned(uint32_t, *tl++);
+			stateidp->other[0] = *tl++;
+			stateidp->other[1] = *tl++;
+			stateidp->other[2] = *tl;
+		}
+	} else
+		error = nd->nd_repstat;
+nfsmout:
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Acquire a layout and devinfo, if possible. The caller must have acquired
+ * a reference count on the nfsclclient structure before calling this.
+ * Return the layout in lypp with a reference count on it, if successful.
+ */
+static int
+nfsrpc_getlayout(struct nfsmount *nmp, vnode_t vp, struct nfsfh *nfhp,
+    int iomode, uint32_t *notifybitsp, nfsv4stateid_t *stateidp, uint64_t off,
+    struct nfscllayout **lypp, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfscllayout *lyp;
+	struct nfsclflayout *flp;
+	struct nfsclflayouthead flh;
+	int error = 0, islocked, layoutlen, layouttype, recalled, retonclose;
+	nfsv4stateid_t stateid;
+	struct nfsclsession *tsep;
+
+	*lypp = NULL;
+	if (NFSHASFLEXFILE(nmp))
+		layouttype = NFSLAYOUT_FLEXFILE;
+	else
+		layouttype = NFSLAYOUT_NFSV4_1_FILES;
+	/*
+	 * If lyp is returned non-NULL, there will be a refcnt (shared lock)
+	 * on it if flp != NULL, or a lock (exclusive lock) on it if
+	 * flp == NULL.
+	 */
+	lyp = nfscl_getlayout(nmp->nm_clp, nfhp->nfh_fh, nfhp->nfh_len,
+	    off, &flp, &recalled);
+	islocked = 0;
+	if (lyp == NULL || flp == NULL) {
+		if (recalled != 0)
+			return (EIO);
+		LIST_INIT(&flh);
+		tsep = nfsmnt_mdssession(nmp);
+		layoutlen = tsep->nfsess_maxcache -
+		    (NFSX_STATEID + 3 * NFSX_UNSIGNED);
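+		/* Size loga_maxcount so the reply fits the session cache. */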
+		if (lyp == NULL) {
+			stateid.seqid = 0;
+			stateid.other[0] = stateidp->other[0];
+			stateid.other[1] = stateidp->other[1];
+			stateid.other[2] = stateidp->other[2];
+			error = nfsrpc_layoutget(nmp, nfhp->nfh_fh,
+			    nfhp->nfh_len, iomode, (uint64_t)0, UINT64_MAX,
+			    (uint64_t)0, layouttype, layoutlen, &stateid,
+			    &retonclose, &flh, cred, p, NULL);
+		} else {
+			islocked = 1;
+			stateid.seqid = lyp->nfsly_stateid.seqid;
+			stateid.other[0] = lyp->nfsly_stateid.other[0];
+			stateid.other[1] = lyp->nfsly_stateid.other[1];
+			stateid.other[2] = lyp->nfsly_stateid.other[2];
+			error = nfsrpc_layoutget(nmp, nfhp->nfh_fh,
+			    nfhp->nfh_len, iomode, off, UINT64_MAX,
+			    (uint64_t)0, layouttype, layoutlen, &stateid,
+			    &retonclose, &flh, cred, p, NULL);
+		}
+		error = nfsrpc_layoutgetres(nmp, vp, nfhp->nfh_fh,
+		    nfhp->nfh_len, &stateid, retonclose, notifybitsp, &lyp,
+		    &flh, layouttype, error, NULL, cred, p);
+		if (error == 0)
+			*lypp = lyp;
+		else if (islocked != 0)
+			nfscl_rellayout(lyp, 1);
+	} else
+		*lypp = lyp;
+	return (error);
+}
+
+/*
+ * Do a TCP connection plus exchange id and create session.
+ * If successful, a "struct nfsclds" is linked into the list for the
+ * mount point and a pointer to it is returned.
+ */
+static int
+nfsrpc_fillsa(struct nfsmount *nmp, struct sockaddr_in *sin,
+    struct sockaddr_in6 *sin6, sa_family_t af, int vers, struct nfsclds **dspp,
+    NFSPROC_T *p)
+{
+	struct sockaddr_in *msad, *sad;
+	struct sockaddr_in6 *msad6, *sad6;
+	struct nfsclclient *clp;
+	struct nfssockreq *nrp;
+	struct nfsclds *dsp, *tdsp;
+	int error;
+	enum nfsclds_state retv;
+	uint32_t sequenceid;
+
+	KASSERT(nmp->nm_sockreq.nr_cred != NULL,
+	    ("nfsrpc_fillsa: NULL nr_cred"));
+	NFSLOCKCLSTATE();
+	clp = nmp->nm_clp;
+	NFSUNLOCKCLSTATE();
+	if (clp == NULL)
+		return (EPERM);
+	if (af == AF_INET) {
+		NFSLOCKMNT(nmp);
+		/*
+		 * Check to see if we already have a session for this
+		 * address that is usable for a DS.
+		 * Note that the MDS's address is in a different place
+		 * than the sessions already acquired for DS's.
+		 */
+		msad = (struct sockaddr_in *)nmp->nm_sockreq.nr_nam;
+		tdsp = TAILQ_FIRST(&nmp->nm_sess);
+		while (tdsp != NULL) {
+			if (msad != NULL && msad->sin_family == AF_INET &&
+			    sin->sin_addr.s_addr == msad->sin_addr.s_addr &&
+			    sin->sin_port == msad->sin_port &&
+			    (tdsp->nfsclds_flags & NFSCLDS_DS) != 0 &&
+			    tdsp->nfsclds_sess.nfsess_defunct == 0) {
+				*dspp = tdsp;
+				NFSUNLOCKMNT(nmp);
+				NFSCL_DEBUG(4, "fnd same addr\n");
+				return (0);
+			}
+			tdsp = TAILQ_NEXT(tdsp, nfsclds_list);
+			if (tdsp != NULL && tdsp->nfsclds_sockp != NULL)
+				msad = (struct sockaddr_in *)
+				    tdsp->nfsclds_sockp->nr_nam;
+			else
+				msad = NULL;
+		}
+		NFSUNLOCKMNT(nmp);
+
+		/* No IP address match, so look for a new/trunked one. */
+		sad = malloc(sizeof(*sad), M_SONAME, M_WAITOK | M_ZERO);
+		sad->sin_len = sizeof(*sad);
+		sad->sin_family = AF_INET;
+		sad->sin_port = sin->sin_port;
+		sad->sin_addr.s_addr = sin->sin_addr.s_addr;
+		nrp = malloc(sizeof(*nrp), M_NFSSOCKREQ, M_WAITOK | M_ZERO);
+		nrp->nr_nam = (struct sockaddr *)sad;
+	} else if (af == AF_INET6) {
+		NFSLOCKMNT(nmp);
+		/*
+		 * Check to see if we already have a session for this
+		 * address that is usable for a DS.
+		 * Note that the MDS's address is in a different place
+		 * than the sessions already acquired for DS's.
+		 */
+		msad6 = (struct sockaddr_in6 *)nmp->nm_sockreq.nr_nam;
+		tdsp = TAILQ_FIRST(&nmp->nm_sess);
+		while (tdsp != NULL) {
+			if (msad6 != NULL && msad6->sin6_family == AF_INET6 &&
+			    IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+			    &msad6->sin6_addr) &&
+			    sin6->sin6_port == msad6->sin6_port &&
+			    (tdsp->nfsclds_flags & NFSCLDS_DS) != 0 &&
+			    tdsp->nfsclds_sess.nfsess_defunct == 0) {
+				*dspp = tdsp;
+				NFSUNLOCKMNT(nmp);
+				return (0);
+			}
+			tdsp = TAILQ_NEXT(tdsp, nfsclds_list);
+			if (tdsp != NULL && tdsp->nfsclds_sockp != NULL)
+				msad6 = (struct sockaddr_in6 *)
+				    tdsp->nfsclds_sockp->nr_nam;
+			else
+				msad6 = NULL;
+		}
+		NFSUNLOCKMNT(nmp);
+
+		/* No IP address match, so look for a new/trunked one. */
+		sad6 = malloc(sizeof(*sad6), M_SONAME, M_WAITOK | M_ZERO);
+		sad6->sin6_len = sizeof(*sad6);
+		sad6->sin6_family = AF_INET6;
+		sad6->sin6_port = sin6->sin6_port;
+		NFSBCOPY(&sin6->sin6_addr, &sad6->sin6_addr,
+		    sizeof(struct in6_addr));
+		nrp = malloc(sizeof(*nrp), M_NFSSOCKREQ, M_WAITOK | M_ZERO);
+		nrp->nr_nam = (struct sockaddr *)sad6;
+	} else
+		return (EPERM);
+
+	nrp->nr_sotype = SOCK_STREAM;
+	mtx_init(&nrp->nr_mtx, "nfssock", NULL, MTX_DEF);
+	nrp->nr_prog = NFS_PROG;
+	nrp->nr_vers = vers;
+
+	/*
+	 * Use the credentials that were used for the mount, which are
+	 * in nmp->nm_sockreq.nr_cred for newnfs_connect() etc.
+	 * Ref. counting the credentials with crhold() is probably not
+	 * necessary, since nm_sockreq.nr_cred won't be crfree()'d until
+	 * unmount, but I did it anyhow.
+	 */
+	nrp->nr_cred = crhold(nmp->nm_sockreq.nr_cred);
+	error = newnfs_connect(nmp, nrp, NULL, p, 0);
+	NFSCL_DEBUG(3, "DS connect=%d\n", error);
+
+	dsp = NULL;
+	/* Now, do the exchangeid and create session. */
+	if (error == 0) {
+		if (vers == NFS_VER4) {
+			error = nfsrpc_exchangeid(nmp, clp, nrp,
+			    NFSV4EXCH_USEPNFSDS, &dsp, nrp->nr_cred, p);
+			NFSCL_DEBUG(3, "DS exchangeid=%d\n", error);
+			if (error != 0)
+				newnfs_disconnect(nrp);
+		} else {
+			dsp = malloc(sizeof(struct nfsclds), M_NFSCLDS,
+			    M_WAITOK | M_ZERO);
+			dsp->nfsclds_flags |= NFSCLDS_DS;
+			dsp->nfsclds_expire = INT32_MAX; /* No renews needed. */
+			mtx_init(&dsp->nfsclds_mtx, "nfsds", NULL, MTX_DEF);
+			mtx_init(&dsp->nfsclds_sess.nfsess_mtx, "nfssession",
+			    NULL, MTX_DEF);
+		}
+	}
+	if (error == 0) {
+		dsp->nfsclds_sockp = nrp;
+		if (vers == NFS_VER4) {
+			NFSLOCKMNT(nmp);
+			retv = nfscl_getsameserver(nmp, dsp, &tdsp,
+			    &sequenceid);
+			NFSCL_DEBUG(3, "getsame ret=%d\n", retv);
+			if (retv == NFSDSP_USETHISSESSION &&
+			    nfscl_dssameconn != 0) {
+				NFSLOCKDS(tdsp);
+				tdsp->nfsclds_flags |= NFSCLDS_SAMECONN;
+				NFSUNLOCKDS(tdsp);
+				NFSUNLOCKMNT(nmp);
+				/*
+				 * If there is already a session for this
+				 * server, use it.
+				 */
+				(void)newnfs_disconnect(nrp);
+				nfscl_freenfsclds(dsp);
+				*dspp = tdsp;
+				return (0);
+			}
+			if (retv == NFSDSP_NOTFOUND)
+				sequenceid =
+				    dsp->nfsclds_sess.nfsess_sequenceid;
+			NFSUNLOCKMNT(nmp);
+			error = nfsrpc_createsession(nmp, &dsp->nfsclds_sess,
+			    nrp, sequenceid, 0, nrp->nr_cred, p);
+			NFSCL_DEBUG(3, "DS createsess=%d\n", error);
+		}
+	} else {
+		NFSFREECRED(nrp->nr_cred);
+		NFSFREEMUTEX(&nrp->nr_mtx);
+		free(nrp->nr_nam, M_SONAME);
+		free(nrp, M_NFSSOCKREQ);
+	}
+	if (error == 0) {
+		NFSCL_DEBUG(3, "add DS session\n");
+		/*
+		 * Put it at the end of the list. That way the list
+		 * is ordered by when the entry was added. This matters
+		 * since the one done first is the one that should be
+		 * used for sequenceid'ing any subsequent create sessions.
+		 */
+		NFSLOCKMNT(nmp);
+		TAILQ_INSERT_TAIL(&nmp->nm_sess, dsp, nfsclds_list);
+		NFSUNLOCKMNT(nmp);
+		*dspp = dsp;
+	} else if (dsp != NULL) {
+		newnfs_disconnect(nrp);
+		nfscl_freenfsclds(dsp);
+	}
+	return (error);
+}
+
+/*
+ * Do the NFSv4.1 Reclaim Complete.
+ */
+int
+nfsrpc_reclaimcomplete(struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	int error;
+
+	nfscl_reqstart(nd, NFSPROC_RECLAIMCOMPL, nmp, NULL, 0, NULL, NULL, 0,
+	    0);
+	NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+	*tl = newnfs_false;
+	nd->nd_flag |= ND_USEGSSNAME;
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Initialize the slot tables for a session.
+ */
+static void
+nfscl_initsessionslots(struct nfsclsession *sep)
+{
+	int i;
+
+	for (i = 0; i < NFSV4_CBSLOTS; i++) {
+		if (sep->nfsess_cbslots[i].nfssl_reply != NULL)
+			m_freem(sep->nfsess_cbslots[i].nfssl_reply);
+		NFSBZERO(&sep->nfsess_cbslots[i], sizeof(struct nfsslot));
+	}
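+	/* Reset the fore channel slot sequence numbers and usage bitmap. */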
+	for (i = 0; i < 64; i++)
+		sep->nfsess_slotseq[i] = 0;
+	sep->nfsess_slots = 0;
+}
+
+/*
+ * Called to try and do an I/O operation via an NFSv4.1 Data Server (DS).
+ */
+int
+nfscl_doiods(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
+    uint32_t rwaccess, int docommit, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfscllayout *layp;
+	struct nfscldevinfo *dip;
+	struct nfsclflayout *rflp;
+	struct mbuf *m;
+	struct nfsclwritedsdorpc *drpc, *tdrpc;
+	nfsv4stateid_t stateid;
+	struct ucred *newcred;
+	uint64_t lastbyte, len, off, oresid, xfer;
+	int eof, error, firstmirror, i, iolaymode, mirrorcnt, recalled, timo;
+	void *lckp;
+	uint8_t *dev;
+	void *iovbase = NULL;
+	size_t iovlen = 0;
+	off_t offs = 0;
+	ssize_t resid = 0;
+
+	if (!NFSHASPNFS(nmp) || nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
+	    (np->n_flag & NNOLAYOUT) != 0)
+		return (EIO);
+	/* Now, get a reference cnt on the clientid for this mount. */
+	if (nfscl_getref(nmp) == 0)
+		return (EIO);
+
+	/* Find an appropriate stateid. */
+	newcred = NFSNEWCRED(cred);
+	error = nfscl_getstateid(vp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
+	    rwaccess, 1, newcred, p, &stateid, &lckp);
+	if (error != 0) {
+		NFSFREECRED(newcred);
+		nfscl_relref(nmp);
+		return (error);
+	}
+	/* Search for a layout for this file. */
+	off = uiop->uio_offset;
+	layp = nfscl_getlayout(nmp->nm_clp, np->n_fhp->nfh_fh,
+	    np->n_fhp->nfh_len, off, &rflp, &recalled);
+	if (layp == NULL || rflp == NULL) {
+		if (recalled != 0) {
+			NFSFREECRED(newcred);
+			nfscl_relref(nmp);
+			return (EIO);
+		}
+		if (layp != NULL) {
+			nfscl_rellayout(layp, (rflp == NULL) ? 1 : 0);
+			layp = NULL;
+		}
+		/* Try and get a Layout, if it is supported. */
+		if (rwaccess == NFSV4OPEN_ACCESSWRITE ||
+		    (np->n_flag & NWRITEOPENED) != 0)
+			iolaymode = NFSLAYOUTIOMODE_RW;
+		else
+			iolaymode = NFSLAYOUTIOMODE_READ;
+		error = nfsrpc_getlayout(nmp, vp, np->n_fhp, iolaymode,
+		    NULL, &stateid, off, &layp, newcred, p);
+		if (error != 0) {
+			NFSLOCKNODE(np);
+			np->n_flag |= NNOLAYOUT;
+			NFSUNLOCKNODE(np);
+			if (lckp != NULL)
+				nfscl_lockderef(lckp);
+			NFSFREECRED(newcred);
+			if (layp != NULL)
+				nfscl_rellayout(layp, 0);
+			nfscl_relref(nmp);
+			return (error);
+		}
+	}
+
+	/*
+	 * Loop around finding a layout that works for the first part of
+	 * this I/O operation, and then call the function that actually
+	 * does the RPC.
+	 */
+	eof = 0;
+	len = (uint64_t)uiop->uio_resid;
+	while (len > 0 && error == 0 && eof == 0) {
+		off = uiop->uio_offset;
+		error = nfscl_findlayoutforio(layp, off, rwaccess, &rflp);
+		if (error == 0) {
+			oresid = xfer = (uint64_t)uiop->uio_resid;
+			if (xfer > (rflp->nfsfl_end - rflp->nfsfl_off))
+				xfer = rflp->nfsfl_end - rflp->nfsfl_off;
+			/*
+			 * For Flex File layout with mirrored DSs, select one
+			 * of them at random for reads. For writes and commits,
+			 * do all mirrors.
+			 */
+			m = NULL;
+			tdrpc = drpc = NULL;
+			firstmirror = 0;
+			mirrorcnt = 1;
+			if ((layp->nfsly_flags & NFSLY_FLEXFILE) != 0 &&
+			    (mirrorcnt = rflp->nfsfl_mirrorcnt) > 1) {
+				if (rwaccess == NFSV4OPEN_ACCESSREAD) {
+					firstmirror = arc4random() % mirrorcnt;
+					mirrorcnt = firstmirror + 1;
+				} else {
+					if (docommit == 0) {
+						/*
+						 * Save values, so uiop can be
+						 * rolled back upon a write
+						 * error.
+						 */
+						offs = uiop->uio_offset;
+						resid = uiop->uio_resid;
+						iovbase =
+						    uiop->uio_iov->iov_base;
+						iovlen = uiop->uio_iov->iov_len;
+						m = nfsm_uiombuflist(uiop, len,
+						    NULL, NULL);
+					}
+					tdrpc = drpc = malloc(sizeof(*drpc) *
+					    (mirrorcnt - 1), M_TEMP, M_WAITOK |
+					    M_ZERO);
+				}
+			}
+			for (i = firstmirror; i < mirrorcnt && error == 0; i++){
+				if ((layp->nfsly_flags & NFSLY_FLEXFILE) != 0) {
+					dev = rflp->nfsfl_ffm[i].dev;
+					dip = nfscl_getdevinfo(nmp->nm_clp, dev,
+					    rflp->nfsfl_ffm[i].devp);
+				} else {
+					dev = rflp->nfsfl_dev;
+					dip = nfscl_getdevinfo(nmp->nm_clp, dev,
+					    rflp->nfsfl_devp);
+				}
+				if (dip != NULL) {
+					if ((rflp->nfsfl_flags & NFSFL_FLEXFILE)
+					    != 0)
+						error = nfscl_dofflayoutio(vp,
+						    uiop, iomode, must_commit,
+						    &eof, &stateid, rwaccess,
+						    dip, layp, rflp, off, xfer,
+						    i, docommit, m, tdrpc,
+						    newcred, p);
+					else
+						error = nfscl_doflayoutio(vp,
+						    uiop, iomode, must_commit,
+						    &eof, &stateid, rwaccess,
+						    dip, layp, rflp, off, xfer,
+						    docommit, newcred, p);
+					nfscl_reldevinfo(dip);
+				} else
+					error = EIO;
+				tdrpc++;
+			}
+			if (m != NULL)
+				m_freem(m);
+			tdrpc = drpc;
+			timo = hz / 50;		/* Wait for 20msec. */
+			if (timo < 1)
+				timo = 1;
+			for (i = firstmirror; i < mirrorcnt - 1 &&
+			    tdrpc != NULL; i++, tdrpc++) {
+				/*
+				 * For the unused drpc entries, inprog and err
+				 * are both 0, so this loop won't break.
+				 */
+				while (tdrpc->inprog != 0 && tdrpc->done == 0)
+					tsleep(&tdrpc->tsk, PVFS, "clrpcio",
+					    timo);
+				if (error == 0 && tdrpc->err != 0)
+					error = tdrpc->err;
+			}
+			free(drpc, M_TEMP);
+			if (error == 0) {
+				if (mirrorcnt > 1 && rwaccess ==
+				    NFSV4OPEN_ACCESSWRITE && docommit == 0) {
+					NFSLOCKCLSTATE();
+					layp->nfsly_flags |= NFSLY_WRITTEN;
+					NFSUNLOCKCLSTATE();
+				}
+				lastbyte = off + xfer - 1;
+				NFSLOCKCLSTATE();
+				if (lastbyte > layp->nfsly_lastbyte)
+					layp->nfsly_lastbyte = lastbyte;
+				NFSUNLOCKCLSTATE();
+			} else if (error == NFSERR_OPENMODE &&
+			    rwaccess == NFSV4OPEN_ACCESSREAD) {
+				NFSLOCKMNT(nmp);
+				nmp->nm_state |= NFSSTA_OPENMODE;
+				NFSUNLOCKMNT(nmp);
+			} else
+				error = EIO;
+			if (error == 0)
+				len -= (oresid - (uint64_t)uiop->uio_resid);
+			else if (mirrorcnt > 1 && rwaccess ==
+			    NFSV4OPEN_ACCESSWRITE && docommit == 0) {
+				/*
+				 * In case the rpc gets retried, roll the
+				 * uio fields changed by nfsm_uiombuflist()
+				 * back.
+				 */
+				uiop->uio_offset = offs;
+				uiop->uio_resid = resid;
+				uiop->uio_iov->iov_base = iovbase;
+				uiop->uio_iov->iov_len = iovlen;
+			}
+		}
+	}
+	if (lckp != NULL)
+		nfscl_lockderef(lckp);
+	NFSFREECRED(newcred);
+	nfscl_rellayout(layp, 0);
+	nfscl_relref(nmp);
+	return (error);
+}
+
+/*
+ * Make a copy of the mbuf chain and add an mbuf for null padding, as required.
+ */
+static struct mbuf *
+nfsm_copym(struct mbuf *m, int off, int xfer)
+{
+	struct mbuf *m2, *m3, *m4;
+	uint32_t *tl;
+	int rem;
+
+	m2 = m_copym(m, off, xfer, M_WAITOK);
+	rem = NFSM_RNDUP(xfer) - xfer;
+	if (rem > 0) {
+		/*
+		 * The zero padding to a multiple of 4 bytes is required by
+		 * the XDR. So that the mbufs copied by reference aren't
+		 * modified, add an mbuf with the zero'd bytes to the list.
+		 * rem will be a maximum of 3, so one zero'd uint32_t is
+		 * sufficient.
+		 */
+		m3 = m2;
+		while (m3->m_next != NULL)
+			m3 = m3->m_next;
+		NFSMGET(m4);
+		tl = NFSMTOD(m4, uint32_t *);
+		*tl = 0;
+		mbuf_setlen(m4, rem);
+		mbuf_setnext(m3, m4);
+	}
+	return (m2);
+}
+
+/*
+ * Find a file layout that will handle the first bytes of the requested
+ * range and return the information from it needed for the I/O operation.
+ */
+int
+nfscl_findlayoutforio(struct nfscllayout *lyp, uint64_t off, uint32_t rwaccess,
+    struct nfsclflayout **retflpp)
+{
+	struct nfsclflayout *flp, *nflp, *rflp;
+	uint32_t rw;
+
+	rflp = NULL;
+	rw = rwaccess;
+	/* For reading, do the Read list first and then the Write list. */
+	do {
+		if (rw == NFSV4OPEN_ACCESSREAD)
+			flp = LIST_FIRST(&lyp->nfsly_flayread);
+		else
+			flp = LIST_FIRST(&lyp->nfsly_flayrw);
+		while (flp != NULL) {
+			nflp = LIST_NEXT(flp, nfsfl_list);
+			if (flp->nfsfl_off > off)
+				break;
+			if (flp->nfsfl_end > off &&
+			    (rflp == NULL || rflp->nfsfl_end < flp->nfsfl_end))
+				rflp = flp;
+			flp = nflp;
+		}
+		if (rw == NFSV4OPEN_ACCESSREAD)
+			rw = NFSV4OPEN_ACCESSWRITE;
+		else
+			rw = 0;
+	} while (rw != 0);
+	if (rflp != NULL) {
+		/* This one covers the most bytes starting at off. */
+		*retflpp = rflp;
+		return (0);
+	}
+	return (EIO);
+}
+
+/*
+ * Do I/O using an NFSv4.1 file layout.
+ */
+static int
+nfscl_doflayoutio(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
+    int *eofp, nfsv4stateid_t *stateidp, int rwflag, struct nfscldevinfo *dp,
+    struct nfscllayout *lyp, struct nfsclflayout *flp, uint64_t off,
+    uint64_t len, int docommit, struct ucred *cred, NFSPROC_T *p)
+{
+	uint64_t io_off, rel_off, stripe_unit_size, transfer, xfer;
+	int commit_thru_mds, error, stripe_index, stripe_pos;
+	struct nfsnode *np;
+	struct nfsfh *fhp;
+	struct nfsclds **dspp;
+
+	np = VTONFS(vp);
+	rel_off = off - flp->nfsfl_patoff;
+	stripe_unit_size = (flp->nfsfl_util >> 6) & 0x3ffffff;
+	stripe_pos = (rel_off / stripe_unit_size + flp->nfsfl_stripe1) %
+	    dp->nfsdi_stripecnt;
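+	/* transfer is the byte count left in the current stripe unit. */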
+	transfer = stripe_unit_size - (rel_off % stripe_unit_size);
+	error = 0;
+
+	/* Loop around, doing I/O for each stripe unit. */
+	while (len > 0 && error == 0) {
+		stripe_index = nfsfldi_stripeindex(dp, stripe_pos);
+		dspp = nfsfldi_addr(dp, stripe_index);
+		if (len > transfer && docommit == 0)
+			xfer = transfer;
+		else
+			xfer = len;
+		if ((flp->nfsfl_util & NFSFLAYUTIL_DENSE) != 0) {
+			/* Dense layout. */
+			if (stripe_pos >= flp->nfsfl_fhcnt)
+				return (EIO);
+			fhp = flp->nfsfl_fh[stripe_pos];
+			io_off = (rel_off / (stripe_unit_size *
+			    dp->nfsdi_stripecnt)) * stripe_unit_size +
+			    rel_off % stripe_unit_size;
+		} else {
+			/* Sparse layout. */
+			if (flp->nfsfl_fhcnt > 1) {
+				if (stripe_index >= flp->nfsfl_fhcnt)
+					return (EIO);
+				fhp = flp->nfsfl_fh[stripe_index];
+			} else if (flp->nfsfl_fhcnt == 1)
+				fhp = flp->nfsfl_fh[0];
+			else
+				fhp = np->n_fhp;
+			io_off = off;
+		}
+		if ((flp->nfsfl_util & NFSFLAYUTIL_COMMIT_THRU_MDS) != 0) {
+			commit_thru_mds = 1;
+			if (docommit != 0)
+				error = EIO;
+		} else {
+			commit_thru_mds = 0;
+			NFSLOCKNODE(np);
+			np->n_flag |= NDSCOMMIT;
+			NFSUNLOCKNODE(np);
+		}
+		if (docommit != 0) {
+			if (error == 0)
+				error = nfsrpc_commitds(vp, io_off, xfer,
+				    *dspp, fhp, 0, 0, cred, p);
+			if (error == 0) {
+				/*
+				 * Set both eof and uio_resid = 0 to end any
+				 * loops.
+				 */
+				*eofp = 1;
+				uiop->uio_resid = 0;
+			} else {
+				NFSLOCKNODE(np);
+				np->n_flag &= ~NDSCOMMIT;
+				NFSUNLOCKNODE(np);
+			}
+		} else if (rwflag == NFSV4OPEN_ACCESSREAD)
+			error = nfsrpc_readds(vp, uiop, stateidp, eofp, *dspp,
+			    io_off, xfer, fhp, 0, 0, 0, cred, p);
+		else {
+			error = nfsrpc_writeds(vp, uiop, iomode, must_commit,
+			    stateidp, *dspp, io_off, xfer, fhp, commit_thru_mds,
+			    0, 0, 0, cred, p);
+			if (error == 0) {
+				NFSLOCKCLSTATE();
+				lyp->nfsly_flags |= NFSLY_WRITTEN;
+				NFSUNLOCKCLSTATE();
+			}
+		}
+		if (error == 0) {
+			transfer = stripe_unit_size;
+			stripe_pos = (stripe_pos + 1) % dp->nfsdi_stripecnt;
+			len -= xfer;
+			off += xfer;
+		}
+	}
+	return (error);
+}
+
+/*
+ * Do I/O using an NFSv4.1 flex file layout.
+ */
+static int
+nfscl_dofflayoutio(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
+    int *eofp, nfsv4stateid_t *stateidp, int rwflag, struct nfscldevinfo *dp,
+    struct nfscllayout *lyp, struct nfsclflayout *flp, uint64_t off,
+    uint64_t len, int mirror, int docommit, struct mbuf *mp,
+    struct nfsclwritedsdorpc *drpc, struct ucred *cred, NFSPROC_T *p)
+{
+	uint64_t transfer, xfer;
+	int error, rel_off;
+	struct nfsnode *np;
+	struct nfsfh *fhp;
+	struct nfsclds **dspp;
+	struct ucred *tcred;
+	struct mbuf *m;
+
+	np = VTONFS(vp);
+	error = 0;
+	rel_off = 0;
+	NFSCL_DEBUG(4, "nfscl_dofflayoutio: off=%ju len=%ju\n", (uintmax_t)off,
+	    (uintmax_t)len);
+	/* Loop around, doing I/O for each stripe unit. */
+	while (len > 0 && error == 0) {
+		dspp = nfsfldi_addr(dp, 0);
+		fhp = flp->nfsfl_ffm[mirror].fh[dp->nfsdi_versindex];
+		stateidp = &flp->nfsfl_ffm[mirror].st;
+		NFSCL_DEBUG(4, "mirror=%d vind=%d fhlen=%d st.seqid=0x%x\n",
+		    mirror, dp->nfsdi_versindex, fhp->nfh_len, stateidp->seqid);
+		if ((dp->nfsdi_flags & NFSDI_TIGHTCOUPLED) == 0) {
+			tcred = NFSNEWCRED(cred);
+			tcred->cr_uid = flp->nfsfl_ffm[mirror].user;
+			tcred->cr_groups[0] = flp->nfsfl_ffm[mirror].group;
+			tcred->cr_ngroups = 1;
+		} else
+			tcred = cred;
+		if (rwflag == NFSV4OPEN_ACCESSREAD)
+			transfer = dp->nfsdi_rsize;
+		else
+			transfer = dp->nfsdi_wsize;
+		NFSLOCKNODE(np);
+		np->n_flag |= NDSCOMMIT;
+		NFSUNLOCKNODE(np);
+		if (len > transfer && docommit == 0)
+			xfer = transfer;
+		else
+			xfer = len;
+		if (docommit != 0) {
+			if (error == 0) {
+				/*
+				 * Do last mirrored DS commit with this thread.
+				 */
+				if (mirror < flp->nfsfl_mirrorcnt - 1)
+					error = nfsio_commitds(vp, off, xfer,
+					    *dspp, fhp, dp->nfsdi_vers,
+					    dp->nfsdi_minorvers, drpc, tcred,
+					    p);
+				else
+					error = nfsrpc_commitds(vp, off, xfer,
+					    *dspp, fhp, dp->nfsdi_vers,
+					    dp->nfsdi_minorvers, tcred, p);
+				NFSCL_DEBUG(4, "commitds=%d\n", error);
+				if (error != 0 && error != EACCES && error !=
+				    ESTALE) {
+					NFSCL_DEBUG(4,
+					    "DS layreterr for commit\n");
+					nfscl_dserr(NFSV4OP_COMMIT, error, dp,
+					    lyp, *dspp);
+				}
+			}
+			NFSCL_DEBUG(4, "aft nfsio_commitds=%d\n", error);
+			if (error == 0) {
+				/*
+				 * Set both eof and uio_resid = 0 to end any
+				 * loops.
+				 */
+				*eofp = 1;
+				uiop->uio_resid = 0;
+			} else {
+				NFSLOCKNODE(np);
+				np->n_flag &= ~NDSCOMMIT;
+				NFSUNLOCKNODE(np);
+			}
+		} else if (rwflag == NFSV4OPEN_ACCESSREAD) {
+			error = nfsrpc_readds(vp, uiop, stateidp, eofp, *dspp,
+			    off, xfer, fhp, 1, dp->nfsdi_vers,
+			    dp->nfsdi_minorvers, tcred, p);
+			NFSCL_DEBUG(4, "readds=%d\n", error);
+			if (error != 0 && error != EACCES && error != ESTALE) {
+				NFSCL_DEBUG(4, "DS layreterr for read\n");
+				nfscl_dserr(NFSV4OP_READ, error, dp, lyp,
+				    *dspp);
+			}
+		} else {
+			if (flp->nfsfl_mirrorcnt == 1) {
+				error = nfsrpc_writeds(vp, uiop, iomode,
+				    must_commit, stateidp, *dspp, off, xfer,
+				    fhp, 0, 1, dp->nfsdi_vers,
+				    dp->nfsdi_minorvers, tcred, p);
+				if (error == 0) {
+					NFSLOCKCLSTATE();
+					lyp->nfsly_flags |= NFSLY_WRITTEN;
+					NFSUNLOCKCLSTATE();
+				}
+			} else {
+				m = nfsm_copym(mp, rel_off, xfer);
+				NFSCL_DEBUG(4, "mcopy reloff=%d xfer=%jd\n",
+				    rel_off, (uintmax_t)xfer);
+				/*
+				 * Do last write to a mirrored DS with this
+				 * thread.
+				 */
+				if (mirror < flp->nfsfl_mirrorcnt - 1)
+					error = nfsio_writedsmir(vp, iomode,
+					    must_commit, stateidp, *dspp, off,
+					    xfer, fhp, m, dp->nfsdi_vers,
+					    dp->nfsdi_minorvers, drpc, tcred,
+					    p);
+				else
+					error = nfsrpc_writedsmir(vp, iomode,
+					    must_commit, stateidp, *dspp, off,
+					    xfer, fhp, m, dp->nfsdi_vers,
+					    dp->nfsdi_minorvers, tcred, p);
+				NFSCL_DEBUG(4, "nfsio_writedsmir=%d\n", error);
+				if (error != 0 && error != EACCES && error !=
+				    ESTALE) {
+					NFSCL_DEBUG(4,
+					    "DS layreterr for write\n");
+					nfscl_dserr(NFSV4OP_WRITE, error, dp,
+					    lyp, *dspp);
+				}
+			}
+		}
+		NFSCL_DEBUG(4, "aft read/writeds=%d\n", error);
+		if (error == 0) {
+			len -= xfer;
+			off += xfer;
+			rel_off += xfer;
+		}
+		if ((dp->nfsdi_flags & NFSDI_TIGHTCOUPLED) == 0)
+			NFSFREECRED(tcred);
+	}
+	NFSCL_DEBUG(4, "eo nfscl_dofflayoutio=%d\n", error);
+	return (error);
+}
+
+/*
+ * The actual read RPC done to a DS.
+ */
+static int
+nfsrpc_readds(vnode_t vp, struct uio *uiop, nfsv4stateid_t *stateidp, int *eofp,
+    struct nfsclds *dsp, uint64_t io_off, int len, struct nfsfh *fhp, int flex,
+    int vers, int minorvers, struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl;
+	int attrflag, error, retlen;
+	struct nfsrv_descript nfsd;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfsrv_descript *nd = &nfsd;
+	struct nfssockreq *nrp;
+	struct nfsvattr na;
+
+	nd->nd_mrep = NULL;
+	if (vers == 0 || vers == NFS_VER4) {
+		nfscl_reqstart(nd, NFSPROC_READDS, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+		vers = NFS_VER4;
+		NFSCL_DEBUG(4, "nfsrpc_readds: vers4 minvers=%d\n", minorvers);
+		if (flex != 0)
+			nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID);
+		else
+			nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSEQIDZERO);
+	} else {
+		nfscl_reqstart(nd, NFSPROC_READ, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+		NFSCL_DEBUG(4, "nfsrpc_readds: vers3\n");
+	}
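+	/* READ args: 64-bit offset followed by the byte count. */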
+	NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED * 3);
+	txdr_hyper(io_off, tl);
+	*(tl + 2) = txdr_unsigned(len);
+	nrp = dsp->nfsclds_sockp;
+	NFSCL_DEBUG(4, "nfsrpc_readds: nrp=%p\n", nrp);
+	if (nrp == NULL)
+		/* If NULL, use the MDS socket. */
+		nrp = &nmp->nm_sockreq;
+	error = newnfs_request(nd, nmp, NULL, nrp, vp, p, cred,
+	    NFS_PROG, vers, NULL, 1, NULL, &dsp->nfsclds_sess);
+	NFSCL_DEBUG(4, "nfsrpc_readds: stat=%d err=%d\n", nd->nd_repstat,
+	    error);
+	if (error != 0)
+		return (error);
+	if (vers == NFS_VER3) {
+		error = nfscl_postop_attr(nd, &na, &attrflag, NULL);
+		NFSCL_DEBUG(4, "nfsrpc_readds: postop=%d\n", error);
+		if (error != 0)
+			goto nfsmout;
+	}
+	if (nd->nd_repstat != 0) {
+		error = nd->nd_repstat;
+		goto nfsmout;
+	}
+	if (vers == NFS_VER3) {
+		NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+		*eofp = fxdr_unsigned(int, *(tl + 1));
+	} else {
+		NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+		*eofp = fxdr_unsigned(int, *tl);
+	}
+	NFSM_STRSIZ(retlen, len);
+	NFSCL_DEBUG(4, "nfsrpc_readds: retlen=%d eof=%d\n", retlen, *eofp);
+	error = nfsm_mbufuio(nd, uiop, retlen);
+nfsmout:
+	if (nd->nd_mrep != NULL)
+		mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * The actual write RPC done to a DS.
+ */
+static int
+nfsrpc_writeds(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
+    nfsv4stateid_t *stateidp, struct nfsclds *dsp, uint64_t io_off, int len,
+    struct nfsfh *fhp, int commit_thru_mds, int flex, int vers, int minorvers,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	int attrflag, error, rlen, commit, committed = NFSWRITE_FILESYNC;
+	int32_t backup;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	struct nfssockreq *nrp;
+	struct nfsvattr na;
+
+	KASSERT(uiop->uio_iovcnt == 1, ("nfs: writerpc iovcnt > 1"));
+	nd->nd_mrep = NULL;
+	if (vers == 0 || vers == NFS_VER4) {
+		nfscl_reqstart(nd, NFSPROC_WRITEDS, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+		NFSCL_DEBUG(4, "nfsrpc_writeds: vers4 minvers=%d\n", minorvers);
+		vers = NFS_VER4;
+		if (flex != 0)
+			nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID);
+		else
+			nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSEQIDZERO);
+		NFSM_BUILD(tl, uint32_t *, NFSX_HYPER + 2 * NFSX_UNSIGNED);
+	} else {
+		nfscl_reqstart(nd, NFSPROC_WRITE, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+		NFSCL_DEBUG(4, "nfsrpc_writeds: vers3\n");
+		NFSM_BUILD(tl, uint32_t *, NFSX_HYPER + 3 * NFSX_UNSIGNED);
+	}
+	txdr_hyper(io_off, tl);
+	tl += 2;
+	if (vers == NFS_VER3)
+		*tl++ = txdr_unsigned(len);
+	*tl++ = txdr_unsigned(*iomode);
+	*tl = txdr_unsigned(len);
+	nfsm_uiombuf(nd, uiop, len);
+	nrp = dsp->nfsclds_sockp;
+	if (nrp == NULL)
+		/* If NULL, use the MDS socket. */
+		nrp = &nmp->nm_sockreq;
+	error = newnfs_request(nd, nmp, NULL, nrp, vp, p, cred,
+	    NFS_PROG, vers, NULL, 1, NULL, &dsp->nfsclds_sess);
+	NFSCL_DEBUG(4, "nfsrpc_writeds: err=%d stat=%d\n", error,
+	    nd->nd_repstat);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat != 0) {
+		/*
+		 * In case the rpc gets retried, roll
+		 * the uio fields changed by nfsm_uiombuf()
+		 * back.
+		 */
+		uiop->uio_offset -= len;
+		uio_uio_resid_add(uiop, len);
+		uio_iov_base_add(uiop, -len);
+		uio_iov_len_add(uiop, len);
+		error = nd->nd_repstat;
+	} else {
+		if (vers == NFS_VER3) {
+			error = nfscl_wcc_data(nd, vp, &na, &attrflag, NULL,
+			    NULL);
+			NFSCL_DEBUG(4, "nfsrpc_writeds: wcc_data=%d\n", error);
+			if (error != 0)
+				goto nfsmout;
+		}
+		NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED + NFSX_VERF);
+		rlen = fxdr_unsigned(int, *tl++);
+		NFSCL_DEBUG(4, "nfsrpc_writeds: len=%d rlen=%d\n", len, rlen);
+		if (rlen == 0) {
+			error = NFSERR_IO;
+			goto nfsmout;
+		} else if (rlen < len) {
+			backup = len - rlen;
+			uio_iov_base_add(uiop, -(backup));
+			uio_iov_len_add(uiop, backup);
+			uiop->uio_offset -= backup;
+			uio_uio_resid_add(uiop, backup);
+			len = rlen;
+		}
+		commit = fxdr_unsigned(int, *tl++);
+
+		/*
+		 * Return the lowest commitment level
+		 * obtained by any of the RPCs.
+		 */
+		if (committed == NFSWRITE_FILESYNC)
+			committed = commit;
+		else if (committed == NFSWRITE_DATASYNC &&
+		    commit == NFSWRITE_UNSTABLE)
+			committed = commit;
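+		/* Save the verifier per-mount (MDS commit) or per-DS. */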
+		if (commit_thru_mds != 0) {
+			NFSLOCKMNT(nmp);
+			if (!NFSHASWRITEVERF(nmp)) {
+				NFSBCOPY(tl, nmp->nm_verf, NFSX_VERF);
+				NFSSETWRITEVERF(nmp);
+			} else if (NFSBCMP(tl, nmp->nm_verf, NFSX_VERF)) {
+				*must_commit = 1;
+				NFSBCOPY(tl, nmp->nm_verf, NFSX_VERF);
+			}
+			NFSUNLOCKMNT(nmp);
+		} else {
+			NFSLOCKDS(dsp);
+			if ((dsp->nfsclds_flags & NFSCLDS_HASWRITEVERF) == 0) {
+				NFSBCOPY(tl, dsp->nfsclds_verf, NFSX_VERF);
+				dsp->nfsclds_flags |= NFSCLDS_HASWRITEVERF;
+			} else if (NFSBCMP(tl, dsp->nfsclds_verf, NFSX_VERF)) {
+				*must_commit = 1;
+				NFSBCOPY(tl, dsp->nfsclds_verf, NFSX_VERF);
+			}
+			NFSUNLOCKDS(dsp);
+		}
+	}
+nfsmout:
+	if (nd->nd_mrep != NULL)
+		mbuf_freem(nd->nd_mrep);
+	*iomode = committed;
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+	return (error);
+}
+
+/*
+ * The actual write RPC done to a DS.
+ * This variant is called from a separate kernel process for mirrors.
+ * Any short write is considered an IO error.
+ */
+static int
+nfsrpc_writedsmir(vnode_t vp, int *iomode, int *must_commit,
+    nfsv4stateid_t *stateidp, struct nfsclds *dsp, uint64_t io_off, int len,
+    struct nfsfh *fhp, struct mbuf *m, int vers, int minorvers,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	int attrflag, error, commit, committed = NFSWRITE_FILESYNC, rlen;
+	struct nfsrv_descript nfsd;
+	struct nfsrv_descript *nd = &nfsd;
+	struct nfssockreq *nrp;
+	struct nfsvattr na;
+
+	nd->nd_mrep = NULL;
+	if (vers == 0 || vers == NFS_VER4) {
+		nfscl_reqstart(nd, NFSPROC_WRITEDS, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+		vers = NFS_VER4;
+		NFSCL_DEBUG(4, "nfsrpc_writedsmir: vers4 minvers=%d\n",
+		    minorvers);
+		nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID);
+		NFSM_BUILD(tl, uint32_t *, NFSX_HYPER + 2 * NFSX_UNSIGNED);
+	} else {
+		nfscl_reqstart(nd, NFSPROC_WRITE, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+		NFSCL_DEBUG(4, "nfsrpc_writedsmir: vers3\n");
+		NFSM_BUILD(tl, uint32_t *, NFSX_HYPER + 3 * NFSX_UNSIGNED);
+	}
+	txdr_hyper(io_off, tl);
+	tl += 2;
+	if (vers == NFS_VER3)
+		*tl++ = txdr_unsigned(len);
+	*tl++ = txdr_unsigned(*iomode);
+	*tl = txdr_unsigned(len);
+	if (len > 0) {
+		/* Put data in mbuf chain. */
+		nd->nd_mb->m_next = m;
+		/* Set nd_mb and nd_bpos to end of data. */
+		while (m->m_next != NULL)
+			m = m->m_next;
+		nd->nd_mb = m;
+		nd->nd_bpos = mtod(m, char *) + m->m_len;
+		NFSCL_DEBUG(4, "nfsrpc_writedsmir: lastmb len=%d\n", m->m_len);
+	}
+	nrp = dsp->nfsclds_sockp;
+	if (nrp == NULL)
+		/* If NULL, use the MDS socket. */
+		nrp = &nmp->nm_sockreq;
+	error = newnfs_request(nd, nmp, NULL, nrp, vp, p, cred,
+	    NFS_PROG, vers, NULL, 1, NULL, &dsp->nfsclds_sess);
+	NFSCL_DEBUG(4, "nfsrpc_writedsmir: err=%d stat=%d\n", error,
+	    nd->nd_repstat);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat != 0)
+		error = nd->nd_repstat;
+	else {
+		if (vers == NFS_VER3) {
+			error = nfscl_wcc_data(nd, vp, &na, &attrflag, NULL,
+			    NULL);
+			NFSCL_DEBUG(4, "nfsrpc_writedsmir: wcc_data=%d\n",
+			    error);
+			if (error != 0)
+				goto nfsmout;
+		}
+		NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED + NFSX_VERF);
+		rlen = fxdr_unsigned(int, *tl++);
+		NFSCL_DEBUG(4, "nfsrpc_writedsmir: len=%d rlen=%d\n", len,
+		    rlen);
+		if (rlen != len) {
+			error = NFSERR_IO;
+			NFSCL_DEBUG(4, "nfsrpc_writedsmir: len=%d rlen=%d\n",
+			    len, rlen);
+			goto nfsmout;
+		}
+		commit = fxdr_unsigned(int, *tl++);
+
+		/*
+		 * Return the lowest commitment level
+		 * obtained by any of the RPCs.
+		 */
+		if (committed == NFSWRITE_FILESYNC)
+			committed = commit;
+		else if (committed == NFSWRITE_DATASYNC &&
+		    commit == NFSWRITE_UNSTABLE)
+			committed = commit;
+		NFSLOCKDS(dsp);
+		if ((dsp->nfsclds_flags & NFSCLDS_HASWRITEVERF) == 0) {
+			NFSBCOPY(tl, dsp->nfsclds_verf, NFSX_VERF);
+			dsp->nfsclds_flags |= NFSCLDS_HASWRITEVERF;
+		} else if (NFSBCMP(tl, dsp->nfsclds_verf, NFSX_VERF)) {
+			*must_commit = 1;
+			NFSBCOPY(tl, dsp->nfsclds_verf, NFSX_VERF);
+		}
+		NFSUNLOCKDS(dsp);
+	}
+nfsmout:
+	if (nd->nd_mrep != NULL)
+		mbuf_freem(nd->nd_mrep);
+	*iomode = committed;
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+	return (error);
+}
+
+/*
+ * Start up the thread that will execute nfsrpc_writedsmir().
+ */
+static void
+start_writedsmir(void *arg, int pending)
+{
+	struct nfsclwritedsdorpc *drpc;
+
+	drpc = (struct nfsclwritedsdorpc *)arg;
+	drpc->err = nfsrpc_writedsmir(drpc->vp, &drpc->iomode,
+	    &drpc->must_commit, drpc->stateidp, drpc->dsp, drpc->off, drpc->len,
+	    drpc->fhp, drpc->m, drpc->vers, drpc->minorvers, drpc->cred,
+	    drpc->p);
+	drpc->done = 1;
+	NFSCL_DEBUG(4, "start_writedsmir: err=%d\n", drpc->err);
+}
+
+/*
+ * Set up the write DS mirror call for the pNFS I/O thread.
+ */
+static int
+nfsio_writedsmir(vnode_t vp, int *iomode, int *must_commit,
+    nfsv4stateid_t *stateidp, struct nfsclds *dsp, uint64_t off, int len,
+    struct nfsfh *fhp, struct mbuf *m, int vers, int minorvers,
+    struct nfsclwritedsdorpc *drpc, struct ucred *cred, NFSPROC_T *p)
+{
+	int error, ret;
+
+	error = 0;
+	drpc->done = 0;
+	drpc->vp = vp;
+	drpc->iomode = *iomode;
+	drpc->must_commit = *must_commit;
+	drpc->stateidp = stateidp;
+	drpc->dsp = dsp;
+	drpc->off = off;
+	drpc->len = len;
+	drpc->fhp = fhp;
+	drpc->m = m;
+	drpc->vers = vers;
+	drpc->minorvers = minorvers;
+	drpc->cred = cred;
+	drpc->p = p;
+	drpc->inprog = 0;
+	ret = EIO;
+	if (nfs_pnfsiothreads != 0) {
+		ret = nfs_pnfsio(start_writedsmir, drpc);
+		NFSCL_DEBUG(4, "nfsio_writedsmir: nfs_pnfsio=%d\n", ret);
+	}
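+	/* If the RPC was not handed off to a thread, do it here. */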
+	if (ret != 0)
+		error = nfsrpc_writedsmir(vp, iomode, must_commit, stateidp,
+		    dsp, off, len, fhp, m, vers, minorvers, cred, p);
+	NFSCL_DEBUG(4, "nfsio_writedsmir: error=%d\n", error);
+	return (error);
+}
+
+/*
+ * Free up the nfsclds structure.
+ */
+void
+nfscl_freenfsclds(struct nfsclds *dsp)
+{
+	int i;
+
+	if (dsp == NULL)
+		return;
+	if (dsp->nfsclds_sockp != NULL) {
+		NFSFREECRED(dsp->nfsclds_sockp->nr_cred);
+		NFSFREEMUTEX(&dsp->nfsclds_sockp->nr_mtx);
+		free(dsp->nfsclds_sockp->nr_nam, M_SONAME);
+		free(dsp->nfsclds_sockp, M_NFSSOCKREQ);
+	}
+	NFSFREEMUTEX(&dsp->nfsclds_mtx);
+	NFSFREEMUTEX(&dsp->nfsclds_sess.nfsess_mtx);
+	for (i = 0; i < NFSV4_CBSLOTS; i++) {
+		if (dsp->nfsclds_sess.nfsess_cbslots[i].nfssl_reply != NULL)
+			m_freem(
+			    dsp->nfsclds_sess.nfsess_cbslots[i].nfssl_reply);
+	}
+	free(dsp, M_NFSCLDS);
+}
+
+static enum nfsclds_state
+nfscl_getsameserver(struct nfsmount *nmp, struct nfsclds *newdsp,
+    struct nfsclds **retdspp, uint32_t *sequencep)
+{
+	struct nfsclds *dsp;
+	int fndseq;
+
+	/*
+	 * Search the list of nfsclds structures for one with the same
+	 * server.
+	 */
+	fndseq = 0;
+	TAILQ_FOREACH(dsp, &nmp->nm_sess, nfsclds_list) {
+		if (dsp->nfsclds_servownlen == newdsp->nfsclds_servownlen &&
+		    dsp->nfsclds_servownlen != 0 &&
+		    !NFSBCMP(dsp->nfsclds_serverown, newdsp->nfsclds_serverown,
+		    dsp->nfsclds_servownlen) &&
+		    dsp->nfsclds_sess.nfsess_defunct == 0) {
+			NFSCL_DEBUG(4, "fnd same fdsp=%p dsp=%p flg=0x%x\n",
+			    TAILQ_FIRST(&nmp->nm_sess), dsp,
+			    dsp->nfsclds_flags);
+			if (fndseq == 0) {
+				/* Get sequenceid# from first entry. */
+				*sequencep =
+				    dsp->nfsclds_sess.nfsess_sequenceid;
+				fndseq = 1;
+			}
+			/* Server major id matches. */
+			if ((dsp->nfsclds_flags & NFSCLDS_DS) != 0) {
+				*retdspp = dsp;
+				return (NFSDSP_USETHISSESSION);
+			}
+		}
+	}
+	if (fndseq != 0)
+		return (NFSDSP_SEQTHISSESSION);
+	return (NFSDSP_NOTFOUND);
+}
+
+/*
+ * NFS commit RPC to an NFSv4.1 DS.
+ */
+static int
+nfsrpc_commitds(vnode_t vp, uint64_t offset, int cnt, struct nfsclds *dsp,
+    struct nfsfh *fhp, int vers, int minorvers, struct ucred *cred,
+    NFSPROC_T *p)
+{
+	uint32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+	struct nfssockreq *nrp;
+	struct nfsvattr na;
+	int attrflag, error;
+
+	nd->nd_mrep = NULL;
+	if (vers == 0 || vers == NFS_VER4) {
+		nfscl_reqstart(nd, NFSPROC_COMMITDS, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+		vers = NFS_VER4;
+	} else
+		nfscl_reqstart(nd, NFSPROC_COMMIT, nmp, fhp->nfh_fh,
+		    fhp->nfh_len, NULL, &dsp->nfsclds_sess, vers, minorvers);
+	NFSCL_DEBUG(4, "nfsrpc_commitds: vers=%d minvers=%d\n", vers,
+	    minorvers);
+	NFSM_BUILD(tl, uint32_t *, NFSX_HYPER + NFSX_UNSIGNED);
+	txdr_hyper(offset, tl);
+	tl += 2;
+	*tl = txdr_unsigned(cnt);
+	nrp = dsp->nfsclds_sockp;
+	if (nrp == NULL)
+		/* If NULL, use the MDS socket. */
+		nrp = &nmp->nm_sockreq;
+	error = newnfs_request(nd, nmp, NULL, nrp, vp, p, cred,
+	    NFS_PROG, vers, NULL, 1, NULL, &dsp->nfsclds_sess);
+	NFSCL_DEBUG(4, "nfsrpc_commitds: err=%d stat=%d\n", error,
+	    nd->nd_repstat);
+	if (error != 0)
+		return (error);
+	if (nd->nd_repstat == 0) {
+		if (vers == NFS_VER3) {
+			error = nfscl_wcc_data(nd, vp, &na, &attrflag, NULL,
+			    NULL);
+			NFSCL_DEBUG(4, "nfsrpc_commitds: wccdata=%d\n", error);
+			if (error != 0)
+				goto nfsmout;
+		}
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_VERF);
+		NFSLOCKDS(dsp);
+		if (NFSBCMP(tl, dsp->nfsclds_verf, NFSX_VERF)) {
+			NFSBCOPY(tl, dsp->nfsclds_verf, NFSX_VERF);
+			error = NFSERR_STALEWRITEVERF;
+		}
+		NFSUNLOCKDS(dsp);
+	}
+nfsmout:
+	if (error == 0 && nd->nd_repstat != 0)
+		error = nd->nd_repstat;
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Start up the thread that will execute nfsrpc_commitds().
+ */
+static void
+start_commitds(void *arg, int pending)
+{
+	struct nfsclwritedsdorpc *drpc;
+
+	drpc = (struct nfsclwritedsdorpc *)arg;
+	drpc->err = nfsrpc_commitds(drpc->vp, drpc->off, drpc->len,
+	    drpc->dsp, drpc->fhp, drpc->vers, drpc->minorvers, drpc->cred,
+	    drpc->p);
+	drpc->done = 1;
+	NFSCL_DEBUG(4, "start_commitds: err=%d\n", drpc->err);
+}
+
+/*
+ * Set up the commit DS mirror call for the pNFS I/O thread.
+ */
+static int
+nfsio_commitds(vnode_t vp, uint64_t offset, int cnt, struct nfsclds *dsp,
+    struct nfsfh *fhp, int vers, int minorvers,
+    struct nfsclwritedsdorpc *drpc, struct ucred *cred, NFSPROC_T *p)
+{
+	int error, ret;
+
+	error = 0;
+	drpc->done = 0;
+	drpc->vp = vp;
+	drpc->off = offset;
+	drpc->len = cnt;
+	drpc->dsp = dsp;
+	drpc->fhp = fhp;
+	drpc->vers = vers;
+	drpc->minorvers = minorvers;
+	drpc->cred = cred;
+	drpc->p = p;
+	drpc->inprog = 0;
+	ret = EIO;
+	if (nfs_pnfsiothreads != 0) {
+		ret = nfs_pnfsio(start_commitds, drpc);
+		NFSCL_DEBUG(4, "nfsio_commitds: nfs_pnfsio=%d\n", ret);
+	}
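+	/* If no pNFS I/O thread took the RPC, do it in this thread. */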
+	if (ret != 0)
+		error = nfsrpc_commitds(vp, offset, cnt, dsp, fhp, vers,
+		    minorvers, cred, p);
+	NFSCL_DEBUG(4, "nfsio_commitds: error=%d\n", error);
+	return (error);
+}
+
+/*
+ * Set up the XDR arguments for the LayoutGet operation.
+ */
+static void
+nfsrv_setuplayoutget(struct nfsrv_descript *nd, int iomode, uint64_t offset,
+    uint64_t len, uint64_t minlen, nfsv4stateid_t *stateidp, int layouttype,
+    int layoutlen, int usecurstateid)
+{
+	uint32_t *tl;
+
+	NFSM_BUILD(tl, uint32_t *, 4 * NFSX_UNSIGNED + 3 * NFSX_HYPER +
+	    NFSX_STATEID);
+	*tl++ = newnfs_false;		/* Don't signal availability. */
+	*tl++ = txdr_unsigned(layouttype);
+	*tl++ = txdr_unsigned(iomode);
+	txdr_hyper(offset, tl);
+	tl += 2;
+	txdr_hyper(len, tl);
+	tl += 2;
+	txdr_hyper(minlen, tl);
+	tl += 2;
+	if (usecurstateid != 0) {
+		/* Special stateid for Current stateid. */
+		*tl++ = txdr_unsigned(1);
+		*tl++ = 0;
+		*tl++ = 0;
+		*tl++ = 0;
+	} else {
+		*tl++ = txdr_unsigned(stateidp->seqid);
+		NFSCL_DEBUG(4, "layget seq=%d\n", (int)stateidp->seqid);
+		*tl++ = stateidp->other[0];
+		*tl++ = stateidp->other[1];
+		*tl++ = stateidp->other[2];
+	}
+	*tl = txdr_unsigned(layoutlen);
+}
+
+/*
+ * Parse the reply for a successful LayoutGet operation.
+ */
+static int
+nfsrv_parselayoutget(struct nfsrv_descript *nd, nfsv4stateid_t *stateidp,
+    int *retonclosep, struct nfsclflayouthead *flhp)
+{
+	uint32_t *tl;
+	struct nfsclflayout *flp, *prevflp, *tflp;
+	int cnt, error, fhcnt, gotiomode, i, iomode, j, k, l, laytype, nfhlen;
+	int m, mirrorcnt;
+	uint64_t retlen, off;
+	struct nfsfh *nfhp;
+	uint8_t *cp;
+	uid_t user;
+	gid_t grp;
+
+	NFSCL_DEBUG(4, "in nfsrv_parselayoutget\n");
+	error = 0;
+	flp = NULL;
+	gotiomode = -1;
+	NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED + NFSX_STATEID);
+	if (*tl++ != 0)
+		*retonclosep = 1;
+	else
+		*retonclosep = 0;
+	stateidp->seqid = fxdr_unsigned(uint32_t, *tl++);
+	NFSCL_DEBUG(4, "retoncls=%d stseq=%d\n", *retonclosep,
+	    (int)stateidp->seqid);
+	stateidp->other[0] = *tl++;
+	stateidp->other[1] = *tl++;
+	stateidp->other[2] = *tl++;
+	cnt = fxdr_unsigned(int, *tl);
+	NFSCL_DEBUG(4, "layg cnt=%d\n", cnt);
+	if (cnt <= 0 || cnt > 10000) {
+		/* Don't accept more than 10000 layouts in reply. */
+		error = NFSERR_BADXDR;
+		goto nfsmout;
+	}
+	for (i = 0; i < cnt; i++) {
+		/* Dissect to the layout type. */
+		NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER +
+		    3 * NFSX_UNSIGNED);
+		off = fxdr_hyper(tl); tl += 2;
+		retlen = fxdr_hyper(tl); tl += 2;
+		iomode = fxdr_unsigned(int, *tl++);
+		laytype = fxdr_unsigned(int, *tl);
+		NFSCL_DEBUG(4, "layt=%d off=%ju len=%ju iom=%d\n", laytype,
+		    (uintmax_t)off, (uintmax_t)retlen, iomode);
+		/* Ignore length of layout body for now. */
+		if (laytype == NFSLAYOUT_NFSV4_1_FILES) {
+			/* Parse the File layout up to fhcnt. */
+			NFSM_DISSECT(tl, uint32_t *, 3 * NFSX_UNSIGNED +
+			    NFSX_HYPER + NFSX_V4DEVICEID);
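+			/* The last word is the fh count, used to size flp. */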
+			fhcnt = fxdr_unsigned(int, *(tl + 4 +
+			    NFSX_V4DEVICEID / NFSX_UNSIGNED));
+			NFSCL_DEBUG(4, "fhcnt=%d\n", fhcnt);
+			if (fhcnt < 0 || fhcnt > 100) {
+				/* Don't accept more than 100 file handles. */
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			if (fhcnt > 0)
+				flp = malloc(sizeof(*flp) + fhcnt *
+				    sizeof(struct nfsfh *), M_NFSFLAYOUT,
+				    M_WAITOK);
+			else
+				flp = malloc(sizeof(*flp), M_NFSFLAYOUT,
+				    M_WAITOK);
+			flp->nfsfl_flags = NFSFL_FILE;
+			flp->nfsfl_fhcnt = 0;
+			flp->nfsfl_devp = NULL;
+			flp->nfsfl_off = off;
+			if (flp->nfsfl_off + retlen < flp->nfsfl_off)
+				flp->nfsfl_end = UINT64_MAX - flp->nfsfl_off;
+			else
+				flp->nfsfl_end = flp->nfsfl_off + retlen;
+			flp->nfsfl_iomode = iomode;
+			if (gotiomode == -1)
+				gotiomode = flp->nfsfl_iomode;
+			/* Ignore layout body length for now. */
+			NFSBCOPY(tl, flp->nfsfl_dev, NFSX_V4DEVICEID);
+			tl += (NFSX_V4DEVICEID / NFSX_UNSIGNED);
+			flp->nfsfl_util = fxdr_unsigned(uint32_t, *tl++);
+			NFSCL_DEBUG(4, "flutil=0x%x\n", flp->nfsfl_util);
+			flp->nfsfl_stripe1 = fxdr_unsigned(uint32_t, *tl++);
+			flp->nfsfl_patoff = fxdr_hyper(tl); tl += 2;
+			NFSCL_DEBUG(4, "stripe1=%u poff=%ju\n",
+			    flp->nfsfl_stripe1, (uintmax_t)flp->nfsfl_patoff);
+			for (j = 0; j < fhcnt; j++) {
+				NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+				nfhlen = fxdr_unsigned(int, *tl);
+				if (nfhlen <= 0 || nfhlen > NFSX_V4FHMAX) {
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+				nfhp = malloc(sizeof(*nfhp) + nfhlen - 1,
+				    M_NFSFH, M_WAITOK);
+				flp->nfsfl_fh[j] = nfhp;
+				flp->nfsfl_fhcnt++;
+				nfhp->nfh_len = nfhlen;
+				NFSM_DISSECT(cp, uint8_t *, NFSM_RNDUP(nfhlen));
+				NFSBCOPY(cp, nfhp->nfh_fh, nfhlen);
+			}
+		} else if (laytype == NFSLAYOUT_FLEXFILE) {
+			NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED +
+			    NFSX_HYPER);
+			mirrorcnt = fxdr_unsigned(int, *(tl + 2));
+			NFSCL_DEBUG(4, "mirrorcnt=%d\n", mirrorcnt);
+			if (mirrorcnt < 1 || mirrorcnt > NFSDEV_MAXMIRRORS) {
+				error = NFSERR_BADXDR;
+				goto nfsmout;
+			}
+			flp = malloc(sizeof(*flp) + mirrorcnt *
+			    sizeof(struct nfsffm), M_NFSFLAYOUT, M_WAITOK);
+			flp->nfsfl_flags = NFSFL_FLEXFILE;
+			flp->nfsfl_mirrorcnt = mirrorcnt;
+			for (j = 0; j < mirrorcnt; j++)
+				flp->nfsfl_ffm[j].devp = NULL;
+			flp->nfsfl_off = off;
+			if (flp->nfsfl_off + retlen < flp->nfsfl_off)
+				flp->nfsfl_end = UINT64_MAX - flp->nfsfl_off;
+			else
+				flp->nfsfl_end = flp->nfsfl_off + retlen;
+			flp->nfsfl_iomode = iomode;
+			if (gotiomode == -1)
+				gotiomode = flp->nfsfl_iomode;
+			flp->nfsfl_stripeunit = fxdr_hyper(tl);
+			NFSCL_DEBUG(4, "stripeunit=%ju\n",
+			    (uintmax_t)flp->nfsfl_stripeunit);
+			for (j = 0; j < mirrorcnt; j++) {
+				NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+				k = fxdr_unsigned(int, *tl);
+				if (k < 1 || k > 128) {
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+				NFSCL_DEBUG(4, "servercnt=%d\n", k);
+				for (l = 0; l < k; l++) {
+					NFSM_DISSECT(tl, uint32_t *,
+					    NFSX_V4DEVICEID + NFSX_STATEID +
+					    2 * NFSX_UNSIGNED);
+					if (l == 0) {
+						/* Just use the first server. */
+						NFSBCOPY(tl,
+						    flp->nfsfl_ffm[j].dev,
+						    NFSX_V4DEVICEID);
+						tl += (NFSX_V4DEVICEID /
+						    NFSX_UNSIGNED);
+						tl++;
+						flp->nfsfl_ffm[j].st.seqid =
+						    *tl++;
+						flp->nfsfl_ffm[j].st.other[0] =
+						    *tl++;
+						flp->nfsfl_ffm[j].st.other[1] =
+						    *tl++;
+						flp->nfsfl_ffm[j].st.other[2] =
+						    *tl++;
+						NFSCL_DEBUG(4, "st.seqid=%u "
+						 "st.o0=0x%x st.o1=0x%x "
+						 "st.o2=0x%x\n",
+						 flp->nfsfl_ffm[j].st.seqid,
+						 flp->nfsfl_ffm[j].st.other[0],
+						 flp->nfsfl_ffm[j].st.other[1],
+						 flp->nfsfl_ffm[j].st.other[2]);
+					} else
+						tl += ((NFSX_V4DEVICEID +
+						    NFSX_STATEID +
+						    NFSX_UNSIGNED) /
+						    NFSX_UNSIGNED);
+					fhcnt = fxdr_unsigned(int, *tl);
+					NFSCL_DEBUG(4, "fhcnt=%d\n", fhcnt);
+					if (fhcnt < 1 ||
+					    fhcnt > NFSDEV_MAXVERS) {
+						error = NFSERR_BADXDR;
+						goto nfsmout;
+					}
+					for (m = 0; m < fhcnt; m++) {
+						NFSM_DISSECT(tl, uint32_t *,
+						    NFSX_UNSIGNED);
+						nfhlen = fxdr_unsigned(int,
+						    *tl);
+						NFSCL_DEBUG(4, "nfhlen=%d\n",
+						    nfhlen);
+						if (nfhlen <= 0 || nfhlen >
+						    NFSX_V4FHMAX) {
+							error = NFSERR_BADXDR;
+							goto nfsmout;
+						}
+						NFSM_DISSECT(cp, uint8_t *,
+						    NFSM_RNDUP(nfhlen));
+						if (l == 0) {
+							flp->nfsfl_ffm[j].fhcnt 
+							    = fhcnt;
+							nfhp = malloc(
+							    sizeof(*nfhp) +
+							    nfhlen - 1, M_NFSFH,
+							    M_WAITOK);
+							flp->nfsfl_ffm[j].fh[m]
+							    = nfhp;
+							nfhp->nfh_len = nfhlen;
+							NFSBCOPY(cp,
+							    nfhp->nfh_fh,
+							    nfhlen);
+							NFSCL_DEBUG(4,
+							    "got fh\n");
+						}
+					}
+					/* Now, get the ffds_user/ffds_group. */
+					error = nfsrv_parseug(nd, 0, &user,
+					    &grp, curthread);
+					NFSCL_DEBUG(4, "after parseu=%d\n",
+					    error);
+					if (error == 0)
+						error = nfsrv_parseug(nd, 1,
+						    &user, &grp, curthread);
+					NFSCL_DEBUG(4, "aft parseg=%d\n",
+					    grp);
+					if (error != 0)
+						goto nfsmout;
+					NFSCL_DEBUG(4, "user=%d group=%d\n",
+					    user, grp);
+					if (l == 0) {
+						flp->nfsfl_ffm[j].user = user;
+						flp->nfsfl_ffm[j].group = grp;
+						NFSCL_DEBUG(4,
+						    "usr=%d grp=%d\n", user,
+						    grp);
+					}
+				}
+			}
+			NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+			flp->nfsfl_fflags = fxdr_unsigned(uint32_t, *tl++);
+			flp->nfsfl_statshint = fxdr_unsigned(uint32_t, *tl);
+			NFSCL_DEBUG(4, "fflags=0x%x statshint=%d\n",
+			    flp->nfsfl_fflags, flp->nfsfl_statshint);
+		} else {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		if (flp->nfsfl_iomode == gotiomode) {
+			/* Keep the list in increasing offset order. */
+			tflp = LIST_FIRST(flhp);
+			prevflp = NULL;
+			while (tflp != NULL &&
+			    tflp->nfsfl_off < flp->nfsfl_off) {
+				prevflp = tflp;
+				tflp = LIST_NEXT(tflp, nfsfl_list);
+			}
+			if (prevflp == NULL)
+				LIST_INSERT_HEAD(flhp, flp, nfsfl_list);
+			else
+				LIST_INSERT_AFTER(prevflp, flp,
+				    nfsfl_list);
+			NFSCL_DEBUG(4, "flp inserted\n");
+		} else {
+			printf("nfscl_layoutget(): got wrong iomode\n");
+			nfscl_freeflayout(flp);
+		}
+		flp = NULL;
+	}
+nfsmout:
+	NFSCL_DEBUG(4, "eo nfsrv_parselayoutget=%d\n", error);
+	if (error != 0 && flp != NULL)
+		nfscl_freeflayout(flp);
+	return (error);
+}
+
+/*
+ * Parse a user/group digit string.
+ */
+static int
+nfsrv_parseug(struct nfsrv_descript *nd, int dogrp, uid_t *uidp, gid_t *gidp,
+    NFSPROC_T *p)
+{
+	uint32_t *tl;
+	char *cp, *str, str0[NFSV4_SMALLSTR + 1];
+	uint32_t len = 0;
+	int error = 0;
+
+	NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+	len = fxdr_unsigned(uint32_t, *tl);
+	str = NULL;
+	if (len > NFSV4_OPAQUELIMIT) {
+		error = NFSERR_BADXDR;
+		goto nfsmout;
+	}
+	NFSCL_DEBUG(4, "nfsrv_parseug: len=%d\n", len);
+	if (len == 0) {
+		if (dogrp != 0)
+			*gidp = GID_NOGROUP;
+		else
+			*uidp = UID_NOBODY;
+		return (0);
+	}
+	if (len > NFSV4_SMALLSTR)
+		str = malloc(len + 1, M_TEMP, M_WAITOK);
+	else
+		str = str0;
+	NFSM_DISSECT(cp, char *, NFSM_RNDUP(len));
+	NFSBCOPY(cp, str, len);
+	str[len] = '\0';
+	NFSCL_DEBUG(4, "nfsrv_parseug: str=%s\n", str);
+	if (dogrp != 0)
+		error = nfsv4_strtogid(nd, str, len, gidp, p);
+	else
+		error = nfsv4_strtouid(nd, str, len, uidp, p);
+nfsmout:
+	if (len > NFSV4_SMALLSTR)
+		free(str, M_TEMP);
+	NFSCL_DEBUG(4, "eo nfsrv_parseug=%d\n", error);
+	return (error);
+}
+
+/*
+ * Similar to nfsrpc_getlayout(), except that it uses nfsrpc_openlayoutrpc(),
+ * so that it does both an Open and a Layoutget.
+ */
+static int
+nfsrpc_getopenlayout(struct nfsmount *nmp, vnode_t vp, u_int8_t *nfhp,
+    int fhlen, uint8_t *newfhp, int newfhlen, uint32_t mode,
+    struct nfsclopen *op, uint8_t *name, int namelen, struct nfscldeleg **dpp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfscllayout *lyp;
+	struct nfsclflayout *flp;
+	struct nfsclflayouthead flh;
+	int error, islocked, layoutlen, recalled, retonclose, usecurstateid;
+	int layouttype, laystat;
+	nfsv4stateid_t stateid;
+	struct nfsclsession *tsep;
+
+	error = 0;
+	if (NFSHASFLEXFILE(nmp))
+		layouttype = NFSLAYOUT_FLEXFILE;
+	else
+		layouttype = NFSLAYOUT_NFSV4_1_FILES;
+	/*
+	 * If lyp is returned non-NULL, there will be a refcnt (shared lock)
+	 * on it iff flp != NULL, or an exclusive lock on it iff
+	 * flp == NULL.
+	 */
+	lyp = nfscl_getlayout(nmp->nm_clp, newfhp, newfhlen, 0, &flp,
+	    &recalled);
+	NFSCL_DEBUG(4, "nfsrpc_getopenlayout nfscl_getlayout lyp=%p\n", lyp);
+	if (lyp == NULL)
+		islocked = 0;
+	else if (flp != NULL)
+		islocked = 1;
+	else
+		islocked = 2;
+	if ((lyp == NULL || flp == NULL) && recalled == 0) {
+		LIST_INIT(&flh);
+		tsep = nfsmnt_mdssession(nmp);
+		layoutlen = tsep->nfsess_maxcache - (NFSX_STATEID +
+		    3 * NFSX_UNSIGNED);
+		if (lyp == NULL)
+			usecurstateid = 1;
+		else {
+			usecurstateid = 0;
+			stateid.seqid = lyp->nfsly_stateid.seqid;
+			stateid.other[0] = lyp->nfsly_stateid.other[0];
+			stateid.other[1] = lyp->nfsly_stateid.other[1];
+			stateid.other[2] = lyp->nfsly_stateid.other[2];
+		}
+		error = nfsrpc_openlayoutrpc(nmp, vp, nfhp, fhlen,
+		    newfhp, newfhlen, mode, op, name, namelen,
+		    dpp, &stateid, usecurstateid, layouttype, layoutlen,
+		    &retonclose, &flh, &laystat, cred, p);
+		NFSCL_DEBUG(4, "aft nfsrpc_openlayoutrpc laystat=%d err=%d\n",
+		    laystat, error);
+		laystat = nfsrpc_layoutgetres(nmp, vp, newfhp, newfhlen,
+		    &stateid, retonclose, NULL, &lyp, &flh, layouttype, laystat,
+		    &islocked, cred, p);
+	} else
+		error = nfsrpc_openrpc(nmp, vp, nfhp, fhlen, newfhp, newfhlen,
+		    mode, op, name, namelen, dpp, 0, 0, cred, p, 0, 0);
+	if (islocked == 2)
+		nfscl_rellayout(lyp, 1);
+	else if (islocked == 1)
+		nfscl_rellayout(lyp, 0);
+	return (error);
+}
+
+/*
+ * This function does an Open+LayoutGet for an NFSv4.1 mount with pNFS
+ * enabled, only for the CLAIM_NULL case.  All other NFSv4 Opens are
+ * handled by nfsrpc_openrpc().
+ * For the case where op == NULL, dvp is the directory.  When op != NULL, it
+ * can be NULL.
+ */
+static int
+nfsrpc_openlayoutrpc(struct nfsmount *nmp, vnode_t vp, u_int8_t *nfhp,
+    int fhlen, uint8_t *newfhp, int newfhlen, uint32_t mode,
+    struct nfsclopen *op, uint8_t *name, int namelen, struct nfscldeleg **dpp,
+    nfsv4stateid_t *stateidp, int usecurstateid, int layouttype,
+    int layoutlen, int *retonclosep, struct nfsclflayouthead *flhp,
+    int *laystatp, struct ucred *cred, NFSPROC_T *p)
+{
+	uint32_t *tl;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfscldeleg *ndp = NULL;
+	struct nfsvattr nfsva;
+	struct nfsclsession *tsep;
+	uint32_t rflags, deleg;
+	nfsattrbit_t attrbits;
+	int error, ret, acesize, limitby, iomode;
+
+	*dpp = NULL;
+	*laystatp = ENXIO;
+	nfscl_reqstart(nd, NFSPROC_OPENLAYGET, nmp, nfhp, fhlen, NULL, NULL,
+	    0, 0);
+	NFSM_BUILD(tl, uint32_t *, 5 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(op->nfso_own->nfsow_seqid);
+	*tl++ = txdr_unsigned(mode & NFSV4OPEN_ACCESSBOTH);
+	*tl++ = txdr_unsigned((mode >> NFSLCK_SHIFT) & NFSV4OPEN_DENYBOTH);
+	tsep = nfsmnt_mdssession(nmp);
+	*tl++ = tsep->nfsess_clientid.lval[0];
+	*tl = tsep->nfsess_clientid.lval[1];
+	nfsm_strtom(nd, op->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN);
+	NFSM_BUILD(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFSV4OPEN_NOCREATE);
+	*tl = txdr_unsigned(NFSV4OPEN_CLAIMNULL);
+	nfsm_strtom(nd, name, namelen);
+	NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_GETATTR);
+	NFSZERO_ATTRBIT(&attrbits);
+	NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_CHANGE);
+	NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFY);
+	nfsrv_putattrbit(nd, &attrbits);
+	NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_LAYOUTGET);
+	if ((mode & NFSV4OPEN_ACCESSWRITE) != 0)
+		iomode = NFSLAYOUTIOMODE_RW;
+	else
+		iomode = NFSLAYOUTIOMODE_READ;
+	nfsrv_setuplayoutget(nd, iomode, 0, UINT64_MAX, 0, stateidp,
+	    layouttype, layoutlen, usecurstateid);
+	error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
+	    NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
+	if (error != 0)
+		return (error);
+	NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd);
+	if (nd->nd_repstat != 0)
+		*laystatp = nd->nd_repstat;
+	if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+		/* ND_NOMOREDATA will be set if the Open operation failed. */
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+		    6 * NFSX_UNSIGNED);
+		op->nfso_stateid.seqid = *tl++;
+		op->nfso_stateid.other[0] = *tl++;
+		op->nfso_stateid.other[1] = *tl++;
+		op->nfso_stateid.other[2] = *tl;
+		rflags = fxdr_unsigned(u_int32_t, *(tl + 6));
+		error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+		if (error != 0)
+			goto nfsmout;
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		deleg = fxdr_unsigned(u_int32_t, *tl);
+		if (deleg == NFSV4OPEN_DELEGATEREAD ||
+		    deleg == NFSV4OPEN_DELEGATEWRITE) {
+			if (!(op->nfso_own->nfsow_clp->nfsc_flags &
+			      NFSCLFLAGS_FIRSTDELEG))
+				op->nfso_own->nfsow_clp->nfsc_flags |=
+				  (NFSCLFLAGS_FIRSTDELEG | NFSCLFLAGS_GOTDELEG);
+			ndp = malloc(sizeof(struct nfscldeleg) + newfhlen,
+			    M_NFSCLDELEG, M_WAITOK);
+			LIST_INIT(&ndp->nfsdl_owner);
+			LIST_INIT(&ndp->nfsdl_lock);
+			ndp->nfsdl_clp = op->nfso_own->nfsow_clp;
+			ndp->nfsdl_fhlen = newfhlen;
+			NFSBCOPY(newfhp, ndp->nfsdl_fh, newfhlen);
+			newnfs_copyincred(cred, &ndp->nfsdl_cred);
+			nfscl_lockinit(&ndp->nfsdl_rwlock);
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+			    NFSX_UNSIGNED);
+			ndp->nfsdl_stateid.seqid = *tl++;
+			ndp->nfsdl_stateid.other[0] = *tl++;
+			ndp->nfsdl_stateid.other[1] = *tl++;
+			ndp->nfsdl_stateid.other[2] = *tl++;
+			ret = fxdr_unsigned(int, *tl);
+			if (deleg == NFSV4OPEN_DELEGATEWRITE) {
+				ndp->nfsdl_flags = NFSCLDL_WRITE;
+				/*
+				 * Indicates how much the file can grow.
+				 */
+				NFSM_DISSECT(tl, u_int32_t *,
+				    3 * NFSX_UNSIGNED);
+				limitby = fxdr_unsigned(int, *tl++);
+				switch (limitby) {
+				case NFSV4OPEN_LIMITSIZE:
+					ndp->nfsdl_sizelimit = fxdr_hyper(tl);
+					break;
+				case NFSV4OPEN_LIMITBLOCKS:
+					ndp->nfsdl_sizelimit =
+					    fxdr_unsigned(u_int64_t, *tl++);
+					ndp->nfsdl_sizelimit *=
+					    fxdr_unsigned(u_int64_t, *tl);
+					break;
+				default:
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+			} else
+				ndp->nfsdl_flags = NFSCLDL_READ;
+			if (ret != 0)
+				ndp->nfsdl_flags |= NFSCLDL_RECALL;
+			error = nfsrv_dissectace(nd, &ndp->nfsdl_ace, &ret,
+			    &acesize, p);
+			if (error != 0)
+				goto nfsmout;
+		} else if (deleg != NFSV4OPEN_DELEGATENONE) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+		if ((rflags & NFSV4OPEN_LOCKTYPEPOSIX) != 0 ||
+		    nfscl_assumeposixlocks)
+			op->nfso_posixlock = 1;
+		else
+			op->nfso_posixlock = 0;
+		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		/* If the 2nd element == NFS_OK, the Getattr succeeded. */
+		if (*++tl == 0) {
+			error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
+			    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+			    NULL, NULL, NULL, p, cred);
+			if (error != 0)
+				goto nfsmout;
+			if (ndp != NULL) {
+				ndp->nfsdl_change = nfsva.na_filerev;
+				ndp->nfsdl_modtime = nfsva.na_mtime;
+				ndp->nfsdl_flags |= NFSCLDL_MODTIMESET;
+				*dpp = ndp;
+				ndp = NULL;
+			}
+			/*
+			 * At this point, the Open has succeeded, so set
+			 * nd_repstat = NFS_OK.  If the Layoutget failed,
+			 * this function just won't return a layout.
+			 */
+			if (nd->nd_repstat == 0) {
+				NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+				*laystatp = fxdr_unsigned(int, *++tl);
+				if (*laystatp == 0) {
+					error = nfsrv_parselayoutget(nd,
+					    stateidp, retonclosep, flhp);
+					if (error != 0)
+						*laystatp = error;
+				}
+			} else
+				nd->nd_repstat = 0;	/* Return 0 for Open. */
+		}
+	}
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+nfsmout:
+	free(ndp, M_NFSCLDELEG);
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Similar to nfsrpc_createv4(), but also does the LayoutGet operation.
+ * Used only for mounts with pNFS enabled.
+ */
+static int
+nfsrpc_createlayout(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+    nfsquad_t cverf, int fmode, struct nfsclowner *owp, struct nfscldeleg **dpp,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap,
+    struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp,
+    int *dattrflagp, void *dstuff, int *unlockedp, nfsv4stateid_t *stateidp,
+    int usecurstateid, int layouttype, int layoutlen, int *retonclosep,
+    struct nfsclflayouthead *flhp, int *laystatp)
+{
+	uint32_t *tl;
+	int error = 0, deleg, newone, ret, acesize, limitby;
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	struct nfsclopen *op;
+	struct nfscldeleg *dp = NULL;
+	struct nfsnode *np;
+	struct nfsfh *nfhp;
+	struct nfsclsession *tsep;
+	nfsattrbit_t attrbits;
+	nfsv4stateid_t stateid;
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(dvp->v_mount);
+	np = VTONFS(dvp);
+	*laystatp = ENXIO;
+	*unlockedp = 0;
+	*nfhpp = NULL;
+	*dpp = NULL;
+	*attrflagp = 0;
+	*dattrflagp = 0;
+	if (namelen > NFS_MAXNAMLEN)
+		return (ENAMETOOLONG);
+	NFSCL_REQSTART(nd, NFSPROC_CREATELAYGET, dvp);
+	/*
+	 * For V4, this is actually an Open op.
+	 */
+	NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(owp->nfsow_seqid);
+	*tl++ = txdr_unsigned(NFSV4OPEN_ACCESSWRITE |
+	    NFSV4OPEN_ACCESSREAD);
+	*tl++ = txdr_unsigned(NFSV4OPEN_DENYNONE);
+	tsep = nfsmnt_mdssession(nmp);
+	*tl++ = tsep->nfsess_clientid.lval[0];
+	*tl = tsep->nfsess_clientid.lval[1];
+	nfsm_strtom(nd, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
+	NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFSV4OPEN_CREATE);
+	if ((fmode & O_EXCL) != 0) {
+		if (NFSHASSESSPERSIST(nmp)) {
+			/* Use GUARDED for persistent sessions. */
+			*tl = txdr_unsigned(NFSCREATE_GUARDED);
+			nfscl_fillsattr(nd, vap, dvp, 0, 0);
+		} else {
+			/* Otherwise, use EXCLUSIVE4_1. */
+			*tl = txdr_unsigned(NFSCREATE_EXCLUSIVE41);
+			NFSM_BUILD(tl, u_int32_t *, NFSX_VERF);
+			*tl++ = cverf.lval[0];
+			*tl = cverf.lval[1];
+			nfscl_fillsattr(nd, vap, dvp, 0, 0);
+		}
+	} else {
+		*tl = txdr_unsigned(NFSCREATE_UNCHECKED);
+		nfscl_fillsattr(nd, vap, dvp, 0, 0);
+	}
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OPEN_CLAIMNULL);
+	nfsm_strtom(nd, name, namelen);
+	/* Get the new file's handle and attributes, plus save the FH. */
+	NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFSV4OP_SAVEFH);
+	*tl++ = txdr_unsigned(NFSV4OP_GETFH);
+	*tl = txdr_unsigned(NFSV4OP_GETATTR);
+	NFSGETATTR_ATTRBIT(&attrbits);
+	nfsrv_putattrbit(nd, &attrbits);
+	/* Get the directory's post-op attributes. */
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_PUTFH);
+	nfsm_fhtom(nd, np->n_fhp->nfh_fh, np->n_fhp->nfh_len, 0);
+	NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(NFSV4OP_GETATTR);
+	nfsrv_putattrbit(nd, &attrbits);
+	NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+	*tl++ = txdr_unsigned(NFSV4OP_RESTOREFH);
+	*tl = txdr_unsigned(NFSV4OP_LAYOUTGET);
+	nfsrv_setuplayoutget(nd, NFSLAYOUTIOMODE_RW, 0, UINT64_MAX, 0, stateidp,
+	    layouttype, layoutlen, usecurstateid);
+	error = nfscl_request(nd, dvp, p, cred, dstuff);
+	if (error != 0)
+		return (error);
+	NFSCL_DEBUG(4, "nfsrpc_createlayout stat=%d err=%d\n", nd->nd_repstat,
+	    error);
+	if (nd->nd_repstat != 0)
+		*laystatp = nd->nd_repstat;
+	NFSCL_INCRSEQID(owp->nfsow_seqid, nd);
+	if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+		NFSCL_DEBUG(4, "nfsrpc_createlayout open succeeded\n");
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+		    6 * NFSX_UNSIGNED);
+		stateid.seqid = *tl++;
+		stateid.other[0] = *tl++;
+		stateid.other[1] = *tl++;
+		stateid.other[2] = *tl;
+		nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		deleg = fxdr_unsigned(int, *tl);
+		if (deleg == NFSV4OPEN_DELEGATEREAD ||
+		    deleg == NFSV4OPEN_DELEGATEWRITE) {
+			if (!(owp->nfsow_clp->nfsc_flags &
+			      NFSCLFLAGS_FIRSTDELEG))
+				owp->nfsow_clp->nfsc_flags |=
+				  (NFSCLFLAGS_FIRSTDELEG | NFSCLFLAGS_GOTDELEG);
+			dp = malloc(sizeof(struct nfscldeleg) + NFSX_V4FHMAX,
+			    M_NFSCLDELEG, M_WAITOK);
+			LIST_INIT(&dp->nfsdl_owner);
+			LIST_INIT(&dp->nfsdl_lock);
+			dp->nfsdl_clp = owp->nfsow_clp;
+			newnfs_copyincred(cred, &dp->nfsdl_cred);
+			nfscl_lockinit(&dp->nfsdl_rwlock);
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+			    NFSX_UNSIGNED);
+			dp->nfsdl_stateid.seqid = *tl++;
+			dp->nfsdl_stateid.other[0] = *tl++;
+			dp->nfsdl_stateid.other[1] = *tl++;
+			dp->nfsdl_stateid.other[2] = *tl++;
+			ret = fxdr_unsigned(int, *tl);
+			if (deleg == NFSV4OPEN_DELEGATEWRITE) {
+				dp->nfsdl_flags = NFSCLDL_WRITE;
+				/*
+				 * Indicates how much the file can grow.
+				 */
+				NFSM_DISSECT(tl, u_int32_t *,
+				    3 * NFSX_UNSIGNED);
+				limitby = fxdr_unsigned(int, *tl++);
+				switch (limitby) {
+				case NFSV4OPEN_LIMITSIZE:
+					dp->nfsdl_sizelimit = fxdr_hyper(tl);
+					break;
+				case NFSV4OPEN_LIMITBLOCKS:
+					dp->nfsdl_sizelimit =
+					    fxdr_unsigned(u_int64_t, *tl++);
+					dp->nfsdl_sizelimit *=
+					    fxdr_unsigned(u_int64_t, *tl);
+					break;
+				default:
+					error = NFSERR_BADXDR;
+					goto nfsmout;
+				}
+			} else {
+				dp->nfsdl_flags = NFSCLDL_READ;
+			}
+			if (ret != 0)
+				dp->nfsdl_flags |= NFSCLDL_RECALL;
+			error = nfsrv_dissectace(nd, &dp->nfsdl_ace, &ret,
+			    &acesize, p);
+			if (error != 0)
+				goto nfsmout;
+		} else if (deleg != NFSV4OPEN_DELEGATENONE) {
+			error = NFSERR_BADXDR;
+			goto nfsmout;
+		}
+
+		/* Now, we should have the status for the SaveFH. */
+		NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+		if (*++tl == 0) {
+			NFSCL_DEBUG(4, "nfsrpc_createlayout SaveFH ok\n");
+			/*
+			 * Now, process the GetFH and Getattr for the newly
+			 * created file. nfscl_mtofh() will set
+			 * ND_NOMOREDATA if these weren't successful.
+			 */
+			error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+			NFSCL_DEBUG(4, "aft nfscl_mtofh err=%d\n", error);
+			if (error != 0)
+				goto nfsmout;
+		} else
+			nd->nd_flag |= ND_NOMOREDATA;
+		/* Now we have the PutFH and Getattr for the directory. */
+		if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+			NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+			if (*++tl != 0)
+				nd->nd_flag |= ND_NOMOREDATA;
+			else {
+				NFSM_DISSECT(tl, uint32_t *, 2 *
+				    NFSX_UNSIGNED);
+				if (*++tl != 0)
+					nd->nd_flag |= ND_NOMOREDATA;
+			}
+		}
+		if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+			/* Load the directory attributes. */
+			error = nfsm_loadattr(nd, dnap);
+			NFSCL_DEBUG(4, "aft nfsm_loadattr err=%d\n", error);
+			if (error != 0)
+				goto nfsmout;
+			*dattrflagp = 1;
+			if (dp != NULL && *attrflagp != 0) {
+				dp->nfsdl_change = nnap->na_filerev;
+				dp->nfsdl_modtime = nnap->na_mtime;
+				dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
+			}
+			/*
+			 * We can now complete the Open state.
+			 */
+			nfhp = *nfhpp;
+			if (dp != NULL) {
+				dp->nfsdl_fhlen = nfhp->nfh_len;
+				NFSBCOPY(nfhp->nfh_fh, dp->nfsdl_fh,
+				    nfhp->nfh_len);
+			}
+			/*
+			 * Get an Open structure that will be
+			 * attached to the OpenOwner, acquired already.
+			 */
+			error = nfscl_open(dvp, nfhp->nfh_fh, nfhp->nfh_len, 
+			    (NFSV4OPEN_ACCESSWRITE | NFSV4OPEN_ACCESSREAD), 0,
+			    cred, p, NULL, &op, &newone, NULL, 0);
+			if (error != 0)
+				goto nfsmout;
+			op->nfso_stateid = stateid;
+			newnfs_copyincred(cred, &op->nfso_cred);
+	
+			nfscl_openrelease(nmp, op, error, newone);
+			*unlockedp = 1;
+
+			/* Now, handle the RestoreFH and LayoutGet. */
+			if (nd->nd_repstat == 0) {
+				NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
+				*laystatp = fxdr_unsigned(int, *(tl + 3));
+				if (*laystatp == 0) {
+					error = nfsrv_parselayoutget(nd,
+					    stateidp, retonclosep, flhp);
+					if (error != 0)
+						*laystatp = error;
+				}
+				NFSCL_DEBUG(4, "aft nfsrv_parselayout err=%d\n",
+				    error);
+			} else
+				nd->nd_repstat = 0;
+		}
+	}
+	if (nd->nd_repstat != 0 && error == 0)
+		error = nd->nd_repstat;
+	if (error == NFSERR_STALECLIENTID || error == NFSERR_BADSESSION)
+		nfscl_initiate_recovery(owp->nfsow_clp);
+nfsmout:
+	NFSCL_DEBUG(4, "eo nfsrpc_createlayout err=%d\n", error);
+	if (error == 0)
+		*dpp = dp;
+	else
+		free(dp, M_NFSCLDELEG);
+	mbuf_freem(nd->nd_mrep);
+	return (error);
+}
+
+/*
+ * Similar to nfsrpc_getopenlayout(), except that it is used for the
+ * Create case.
+ */
+static int
+nfsrpc_getcreatelayout(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+    nfsquad_t cverf, int fmode, struct nfsclowner *owp, struct nfscldeleg **dpp,
+    struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap,
+    struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp,
+    int *dattrflagp, void *dstuff, int *unlockedp)
+{
+	struct nfscllayout *lyp;
+	struct nfsclflayouthead flh;
+	struct nfsfh *nfhp;
+	struct nfsclsession *tsep;
+	struct nfsmount *nmp;
+	nfsv4stateid_t stateid;
+	int error, layoutlen, layouttype, retonclose, laystat;
+
+	error = 0;
+	nmp = VFSTONFS(dvp->v_mount);
+	if (NFSHASFLEXFILE(nmp))
+		layouttype = NFSLAYOUT_FLEXFILE;
+	else
+		layouttype = NFSLAYOUT_NFSV4_1_FILES;
+	LIST_INIT(&flh);
+	tsep = nfsmnt_mdssession(nmp);
+	layoutlen = tsep->nfsess_maxcache - (NFSX_STATEID + 3 * NFSX_UNSIGNED);
+	error = nfsrpc_createlayout(dvp, name, namelen, vap, cverf, fmode,
+	    owp, dpp, cred, p, dnap, nnap, nfhpp, attrflagp, dattrflagp,
+	    dstuff, unlockedp, &stateid, 1, layouttype, layoutlen, &retonclose,
+	    &flh, &laystat);
+	NFSCL_DEBUG(4, "aft nfsrpc_createlayoutrpc laystat=%d err=%d\n",
+	    laystat, error);
+	lyp = NULL;
+	if (laystat == 0) {
+		nfhp = *nfhpp;
+		laystat = nfsrpc_layoutgetres(nmp, dvp, nfhp->nfh_fh,
+		    nfhp->nfh_len, &stateid, retonclose, NULL, &lyp, &flh,
+		    layouttype, laystat, NULL, cred, p);
+	} else
+		laystat = nfsrpc_layoutgetres(nmp, dvp, NULL, 0, &stateid,
+		    retonclose, NULL, &lyp, &flh, layouttype, laystat, NULL,
+		    cred, p);
+	if (laystat == 0)
+		nfscl_rellayout(lyp, 0);
+	return (error);
+}
+
+/*
+ * Process the results of a layoutget() operation.
+ */
+static int
+nfsrpc_layoutgetres(struct nfsmount *nmp, vnode_t vp, uint8_t *newfhp,
+    int newfhlen, nfsv4stateid_t *stateidp, int retonclose, uint32_t *notifybit,
+    struct nfscllayout **lypp, struct nfsclflayouthead *flhp, int layouttype,
+    int laystat, int *islockedp, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclflayout *tflp;
+	struct nfscldevinfo *dip;
+	uint8_t *dev;
+	int i, mirrorcnt;
+
+	if (laystat == NFSERR_UNKNLAYOUTTYPE) {
+		NFSLOCKMNT(nmp);
+		if (!NFSHASFLEXFILE(nmp)) {
+			/* Switch to using Flex File Layout. */
+			nmp->nm_state |= NFSSTA_FLEXFILE;
+		} else if (layouttype == NFSLAYOUT_FLEXFILE) {
+			/* Disable pNFS. */
+			NFSCL_DEBUG(1, "disable PNFS\n");
+			nmp->nm_state &= ~(NFSSTA_PNFS | NFSSTA_FLEXFILE);
+		}
+		NFSUNLOCKMNT(nmp);
+	}
+	if (laystat == 0) {
+		NFSCL_DEBUG(4, "nfsrpc_layoutgetres at FOREACH\n");
+		LIST_FOREACH(tflp, flhp, nfsfl_list) {
+			if (layouttype == NFSLAYOUT_FLEXFILE)
+				mirrorcnt = tflp->nfsfl_mirrorcnt;
+			else
+				mirrorcnt = 1;
+			for (i = 0; i < mirrorcnt; i++) {
+				laystat = nfscl_adddevinfo(nmp, NULL, i, tflp);
+				NFSCL_DEBUG(4, "aft adddev=%d\n", laystat);
+				if (laystat != 0) {
+					if (layouttype == NFSLAYOUT_FLEXFILE)
+						dev = tflp->nfsfl_ffm[i].dev;
+					else
+						dev = tflp->nfsfl_dev;
+					laystat = nfsrpc_getdeviceinfo(nmp, dev,
+					    layouttype, notifybit, &dip, cred,
+					    p);
+					NFSCL_DEBUG(4, "aft nfsrpc_gdi=%d\n",
+					    laystat);
+					if (laystat != 0)
+						goto out;
+					laystat = nfscl_adddevinfo(nmp, dip, i,
+					    tflp);
+					if (laystat != 0)
+						printf("nfsrpc_layoutgetres"
+						    ": cannot add\n");
+				}
+			}
+		}
+	}
+out:
+	if (laystat == 0) {
+		/*
+		 * nfscl_layout() always returns with the nfsly_lock
+		 * set to a refcnt (shared lock).
+		 * Passing in dvp is sufficient, since it is only used to
+		 * get the fsid for the file system.
+		 */
+		laystat = nfscl_layout(nmp, vp, newfhp, newfhlen, stateidp,
+		    layouttype, retonclose, flhp, lypp, cred, p);
+		NFSCL_DEBUG(4, "nfsrpc_layoutgetres: aft nfscl_layout=%d\n",
+		    laystat);
+		if (laystat == 0 && islockedp != NULL)
+			*islockedp = 1;
+	}
+	return (laystat);
+}
+
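nfsrpc_layoutgetres() above also implements the layout-type fallback: when a LayoutGet fails with NFSERR_UNKNLAYOUTTYPE, the mount switches from the NFSv4.1 Files layout to the Flex File layout, and if that is also unknown, pNFS is disabled. A small standalone sketch of that state change follows; the enum and names are illustrative, not the imported nm_state flag bits.

#include <stdio.h>

/* Illustrative states only; the real code uses nm_state flag bits. */
enum sketch_layout_mode {
	SK_FILES,	/* NFSv4.1 Files layout (the default) */
	SK_FLEXFILE,	/* Flexible File layout */
	SK_NO_PNFS	/* pNFS disabled; do regular NFS I/O instead */
};

/*
 * Fallback taken when the server answers a LayoutGet with
 * NFSERR_UNKNLAYOUTTYPE: try the Flex File layout once, and if that is
 * also unknown, stop asking for layouts altogether.
 */
static enum sketch_layout_mode
sketch_unknown_layout(enum sketch_layout_mode cur)
{
	if (cur == SK_FILES)
		return (SK_FLEXFILE);
	return (SK_NO_PNFS);
}

int
main(void)
{
	enum sketch_layout_mode m = SK_FILES;

	m = sketch_unknown_layout(m);	/* -> SK_FLEXFILE */
	m = sketch_unknown_layout(m);	/* -> SK_NO_PNFS */
	printf("final mode: %d\n", (int)m);
	return (0);
}
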
diff --git a/freebsd/sys/fs/nfsclient/nfs_clstate.c b/freebsd/sys/fs/nfsclient/nfs_clstate.c
new file mode 100644
index 0000000..0a2e4bc
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clstate.c
@@ -0,0 +1,5458 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * These functions implement the client side state handling for NFSv4.
+ * NFSv4 state handling:
+ * - A lockowner is used to determine lock contention, so it
+ *   corresponds directly to a Posix pid. (1 to 1 mapping)
+ * - The correct granularity of an OpenOwner is not nearly so
+ *   obvious. An OpenOwner does the following:
+ *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
+ *   - is used to check for Open/Share contention (not applicable to
+ *     this client, since all Opens are Deny_None)
+ *   As such, I considered both extremes.
+ *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
+ *   all Open, Close and Lock (with a new lockowner) Ops.
+ *   1 OpenOwner for each Open - This one results in an OpenConfirm for
+ *   every Open, for most servers.
+ *   So, I chose to use the same mapping as I did for LockOwners.
+ *   The main concern here is that you can end up with multiple Opens
+ *   for the same File Handle, but on different OpenOwners (opens
+ *   inherited from parents, grandparents...) and you do not know
+ *   which of these the vnodeop close applies to. This is handled by
+ *   delaying the Close Op(s) until all of the Opens have been closed.
+ *   (It is not yet obvious if this is the correct granularity.)
+ * - How the code handles serialization:
+ *   - For the ClientId, it uses an exclusive lock while getting its
+ *     SetClientId and during recovery. Otherwise, it uses a shared
+ *     lock via a reference count.
+ *   - For the rest of the data structures, it uses an SMP mutex
+ *     (once the nfs client is SMP safe) and doesn't sleep while
+ *     manipulating the linked lists.
+ *   - The serialization of Open/Close/Lock/LockU falls out in the
+ *     "wash", since OpenOwners and LockOwners are both mapped from
+ *     Posix pid. In other words, there is only one Posix pid using
+ *     any given owner, so that owner is serialized. (If you change
+ *     the granularity of the OpenOwner, then code must be added to
+ *     serialize Ops on the OpenOwner.)
+ * - When to get rid of OpenOwners and LockOwners.
+ *   - The function nfscl_cleanup_common() is executed after a process exits.
+ *     It goes through the client list looking for all Open and Lock Owners.
+ *     When one is found, it is marked "defunct" or in the case of
+ *     an OpenOwner without any Opens, freed.
+ *     The renew thread scans for defunct Owners and gets rid of them,
+ *     if it can. The LockOwners will also be deleted when the
+ *     associated Open is closed.
+ *   - If the LockU or Close Op(s) fail during close in a way
+ *     that could be recovered upon retry, they are relinked to the
+ *     ClientId's defunct open list and retried by the renew thread
+ *     until they succeed or an unmount/recovery occurs.
+ *     (Since we are done with them, they do not need to be recovered.)
+ */
+
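The owner-mapping scheme described in the comment above keys every OpenOwner and LockOwner to a POSIX pid. A rough standalone sketch of that 1-to-1 mapping follows; the sketch_fill_owner() name and the fixed length are made up for illustration, and this is not the imported nfscl_filllockowner().

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>	/* getpid() */

#define SK_OWNERLEN	12	/* arbitrary fixed owner length for this sketch */

/*
 * Derive a fixed-size opaque owner name from a POSIX pid, so that the
 * same process always presents the same open/lock owner to the server.
 */
static void
sketch_fill_owner(pid_t pid, uint8_t owner[SK_OWNERLEN])
{
	memset(owner, 0, SK_OWNERLEN);
	memcpy(owner, &pid, sizeof(pid));	/* embed the pid verbatim */
}

int
main(void)
{
	uint8_t owner[SK_OWNERLEN];

	sketch_fill_owner(getpid(), owner);
	printf("owner[0..3] = %02x%02x%02x%02x\n",
	    owner[0], owner[1], owner[2], owner[3]);
	return (0);
}
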
+#ifndef APPLEKEXT
+#include <fs/nfs/nfsport.h>
+
+/*
+ * Global variables
+ */
+extern struct nfsstatsv1 nfsstatsv1;
+extern struct nfsreqhead nfsd_reqq;
+extern u_int32_t newnfs_false, newnfs_true;
+extern int nfscl_debuglevel;
+extern int nfscl_enablecallb;
+extern int nfs_numnfscbd;
+NFSREQSPINLOCK;
+NFSCLSTATEMUTEX;
+int nfscl_inited = 0;
+struct nfsclhead nfsclhead;	/* Head of clientid list */
+int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
+int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;
+#endif	/* !APPLEKEXT */
+
+static int nfscl_delegcnt = 0;
+static int nfscl_layoutcnt = 0;
+static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
+    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
+static void nfscl_clrelease(struct nfsclclient *);
+static void nfscl_cleanclient(struct nfsclclient *);
+static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
+    struct ucred *, NFSPROC_T *);
+static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
+    struct nfsmount *, struct ucred *, NFSPROC_T *);
+static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
+static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
+    struct nfscllock *, int);
+static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
+    struct nfscllock **, int);
+static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
+static u_int32_t nfscl_nextcbident(void);
+static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
+static struct nfsclclient *nfscl_getclnt(u_int32_t);
+static struct nfsclclient *nfscl_getclntsess(uint8_t *);
+static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
+    int);
+static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
+    int, struct nfsclrecalllayout **);
+static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
+static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
+    int);
+static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
+static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
+    u_int8_t *, struct nfscllock **);
+static void nfscl_freealllocks(struct nfscllockownerhead *, int);
+static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
+    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
+static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
+    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
+    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
+static int nfscl_moveopen(vnode_t , struct nfsclclient *,
+    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
+    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
+static void nfscl_totalrecall(struct nfsclclient *);
+static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
+    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
+static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
+    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
+    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
+static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
+    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
+    struct ucred *, NFSPROC_T *);
+static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
+    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
+static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
+static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
+static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
+static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
+    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
+static void nfscl_freeopenowner(struct nfsclowner *, int);
+static void nfscl_cleandeleg(struct nfscldeleg *);
+static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
+    struct nfsmount *, NFSPROC_T *);
+static void nfscl_emptylockowner(struct nfscllockowner *,
+    struct nfscllockownerfhhead *);
+static void nfscl_mergeflayouts(struct nfsclflayouthead *,
+    struct nfsclflayouthead *);
+static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
+    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
+static int nfscl_seq(uint32_t, uint32_t);
+static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
+    struct ucred *, NFSPROC_T *);
+static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
+    struct ucred *, NFSPROC_T *);
+
+static short nfscberr_null[] = {
+	0,
+	0,
+};
+
+static short nfscberr_getattr[] = {
+	NFSERR_RESOURCE,
+	NFSERR_BADHANDLE,
+	NFSERR_BADXDR,
+	NFSERR_RESOURCE,
+	NFSERR_SERVERFAULT,
+	0,
+};
+
+static short nfscberr_recall[] = {
+	NFSERR_RESOURCE,
+	NFSERR_BADHANDLE,
+	NFSERR_BADSTATEID,
+	NFSERR_BADXDR,
+	NFSERR_RESOURCE,
+	NFSERR_SERVERFAULT,
+	0,
+};
+
+static short *nfscl_cberrmap[] = {
+	nfscberr_null,
+	nfscberr_null,
+	nfscberr_null,
+	nfscberr_getattr,
+	nfscberr_recall
+};
+
+#define	NETFAMILY(clp) \
+		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
+
+/*
+ * Called for an open operation.
+ * If the nfhp argument is NULL, just get an openowner.
+ */
+APPLESTATIC int
+nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
+    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
+    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp, *nowp;
+	struct nfsclopen *op = NULL, *nop = NULL;
+	struct nfscldeleg *dp;
+	struct nfsclownerhead *ohp;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN];
+	int ret;
+
+	if (newonep != NULL)
+		*newonep = 0;
+	if (opp != NULL)
+		*opp = NULL;
+	if (owpp != NULL)
+		*owpp = NULL;
+
+	/*
+	 * Might need one or both of these, so MALLOC them now, to
+	 * avoid a tsleep() in MALLOC later.
+	 */
+	nowp = malloc(sizeof (struct nfsclowner),
+	    M_NFSCLOWNER, M_WAITOK);
+	if (nfhp != NULL)
+	    nop = malloc(sizeof (struct nfsclopen) +
+		fhlen - 1, M_NFSCLOPEN, M_WAITOK);
+	ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
+	if (ret != 0) {
+		free(nowp, M_NFSCLOWNER);
+		if (nop != NULL)
+			free(nop, M_NFSCLOPEN);
+		return (ret);
+	}
+
+	/*
+	 * Get the Open iff it already exists.
+	 * If none found, add the new one or return error, depending upon
+	 * "create".
+	 */
+	NFSLOCKCLSTATE();
+	dp = NULL;
+	/* First check the delegation list */
+	if (nfhp != NULL && usedeleg) {
+		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
+			if (dp->nfsdl_fhlen == fhlen &&
+			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
+				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
+				    (dp->nfsdl_flags & NFSCLDL_WRITE))
+					break;
+				dp = NULL;
+				break;
+			}
+		}
+	}
+
+	if (dp != NULL) {
+		nfscl_filllockowner(p->td_proc, own, F_POSIX);
+		ohp = &dp->nfsdl_owner;
+	} else {
+		/* For NFSv4.1 and this option, use a single open_owner. */
+		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
+			nfscl_filllockowner(NULL, own, F_POSIX);
+		else
+			nfscl_filllockowner(p->td_proc, own, F_POSIX);
+		ohp = &clp->nfsc_owner;
+	}
+	/* Now, search for an openowner */
+	LIST_FOREACH(owp, ohp, nfsow_list) {
+		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
+			break;
+	}
+
+	/*
+	 * Create a new open, as required.
+	 */
+	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
+	    cred, newonep);
+
+	/*
+	 * Now, check the mode on the open and return the appropriate
+	 * value.
+	 */
+	if (retp != NULL) {
+		if (nfhp != NULL && dp != NULL && nop == NULL)
+			/* new local open on delegation */
+			*retp = NFSCLOPEN_SETCRED;
+		else
+			*retp = NFSCLOPEN_OK;
+	}
+	if (op != NULL && (amode & ~(op->nfso_mode))) {
+		op->nfso_mode |= amode;
+		if (retp != NULL && dp == NULL)
+			*retp = NFSCLOPEN_DOOPEN;
+	}
+
+	/*
+	 * Serialize modifications to the open owner for multiple threads
+	 * within the same process using a read/write sleep lock.
+	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
+	 * by acquiring a shared lock.  The close operations still use an
+	 * exclusive lock for this case.
+	 */
+	if (lockit != 0) {
+		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) {
+			/*
+			 * Get a shared lock on the OpenOwner, but first
+			 * wait for any pending exclusive lock, so that the
+			 * exclusive locker gets priority.
+			 */
+			nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
+			    NFSCLSTATEMUTEXPTR, NULL);
+			nfsv4_getref(&owp->nfsow_rwlock, NULL,
+			    NFSCLSTATEMUTEXPTR, NULL);
+		} else
+			nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
+	}
+	NFSUNLOCKCLSTATE();
+	if (nowp != NULL)
+		free(nowp, M_NFSCLOWNER);
+	if (nop != NULL)
+		free(nop, M_NFSCLOPEN);
+	if (owpp != NULL)
+		*owpp = owp;
+	if (opp != NULL)
+		*opp = op;
+	return (0);
+}
+
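The (amode & ~op->nfso_mode) test in nfscl_open() above decides whether an existing Open already covers the requested access bits or whether an upgrade OPEN (NFSCLOPEN_DOOPEN) is needed. A tiny standalone sketch of that subset check follows; the SK_* values are placeholders, not the real NFSV4OPEN_* bits.

#include <stdint.h>
#include <stdio.h>

/* Illustrative access bits; values are placeholders only. */
#define SK_ACCESS_READ	0x1
#define SK_ACCESS_WRITE	0x2

/*
 * An existing open only covers a new request when the requested bits are
 * a subset of the modes already granted; otherwise an upgrade OPEN is
 * needed, which is what the (amode & ~nfso_mode) test expresses.
 */
static int
sketch_needs_upgrade(uint32_t have_mode, uint32_t want_mode)
{
	return ((want_mode & ~have_mode) != 0);
}

int
main(void)
{
	/* Read-only open, then a read/write request -> upgrade needed. */
	printf("%d\n", sketch_needs_upgrade(SK_ACCESS_READ,
	    SK_ACCESS_READ | SK_ACCESS_WRITE));		/* prints 1 */
	/* A read/write open already covers a read-only request. */
	printf("%d\n", sketch_needs_upgrade(
	    SK_ACCESS_READ | SK_ACCESS_WRITE, SK_ACCESS_READ));	/* prints 0 */
	return (0);
}
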
+/*
+ * Create a new open, as required.
+ */
+static void
+nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
+    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
+    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
+    struct ucred *cred, int *newonep)
+{
+	struct nfsclowner *owp = *owpp, *nowp;
+	struct nfsclopen *op, *nop;
+
+	if (nowpp != NULL)
+		nowp = *nowpp;
+	else
+		nowp = NULL;
+	if (nopp != NULL)
+		nop = *nopp;
+	else
+		nop = NULL;
+	if (owp == NULL && nowp != NULL) {
+		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
+		LIST_INIT(&nowp->nfsow_open);
+		nowp->nfsow_clp = clp;
+		nowp->nfsow_seqid = 0;
+		nowp->nfsow_defunct = 0;
+		nfscl_lockinit(&nowp->nfsow_rwlock);
+		if (dp != NULL) {
+			nfsstatsv1.cllocalopenowners++;
+			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
+		} else {
+			nfsstatsv1.clopenowners++;
+			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
+		}
+		owp = *owpp = nowp;
+		*nowpp = NULL;
+		if (newonep != NULL)
+			*newonep = 1;
+	}
+
+	 /* If an fhp has been specified, create an Open as well. */
+	if (fhp != NULL) {
+		/* and look for the correct open, based upon FH */
+		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+			if (op->nfso_fhlen == fhlen &&
+			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
+				break;
+		}
+		if (op == NULL && nop != NULL) {
+			nop->nfso_own = owp;
+			nop->nfso_mode = 0;
+			nop->nfso_opencnt = 0;
+			nop->nfso_posixlock = 1;
+			nop->nfso_fhlen = fhlen;
+			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
+			LIST_INIT(&nop->nfso_lock);
+			nop->nfso_stateid.seqid = 0;
+			nop->nfso_stateid.other[0] = 0;
+			nop->nfso_stateid.other[1] = 0;
+			nop->nfso_stateid.other[2] = 0;
+			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
+			newnfs_copyincred(cred, &nop->nfso_cred);
+			if (dp != NULL) {
+				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
+				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
+				    nfsdl_list);
+				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
+				nfsstatsv1.cllocalopens++;
+			} else {
+				nfsstatsv1.clopens++;
+			}
+			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
+			*opp = nop;
+			*nopp = NULL;
+			if (newonep != NULL)
+				*newonep = 1;
+		} else {
+			*opp = op;
+		}
+	}
+}
+
+/*
+ * Called to find/add a delegation to a client.
+ */
+APPLESTATIC int
+nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
+    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
+{
+	struct nfscldeleg *dp = *dpp, *tdp;
+
+	/*
+	 * First, if we have received a Read delegation for a file on a
+	 * read/write file system, just return it, because they aren't
+	 * useful, imho.
+	 */
+	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
+	    (dp->nfsdl_flags & NFSCLDL_READ)) {
+		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
+		free(dp, M_NFSCLDELEG);
+		*dpp = NULL;
+		return (0);
+	}
+
+	/* Look for the correct deleg, based upon FH */
+	NFSLOCKCLSTATE();
+	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
+	if (tdp == NULL) {
+		if (dp == NULL) {
+			NFSUNLOCKCLSTATE();
+			return (NFSERR_BADSTATEID);
+		}
+		*dpp = NULL;
+		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
+		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
+		    nfsdl_hash);
+		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
+		nfsstatsv1.cldelegates++;
+		nfscl_delegcnt++;
+	} else {
+		/*
+		 * Delegation already exists, so what do we do with a new one?
+		 */
+		if (dp != NULL) {
+			printf("Deleg already exists!\n");
+			free(dp, M_NFSCLDELEG);
+			*dpp = NULL;
+		} else {
+			*dpp = tdp;
+		}
+	}
+	NFSUNLOCKCLSTATE();
+	return (0);
+}
+
+/*
+ * Find a delegation for this file handle. Return NULL upon failure.
+ */
+static struct nfscldeleg *
+nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
+{
+	struct nfscldeleg *dp;
+
+	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
+	    if (dp->nfsdl_fhlen == fhlen &&
+		!NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
+		break;
+	}
+	return (dp);
+}
+
+/*
+ * Get a stateid for an I/O operation. First, look for an open and iff
+ * found, return either a lockowner stateid or the open stateid.
+ * If no Open is found, just return error and the special stateid of all zeros.
+ */
+APPLESTATIC int
+nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
+    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
+    void **lckpp)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp;
+	struct nfsclopen *op = NULL, *top;
+	struct nfscllockowner *lp;
+	struct nfscldeleg *dp;
+	struct nfsnode *np;
+	struct nfsmount *nmp;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN];
+	int error, done;
+
+	*lckpp = NULL;
+	/*
+	 * Initially, just set the special stateid of all zeros.
+	 * (Don't do this for a DS, since the special stateid can't be used.)
+	 */
+	if (fords == 0) {
+		stateidp->seqid = 0;
+		stateidp->other[0] = 0;
+		stateidp->other[1] = 0;
+		stateidp->other[2] = 0;
+	}
+	if (vnode_vtype(vp) != VREG)
+		return (EISDIR);
+	np = VTONFS(vp);
+	nmp = VFSTONFS(vnode_mount(vp));
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return (EACCES);
+	}
+
+	/*
+	 * Wait for recovery to complete.
+	 */
+	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
+		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
+		    PZERO, "nfsrecvr", NULL);
+
+	/*
+	 * First, look for a delegation.
+	 */
+	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
+		if (dp->nfsdl_fhlen == fhlen &&
+		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
+			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
+			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
+				stateidp->seqid = dp->nfsdl_stateid.seqid;
+				stateidp->other[0] = dp->nfsdl_stateid.other[0];
+				stateidp->other[1] = dp->nfsdl_stateid.other[1];
+				stateidp->other[2] = dp->nfsdl_stateid.other[2];
+				if (!(np->n_flag & NDELEGRECALL)) {
+					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
+					    nfsdl_list);
+					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
+					    nfsdl_list);
+					dp->nfsdl_timestamp = NFSD_MONOSEC +
+					    120;
+					dp->nfsdl_rwlock.nfslock_usecnt++;
+					*lckpp = (void *)&dp->nfsdl_rwlock;
+				}
+				NFSUNLOCKCLSTATE();
+				return (0);
+			}
+			break;
+		}
+	}
+
+	if (p != NULL) {
+		/*
+		 * If p != NULL, we want to search the parentage tree
+		 * for a matching OpenOwner and use that.
+		 */
+		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
+			nfscl_filllockowner(NULL, own, F_POSIX);
+		else
+			nfscl_filllockowner(p->td_proc, own, F_POSIX);
+		lp = NULL;
+		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
+		    mode, &lp, &op);
+		if (error == 0 && lp != NULL && fords == 0) {
+			/* Don't return a lock stateid for a DS. */
+			stateidp->seqid =
+			    lp->nfsl_stateid.seqid;
+			stateidp->other[0] =
+			    lp->nfsl_stateid.other[0];
+			stateidp->other[1] =
+			    lp->nfsl_stateid.other[1];
+			stateidp->other[2] =
+			    lp->nfsl_stateid.other[2];
+			NFSUNLOCKCLSTATE();
+			return (0);
+		}
+	}
+	if (op == NULL) {
+		/* If not found, just look for any OpenOwner that will work. */
+		top = NULL;
+		done = 0;
+		owp = LIST_FIRST(&clp->nfsc_owner);
+		while (!done && owp != NULL) {
+			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+				if (op->nfso_fhlen == fhlen &&
+				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
+					if (top == NULL && (op->nfso_mode &
+					    NFSV4OPEN_ACCESSWRITE) != 0 &&
+					    (mode & NFSV4OPEN_ACCESSREAD) != 0)
+						top = op;
+					if ((mode & op->nfso_mode) == mode) {
+						done = 1;
+						break;
+					}
+				}
+			}
+			if (!done)
+				owp = LIST_NEXT(owp, nfsow_list);
+		}
+		if (!done) {
+			NFSCL_DEBUG(2, "openmode top=%p\n", top);
+			if (top == NULL || NFSHASOPENMODE(nmp)) {
+				NFSUNLOCKCLSTATE();
+				return (ENOENT);
+			} else
+				op = top;
+		}
+		/*
+		 * For read aheads or write behinds, use the open cred.
+		 * A read ahead or write behind is indicated by p == NULL.
+		 */
+		if (p == NULL)
+			newnfs_copycred(&op->nfso_cred, cred);
+	}
+
+	/*
+	 * No lock stateid, so return the open stateid.
+	 */
+	stateidp->seqid = op->nfso_stateid.seqid;
+	stateidp->other[0] = op->nfso_stateid.other[0];
+	stateidp->other[1] = op->nfso_stateid.other[1];
+	stateidp->other[2] = op->nfso_stateid.other[2];
+	NFSUNLOCKCLSTATE();
+	return (0);
+}
+
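nfscl_getstateid() above picks the stateid for an I/O operation in a fixed order: a usable delegation first, then a matching lock stateid (not returned for a DS), then an open stateid, with the special all-zeros stateid pre-loaded as the default when nothing is found. A standalone sketch of that precedence follows; the names are illustrative only.

#include <stdio.h>

/* Illustrative stateid kinds, in increasing order of preference. */
enum sk_stid { SK_SPECIAL, SK_OPEN, SK_LOCK, SK_DELEG };

/*
 * Restate the selection order: delegation, then byte-range lock owner,
 * then open, and finally the special all-zeros stateid.
 */
static enum sk_stid
sketch_choose_stateid(int have_deleg, int have_lock, int have_open)
{
	if (have_deleg)
		return (SK_DELEG);
	if (have_lock)
		return (SK_LOCK);
	if (have_open)
		return (SK_OPEN);
	return (SK_SPECIAL);
}

int
main(void)
{
	/* A lock stateid wins over the open stateid when both exist. */
	printf("%d\n", (int)sketch_choose_stateid(0, 1, 1));
	return (0);
}
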
+/*
+ * Search for a matching file, mode and, optionally, lockowner.
+ */
+static int
+nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
+    u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
+    struct nfscllockowner **lpp, struct nfsclopen **opp)
+{
+	struct nfsclowner *owp;
+	struct nfsclopen *op, *rop, *rop2;
+	struct nfscllockowner *lp;
+	int keep_looping;
+
+	if (lpp != NULL)
+		*lpp = NULL;
+	/*
+	 * rop will be set to the open to be returned. There are three
+	 * variants of this, all for an open of the correct file:
+	 * 1 - A match of lockown.
+	 * 2 - A match of the openown, when no lockown match exists.
+	 * 3 - A match for any open, if no openown or lockown match exists.
+	 * Looking for #2 over #3 probably isn't necessary, but since
+	 * RFC3530 is vague w.r.t. the relationship between openowners and
+	 * lockowners, I think this is the safer way to go.
+	 */
+	rop = NULL;
+	rop2 = NULL;
+	keep_looping = 1;
+	/* Search the client list */
+	owp = LIST_FIRST(ohp);
+	while (owp != NULL && keep_looping != 0) {
+		/* and look for the correct open */
+		op = LIST_FIRST(&owp->nfsow_open);
+		while (op != NULL && keep_looping != 0) {
+			if (op->nfso_fhlen == fhlen &&
+			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
+			    && (op->nfso_mode & mode) == mode) {
+				if (lpp != NULL) {
+					/* Now look for a matching lockowner. */
+					LIST_FOREACH(lp, &op->nfso_lock,
+					    nfsl_list) {
+						if (!NFSBCMP(lp->nfsl_owner,
+						    lockown,
+						    NFSV4CL_LOCKNAMELEN)) {
+							*lpp = lp;
+							rop = op;
+							keep_looping = 0;
+							break;
+						}
+					}
+				}
+				if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
+				    openown, NFSV4CL_LOCKNAMELEN)) {
+					rop = op;
+					if (lpp == NULL)
+						keep_looping = 0;
+				}
+				if (rop2 == NULL)
+					rop2 = op;
+			}
+			op = LIST_NEXT(op, nfso_list);
+		}
+		owp = LIST_NEXT(owp, nfsow_list);
+	}
+	if (rop == NULL)
+		rop = rop2;
+	if (rop == NULL)
+		return (EBADF);
+	*opp = rop;
+	return (0);
+}
+
+/*
+ * Release use of an open owner. Called when open operations are done
+ * with the open owner.
+ */
+APPLESTATIC void
+nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
+    __unused int error, __unused int candelete, int unlocked)
+{
+
+	if (owp == NULL)
+		return;
+	NFSLOCKCLSTATE();
+	if (unlocked == 0) {
+		if (NFSHASONEOPENOWN(nmp))
+			nfsv4_relref(&owp->nfsow_rwlock);
+		else
+			nfscl_lockunlock(&owp->nfsow_rwlock);
+	}
+	nfscl_clrelease(owp->nfsow_clp);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Release use of an open structure under an open owner.
+ */
+APPLESTATIC void
+nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
+    int candelete)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp;
+
+	if (op == NULL)
+		return;
+	NFSLOCKCLSTATE();
+	owp = op->nfso_own;
+	if (NFSHASONEOPENOWN(nmp))
+		nfsv4_relref(&owp->nfsow_rwlock);
+	else
+		nfscl_lockunlock(&owp->nfsow_rwlock);
+	clp = owp->nfsow_clp;
+	if (error && candelete && op->nfso_opencnt == 0)
+		nfscl_freeopen(op, 0);
+	nfscl_clrelease(clp);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Called to get a clientid structure. It will optionally lock the
+ * client data structures to do the SetClientId/SetClientId_confirm,
+ * but will release that lock and return the clientid with a reference
+ * count on it.
+ * If the "cred" argument is NULL, a new clientid should not be created.
+ * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
+ * be done.
+ * The start_renewthread argument tells nfscl_getcl() to start a renew
+ * thread if this creates a new clp.
+ * It always returns clpp with a reference count on it, unless returning
+ * an error.
+ */
+APPLESTATIC int
+nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
+    int start_renewthread, struct nfsclclient **clpp)
+{
+	struct nfsclclient *clp;
+	struct nfsclclient *newclp = NULL;
+	struct nfsmount *nmp;
+	char uuid[HOSTUUIDLEN];
+	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
+	u_int16_t idlen = 0;
+
+	nmp = VFSTONFS(mp);
+	if (cred != NULL) {
+		getcredhostuuid(cred, uuid, sizeof uuid);
+		idlen = strlen(uuid);
+		if (idlen > 0)
+			idlen += sizeof (u_int64_t);
+		else
+			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
+		newclp = malloc(
+		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
+		    M_WAITOK | M_ZERO);
+	}
+	NFSLOCKCLSTATE();
+	/*
+	 * If a forced dismount is already in progress, don't
+	 * allocate a new clientid and get out now. For the case where
+	 * clp != NULL, this is a harmless optimization.
+	 */
+	if (NFSCL_FORCEDISM(mp)) {
+		NFSUNLOCKCLSTATE();
+		if (newclp != NULL)
+			free(newclp, M_NFSCLCLIENT);
+		return (EBADF);
+	}
+	clp = nmp->nm_clp;
+	if (clp == NULL) {
+		if (newclp == NULL) {
+			NFSUNLOCKCLSTATE();
+			return (EACCES);
+		}
+		clp = newclp;
+		clp->nfsc_idlen = idlen;
+		LIST_INIT(&clp->nfsc_owner);
+		TAILQ_INIT(&clp->nfsc_deleg);
+		TAILQ_INIT(&clp->nfsc_layout);
+		LIST_INIT(&clp->nfsc_devinfo);
+		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
+			LIST_INIT(&clp->nfsc_deleghash[i]);
+		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
+			LIST_INIT(&clp->nfsc_layouthash[i]);
+		clp->nfsc_flags = NFSCLFLAGS_INITED;
+		clp->nfsc_clientidrev = 1;
+		clp->nfsc_cbident = nfscl_nextcbident();
+		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
+		    clp->nfsc_idlen);
+		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
+		nmp->nm_clp = clp;
+		clp->nfsc_nmp = nmp;
+		NFSUNLOCKCLSTATE();
+		if (start_renewthread != 0)
+			nfscl_start_renewthread(clp);
+	} else {
+		NFSUNLOCKCLSTATE();
+		if (newclp != NULL)
+			free(newclp, M_NFSCLCLIENT);
+	}
+	NFSLOCKCLSTATE();
+	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
+	    !NFSCL_FORCEDISM(mp))
+		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
+		    NFSCLSTATEMUTEXPTR, mp);
+	if (igotlock == 0) {
+		/*
+		 * Call nfsv4_lock() with "iwantlock == 0" so that it will
+		 * wait for a pending exclusive lock request.  This gives the
+		 * exclusive lock request priority over this shared lock
+		 * request.
+		 * An exclusive lock on nfsc_lock is used mainly for server
+		 * crash recoveries.
+		 */
+		nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
+		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
+	}
+	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
+		/*
+		 * Both nfsv4_lock() and nfsv4_getref() know to check
+		 * for NFSCL_FORCEDISM() and return without sleeping to
+		 * wait for the exclusive lock to be released, since it
+		 * might be held by nfscl_umount() and we need to get out
+		 * now for that case and not wait until nfscl_umount()
+		 * releases it.
+		 */
+		NFSUNLOCKCLSTATE();
+		return (EBADF);
+	}
+	NFSUNLOCKCLSTATE();
+
+	/*
+	 * If it needs a clientid, do the setclientid now.
+	 */
+	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
+		if (!igotlock)
+			panic("nfscl_clget");
+		if (p == NULL || cred == NULL) {
+			NFSLOCKCLSTATE();
+			nfsv4_unlock(&clp->nfsc_lock, 0);
+			NFSUNLOCKCLSTATE();
+			return (EACCES);
+		}
+		/*
+		 * If RFC3530 Sec. 14.2.33 is taken literally,
+		 * NFSERR_CLIDINUSE will be returned persistently for the
+		 * case where a new mount of the same file system is using
+		 * a different principal. In practice, NFSERR_CLIDINUSE is
+		 * only returned when there is outstanding unexpired state
+		 * on the clientid. As such, try for twice the lease
+		 * interval, if we know what that is. Otherwise, make a
+		 * wild ass guess.
+		 * The case of returning NFSERR_STALECLIENTID is far less
+		 * likely, but might occur if there is a significant delay
+		 * between doing the SetClientID and SetClientIDConfirm Ops,
+		 * such that the server throws away the clientid before
+		 * receiving the SetClientIDConfirm.
+		 */
+		if (clp->nfsc_renew > 0)
+			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
+		else
+			clidinusedelay = 120;
+		trystalecnt = 3;
+		do {
+			error = nfsrpc_setclient(nmp, clp, 0, cred, p);
+			if (error == NFSERR_STALECLIENTID ||
+			    error == NFSERR_STALEDONTRECOVER ||
+			    error == NFSERR_BADSESSION ||
+			    error == NFSERR_CLIDINUSE) {
+				(void) nfs_catnap(PZERO, error, "nfs_setcl");
+			}
+		} while (((error == NFSERR_STALECLIENTID ||
+		     error == NFSERR_BADSESSION ||
+		     error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
+		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
+		if (error) {
+			NFSLOCKCLSTATE();
+			nfsv4_unlock(&clp->nfsc_lock, 0);
+			NFSUNLOCKCLSTATE();
+			return (error);
+		}
+		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
+	}
+	if (igotlock) {
+		NFSLOCKCLSTATE();
+		nfsv4_unlock(&clp->nfsc_lock, 1);
+		NFSUNLOCKCLSTATE();
+	}
+
+	*clpp = clp;
+	return (0);
+}
+
+/*
+ * Get a reference to a clientid and return it, if valid.
+ */
+APPLESTATIC struct nfsclclient *
+nfscl_findcl(struct nfsmount *nmp)
+{
+	struct nfsclclient *clp;
+
+	clp = nmp->nm_clp;
+	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
+		return (NULL);
+	return (clp);
+}
+
+/*
+ * Release the clientid structure. It may be locked or reference counted.
+ */
+static void
+nfscl_clrelease(struct nfsclclient *clp)
+{
+
+	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
+		nfsv4_unlock(&clp->nfsc_lock, 0);
+	else
+		nfsv4_relref(&clp->nfsc_lock);
+}
+
+/*
+ * External call for nfscl_clrelease.
+ */
+APPLESTATIC void
+nfscl_clientrelease(struct nfsclclient *clp)
+{
+
+	NFSLOCKCLSTATE();
+	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
+		nfsv4_unlock(&clp->nfsc_lock, 0);
+	else
+		nfsv4_relref(&clp->nfsc_lock);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Called when wanting to lock a byte region.
+ */
+APPLESTATIC int
+nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
+    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
+    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
+    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
+{
+	struct nfscllockowner *lp;
+	struct nfsclopen *op;
+	struct nfsclclient *clp;
+	struct nfscllockowner *nlp;
+	struct nfscllock *nlop, *otherlop;
+	struct nfscldeleg *dp = NULL, *ldp = NULL;
+	struct nfscllockownerhead *lhp = NULL;
+	struct nfsnode *np;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
+	u_int8_t *openownp;
+	int error = 0, ret, donelocally = 0;
+	u_int32_t mode;
+
+	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
+	mode = 0;
+	np = VTONFS(vp);
+	*lpp = NULL;
+	lp = NULL;
+	*newonep = 0;
+	*donelocallyp = 0;
+
+	/*
+	 * Might need these, so allocate them now with M_WAITOK, to avoid
+	 * sleeping in malloc() later while the client state mutex is held.
+	 */
+	nlp = malloc(
+	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
+	otherlop = malloc(
+	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
+	nlop = malloc(
+	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
+	nlop->nfslo_type = type;
+	nlop->nfslo_first = off;
+	if (len == NFS64BITSSET) {
+		nlop->nfslo_end = NFS64BITSSET;
+	} else {
+		nlop->nfslo_end = off + len;
+		if (nlop->nfslo_end <= nlop->nfslo_first)
+			error = NFSERR_INVAL;
+	}
+
+	if (!error) {
+		if (recovery)
+			clp = rclp;
+		else
+			error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
+	}
+	if (error) {
+		free(nlp, M_NFSCLLOCKOWNER);
+		free(otherlop, M_NFSCLLOCK);
+		free(nlop, M_NFSCLLOCK);
+		return (error);
+	}
+
+	op = NULL;
+	if (recovery) {
+		ownp = rownp;
+		openownp = ropenownp;
+	} else {
+		nfscl_filllockowner(id, own, flags);
+		ownp = own;
+		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
+			nfscl_filllockowner(NULL, openown, F_POSIX);
+		else
+			nfscl_filllockowner(p->td_proc, openown, F_POSIX);
+		openownp = openown;
+	}
+	if (!recovery) {
+		NFSLOCKCLSTATE();
+		/*
+		 * First, search for a delegation. If one exists for this file,
+		 * the lock can be done locally against it, so long as there
+		 * isn't a local lock conflict.
+		 */
+		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
+		    np->n_fhp->nfh_len);
+		/* Just a sanity check for the correct type of delegation. */
+		if (dp != NULL && ((dp->nfsdl_flags &
+		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
+		     (type == F_WRLCK &&
+		      (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
+			dp = NULL;
+	}
+	if (dp != NULL) {
+		/* Now, find an open and maybe a lockowner. */
+		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
+		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
+		if (ret)
+			ret = nfscl_getopen(&clp->nfsc_owner,
+			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
+			    ownp, mode, NULL, &op);
+		if (!ret) {
+			lhp = &dp->nfsdl_lock;
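+			/*
+			 * Move the delegation to the front of the LRU list
+			 * and refresh its timestamp, so it is not culled
+			 * while being used for local locking.
+			 */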
+			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
+			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
+			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
+			donelocally = 1;
+		} else {
+			dp = NULL;
+		}
+	}
+	if (!donelocally) {
+		/*
+		 * Get the related Open and maybe lockowner.
+		 */
+		error = nfscl_getopen(&clp->nfsc_owner,
+		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
+		    ownp, mode, &lp, &op);
+		if (!error)
+			lhp = &op->nfso_lock;
+	}
+	if (!error && !recovery)
+		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
+		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
+	if (error) {
+		if (!recovery) {
+			nfscl_clrelease(clp);
+			NFSUNLOCKCLSTATE();
+		}
+		free(nlp, M_NFSCLLOCKOWNER);
+		free(otherlop, M_NFSCLLOCK);
+		free(nlop, M_NFSCLLOCK);
+		return (error);
+	}
+
+	/*
+	 * Ok, see if a lockowner exists and create one, as required.
+	 */
+	if (lp == NULL)
+		LIST_FOREACH(lp, lhp, nfsl_list) {
+			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
+				break;
+		}
+	if (lp == NULL) {
+		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
+		if (recovery)
+			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
+			    NFSV4CL_LOCKNAMELEN);
+		else
+			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
+			    NFSV4CL_LOCKNAMELEN);
+		nlp->nfsl_seqid = 0;
+		nlp->nfsl_lockflags = flags;
+		nlp->nfsl_inprog = NULL;
+		nfscl_lockinit(&nlp->nfsl_rwlock);
+		LIST_INIT(&nlp->nfsl_lock);
+		if (donelocally) {
+			nlp->nfsl_open = NULL;
+			nfsstatsv1.cllocallockowners++;
+		} else {
+			nlp->nfsl_open = op;
+			nfsstatsv1.cllockowners++;
+		}
+		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
+		lp = nlp;
+		nlp = NULL;
+		*newonep = 1;
+	}
+
+	/*
+	 * Now, update the byte ranges for locks.
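+	 * If nfscl_updatelock() reports no change, the request is handled
+	 * locally and *donelocallyp is set for the caller.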
+	 */
+	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
+	if (!ret)
+		donelocally = 1;
+	if (donelocally) {
+		*donelocallyp = 1;
+		if (!recovery)
+			nfscl_clrelease(clp);
+	} else {
+		/*
+		 * Serialize modifications on the lock owner across multiple
+		 * threads of the same process, using a read/write lock.
+		 */
+		if (!recovery)
+			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
+	}
+	if (!recovery)
+		NFSUNLOCKCLSTATE();
+
+	if (nlp)
+		free(nlp, M_NFSCLLOCKOWNER);
+	if (nlop)
+		free(nlop, M_NFSCLLOCK);
+	if (otherlop)
+		free(otherlop, M_NFSCLLOCK);
+
+	*lpp = lp;
+	return (0);
+}
+
+/*
+ * Called to unlock a byte range, for LockU.
+ */
+APPLESTATIC int
+nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
+    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
+    struct nfsclclient *clp, void *id, int flags,
+    struct nfscllockowner **lpp, int *dorpcp)
+{
+	struct nfscllockowner *lp;
+	struct nfsclowner *owp;
+	struct nfsclopen *op;
+	struct nfscllock *nlop, *other_lop = NULL;
+	struct nfscldeleg *dp;
+	struct nfsnode *np;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN];
+	int ret = 0, fnd;
+
+	np = VTONFS(vp);
+	*lpp = NULL;
+	*dorpcp = 0;
+
+	/*
+	 * Might need these, so allocate them now with M_WAITOK, to avoid
+	 * sleeping in malloc() later while the client state mutex is held.
+	 */
+	nlop = malloc(
+	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
+	nlop->nfslo_type = F_UNLCK;
+	nlop->nfslo_first = off;
+	if (len == NFS64BITSSET) {
+		nlop->nfslo_end = NFS64BITSSET;
+	} else {
+		nlop->nfslo_end = off + len;
+		if (nlop->nfslo_end <= nlop->nfslo_first) {
+			free(nlop, M_NFSCLLOCK);
+			return (NFSERR_INVAL);
+		}
+	}
+	if (callcnt == 0) {
+		other_lop = malloc(
+		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
+		*other_lop = *nlop;
+	}
+	nfscl_filllockowner(id, own, flags);
+	dp = NULL;
+	NFSLOCKCLSTATE();
+	if (callcnt == 0)
+		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
+		    np->n_fhp->nfh_len);
+
+	/*
+	 * First, unlock any local regions on a delegation.
+	 */
+	if (dp != NULL) {
+		/* Look for this lockowner. */
+		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+			if (!NFSBCMP(lp->nfsl_owner, own,
+			    NFSV4CL_LOCKNAMELEN))
+				break;
+		}
+		if (lp != NULL)
+			/* Use other_lop, so nlop is still available */
+			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
+	}
+
+	/*
+	 * Now, find a matching open/lockowner that hasn't already been done,
+	 * as marked by nfsl_inprog.
+	 */
+	lp = NULL;
+	fnd = 0;
+	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
+		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
+		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+			if (lp->nfsl_inprog == NULL &&
+			    !NFSBCMP(lp->nfsl_owner, own,
+			     NFSV4CL_LOCKNAMELEN)) {
+				fnd = 1;
+				break;
+			}
+		    }
+		    if (fnd)
+			break;
+		}
+	    }
+	    if (fnd)
+		break;
+	}
+
+	if (lp != NULL) {
+		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
+		if (ret)
+			*dorpcp = 1;
+		/*
+		 * Serialize modifications on the lock owner across multiple
+		 * threads of the same process, using a read/write lock.
+		 */
+		lp->nfsl_inprog = p;
+		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
+		*lpp = lp;
+	}
+	NFSUNLOCKCLSTATE();
+	if (nlop)
+		free(nlop, M_NFSCLLOCK);
+	if (other_lop)
+		free(other_lop, M_NFSCLLOCK);
+	return (0);
+}
+
+/*
+ * Release all lockowners marked in progress for this process and file.
+ */
+APPLESTATIC void
+nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
+    void *id, int flags)
+{
+	struct nfsclowner *owp;
+	struct nfsclopen *op;
+	struct nfscllockowner *lp;
+	struct nfsnode *np;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN];
+
+	np = VTONFS(vp);
+	nfscl_filllockowner(id, own, flags);
+	NFSLOCKCLSTATE();
+	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
+		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
+		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+			if (lp->nfsl_inprog == p &&
+			    !NFSBCMP(lp->nfsl_owner, own,
+			    NFSV4CL_LOCKNAMELEN)) {
+			    lp->nfsl_inprog = NULL;
+			    nfscl_lockunlock(&lp->nfsl_rwlock);
+			}
+		    }
+		}
+	    }
+	}
+	nfscl_clrelease(clp);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Called to find out if any bytes within the byte range specified are
+ * write locked by the calling process. Used to determine if flushing
+ * is required before a LockU.
+ * If in doubt, return 1, so the flush will occur.
+ */
+APPLESTATIC int
+nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
+    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
+{
+	struct nfsclowner *owp;
+	struct nfscllockowner *lp;
+	struct nfsclopen *op;
+	struct nfsclclient *clp;
+	struct nfscllock *lop;
+	struct nfscldeleg *dp;
+	struct nfsnode *np;
+	u_int64_t off, end;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN];
+	int error = 0;
+
+	np = VTONFS(vp);
+	switch (fl->l_whence) {
+	case SEEK_SET:
+	case SEEK_CUR:
+		/*
+		 * Caller is responsible for adding any necessary offset
+		 * when SEEK_CUR is used.
+		 */
+		off = fl->l_start;
+		break;
+	case SEEK_END:
+		off = np->n_size + fl->l_start;
+		break;
+	default:
+		return (1);
+	}
+	if (fl->l_len != 0) {
+		end = off + fl->l_len;
+		if (end < off)
+			return (1);
+	} else {
+		end = NFS64BITSSET;
+	}
+
+	error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
+	if (error)
+		return (1);
+	nfscl_filllockowner(id, own, flags);
+	NFSLOCKCLSTATE();
+
+	/*
+	 * First check the delegation locks.
+	 */
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (dp != NULL) {
+		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+			if (!NFSBCMP(lp->nfsl_owner, own,
+			    NFSV4CL_LOCKNAMELEN))
+				break;
+		}
+		if (lp != NULL) {
+			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
+				if (lop->nfslo_first >= end)
+					break;
+				if (lop->nfslo_end <= off)
+					continue;
+				if (lop->nfslo_type == F_WRLCK) {
+					nfscl_clrelease(clp);
+					NFSUNLOCKCLSTATE();
+					return (1);
+				}
+			}
+		}
+	}
+
+	/*
+	 * Now, check state against the server.
+	 */
+	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
+		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
+		    LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+			if (!NFSBCMP(lp->nfsl_owner, own,
+			    NFSV4CL_LOCKNAMELEN))
+			    break;
+		    }
+		    if (lp != NULL) {
+			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
+			    if (lop->nfslo_first >= end)
+				break;
+			    if (lop->nfslo_end <= off)
+				continue;
+			    if (lop->nfslo_type == F_WRLCK) {
+				nfscl_clrelease(clp);
+				NFSUNLOCKCLSTATE();
+				return (1);
+			    }
+			}
+		    }
+		}
+	    }
+	}
+	nfscl_clrelease(clp);
+	NFSUNLOCKCLSTATE();
+	return (0);
+}
+
+/*
+ * Release a byte range lock owner structure.
+ */
+APPLESTATIC void
+nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
+{
+	struct nfsclclient *clp;
+
+	if (lp == NULL)
+		return;
+	NFSLOCKCLSTATE();
+	clp = lp->nfsl_open->nfso_own->nfsow_clp;
+	if (error != 0 && candelete &&
+	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
+		nfscl_freelockowner(lp, 0);
+	else
+		nfscl_lockunlock(&lp->nfsl_rwlock);
+	nfscl_clrelease(clp);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Free up an open structure and any associated byte range lock structures.
+ */
+APPLESTATIC void
+nfscl_freeopen(struct nfsclopen *op, int local)
+{
+
+	LIST_REMOVE(op, nfso_list);
+	nfscl_freealllocks(&op->nfso_lock, local);
+	free(op, M_NFSCLOPEN);
+	if (local)
+		nfsstatsv1.cllocalopens--;
+	else
+		nfsstatsv1.clopens--;
+}
+
+/*
+ * Free up all lock owners and associated locks.
+ */
+static void
+nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
+{
+	struct nfscllockowner *lp, *nlp;
+
+	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
+		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
+			panic("nfscllckw");
+		nfscl_freelockowner(lp, local);
+	}
+}
+
+/*
+ * Called for an Open when NFSERR_EXPIRED is received from the server.
+ * If there are no byte range locks and no Share Deny would be lost, try
+ * to do a fresh Open. Otherwise, free the open.
+ */
+static int
+nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
+    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfscllockowner *lp;
+	struct nfscldeleg *dp;
+	int mustdelete = 0, error;
+
+	/*
+	 * Look for any byte range lock(s).
+	 */
+	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+		if (!LIST_EMPTY(&lp->nfsl_lock)) {
+			mustdelete = 1;
+			break;
+		}
+	}
+
+	/*
+	 * If no byte range lock(s) nor a Share deny, try to re-open.
+	 */
+	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
+		newnfs_copycred(&op->nfso_cred, cred);
+		dp = NULL;
+		error = nfsrpc_reopen(nmp, op->nfso_fh,
+		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
+		if (error) {
+			mustdelete = 1;
+			if (dp != NULL) {
+				free(dp, M_NFSCLDELEG);
+				dp = NULL;
+			}
+		}
+		if (dp != NULL)
+			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
+			    op->nfso_fhlen, cred, p, &dp);
+	}
+
+	/*
+	 * If a byte range lock or Share deny or couldn't re-open, free it.
+	 */
+	if (mustdelete)
+		nfscl_freeopen(op, 0);
+	return (mustdelete);
+}
+
+/*
+ * Free up an open owner structure.
+ */
+static void
+nfscl_freeopenowner(struct nfsclowner *owp, int local)
+{
+
+	LIST_REMOVE(owp, nfsow_list);
+	free(owp, M_NFSCLOWNER);
+	if (local)
+		nfsstatsv1.cllocalopenowners--;
+	else
+		nfsstatsv1.clopenowners--;
+}
+
+/*
+ * Free up a byte range lock owner structure.
+ */
+APPLESTATIC void
+nfscl_freelockowner(struct nfscllockowner *lp, int local)
+{
+	struct nfscllock *lop, *nlop;
+
+	LIST_REMOVE(lp, nfsl_list);
+	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
+		nfscl_freelock(lop, local);
+	}
+	free(lp, M_NFSCLLOCKOWNER);
+	if (local)
+		nfsstatsv1.cllocallockowners--;
+	else
+		nfsstatsv1.cllockowners--;
+}
+
+/*
+ * Free up a byte range lock structure.
+ */
+APPLESTATIC void
+nfscl_freelock(struct nfscllock *lop, int local)
+{
+
+	LIST_REMOVE(lop, nfslo_list);
+	free(lop, M_NFSCLLOCK);
+	if (local)
+		nfsstatsv1.cllocallocks--;
+	else
+		nfsstatsv1.cllocks--;
+}
+
+/*
+ * Clean out the state related to a delegation.
+ */
+static void
+nfscl_cleandeleg(struct nfscldeleg *dp)
+{
+	struct nfsclowner *owp, *nowp;
+	struct nfsclopen *op;
+
+	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
+		op = LIST_FIRST(&owp->nfsow_open);
+		if (op != NULL) {
+			if (LIST_NEXT(op, nfso_list) != NULL)
+				panic("nfscleandel");
+			nfscl_freeopen(op, 1);
+		}
+		nfscl_freeopenowner(owp, 1);
+	}
+	nfscl_freealllocks(&dp->nfsdl_lock, 1);
+}
+
+/*
+ * Free a delegation.
+ */
+static void
+nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
+{
+
+	TAILQ_REMOVE(hdp, dp, nfsdl_list);
+	LIST_REMOVE(dp, nfsdl_hash);
+	free(dp, M_NFSCLDELEG);
+	nfsstatsv1.cldelegates--;
+	nfscl_delegcnt--;
+}
+
+/*
+ * Free up all state related to this client structure.
+ */
+static void
+nfscl_cleanclient(struct nfsclclient *clp)
+{
+	struct nfsclowner *owp, *nowp;
+	struct nfsclopen *op, *nop;
+	struct nfscllayout *lyp, *nlyp;
+	struct nfscldevinfo *dip, *ndip;
+
+	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
+		nfscl_freelayout(lyp);
+
+	LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
+		nfscl_freedevinfo(dip);
+
+	/* Now, all the OpenOwners, etc. */
+	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
+		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
+			nfscl_freeopen(op, 0);
+		}
+		nfscl_freeopenowner(owp, 0);
+	}
+}
+
+/*
+ * Called when an NFSERR_EXPIRED is received from the server.
+ */
+static void
+nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclowner *owp, *nowp, *towp;
+	struct nfsclopen *op, *nop, *top;
+	struct nfscldeleg *dp, *ndp;
+	int ret, printed = 0;
+
+	/*
+	 * First, merge locally issued Opens into the list for the server.
+	 */
+	dp = TAILQ_FIRST(&clp->nfsc_deleg);
+	while (dp != NULL) {
+	    ndp = TAILQ_NEXT(dp, nfsdl_list);
+	    owp = LIST_FIRST(&dp->nfsdl_owner);
+	    while (owp != NULL) {
+		nowp = LIST_NEXT(owp, nfsow_list);
+		op = LIST_FIRST(&owp->nfsow_open);
+		if (op != NULL) {
+		    if (LIST_NEXT(op, nfso_list) != NULL)
+			panic("nfsclexp");
+		    LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
+			if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
+			    NFSV4CL_LOCKNAMELEN))
+			    break;
+		    }
+		    if (towp != NULL) {
+			/* Merge opens in */
+			LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
+			    if (top->nfso_fhlen == op->nfso_fhlen &&
+				!NFSBCMP(top->nfso_fh, op->nfso_fh,
+				 op->nfso_fhlen)) {
+				top->nfso_mode |= op->nfso_mode;
+				top->nfso_opencnt += op->nfso_opencnt;
+				break;
+			    }
+			}
+			if (top == NULL) {
+			    /* Just add the open to the owner list */
+			    LIST_REMOVE(op, nfso_list);
+			    op->nfso_own = towp;
+			    LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
+			    nfsstatsv1.cllocalopens--;
+			    nfsstatsv1.clopens++;
+			}
+		    } else {
+			/* Just add the openowner to the client list */
+			LIST_REMOVE(owp, nfsow_list);
+			owp->nfsow_clp = clp;
+			LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
+			nfsstatsv1.cllocalopenowners--;
+			nfsstatsv1.clopenowners++;
+			nfsstatsv1.cllocalopens--;
+			nfsstatsv1.clopens++;
+		    }
+		}
+		owp = nowp;
+	    }
+	    if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
+		printed = 1;
+		printf("nfsv4 expired locks lost\n");
+	    }
+	    nfscl_cleandeleg(dp);
+	    nfscl_freedeleg(&clp->nfsc_deleg, dp);
+	    dp = ndp;
+	}
+	if (!TAILQ_EMPTY(&clp->nfsc_deleg))
+	    panic("nfsclexp");
+
+	/*
+	 * Now, try and reopen against the server.
+	 */
+	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
+		owp->nfsow_seqid = 0;
+		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
+			ret = nfscl_expireopen(clp, op, nmp, cred, p);
+			if (ret && !printed) {
+				printed = 1;
+				printf("nfsv4 expired locks lost\n");
+			}
+		}
+		if (LIST_EMPTY(&owp->nfsow_open))
+			nfscl_freeopenowner(owp, 0);
+	}
+}
+
+/*
+ * This function must be called after the process represented by "own" has
+ * exited. Must be called with CLSTATE lock held.
+ */
+static void
+nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
+{
+	struct nfsclowner *owp, *nowp;
+	struct nfscllockowner *lp, *nlp;
+	struct nfscldeleg *dp;
+
+	/* First, get rid of local locks on delegations. */
+	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
+		    if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
+			if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
+			    panic("nfscllckw");
+			nfscl_freelockowner(lp, 1);
+		    }
+		}
+	}
+	owp = LIST_FIRST(&clp->nfsc_owner);
+	while (owp != NULL) {
+		nowp = LIST_NEXT(owp, nfsow_list);
+		if (!NFSBCMP(owp->nfsow_owner, own,
+		    NFSV4CL_LOCKNAMELEN)) {
+			/*
+			 * If there are children that haven't closed the
+			 * file descriptors yet, the opens will still be
+			 * here. For that case, let the renew thread clear
+			 * out the OpenOwner later.
+			 */
+			if (LIST_EMPTY(&owp->nfsow_open))
+				nfscl_freeopenowner(owp, 0);
+			else
+				owp->nfsow_defunct = 1;
+		}
+		owp = nowp;
+	}
+}
+
+/*
+ * Find open/lock owners for processes that have exited.
+ */
+static void
+nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
+{
+	struct nfsclowner *owp, *nowp;
+	struct nfsclopen *op;
+	struct nfscllockowner *lp, *nlp;
+	struct nfscldeleg *dp;
+
+	NFSPROCLISTLOCK();
+	NFSLOCKCLSTATE();
+	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
+		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
+				if (LIST_EMPTY(&lp->nfsl_lock))
+					nfscl_emptylockowner(lp, lhp);
+			}
+		}
+		if (nfscl_procdoesntexist(owp->nfsow_owner))
+			nfscl_cleanup_common(clp, owp->nfsow_owner);
+	}
+
+	/*
+	 * For the single open_owner case, these lock owners need to be
+	 * checked separately to see if their processes still exist.
+	 * This is because nfscl_procdoesntexist() never returns true for
+	 * the single open_owner, so the loop above never calls
+	 * nfscl_cleanup_common() for them.
+	 */
+	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
+			if (nfscl_procdoesntexist(lp->nfsl_owner))
+				nfscl_cleanup_common(clp, lp->nfsl_owner);
+		}
+	}
+	NFSUNLOCKCLSTATE();
+	NFSPROCLISTUNLOCK();
+}
+
+/*
+ * Take the empty lock owner and move it to the local lhp list if the
+ * associated process no longer exists.
+ */
+static void
+nfscl_emptylockowner(struct nfscllockowner *lp,
+    struct nfscllockownerfhhead *lhp)
+{
+	struct nfscllockownerfh *lfhp, *mylfhp;
+	struct nfscllockowner *nlp;
+	int fnd_it;
+
+	/* If not a Posix lock owner, just return. */
+	if ((lp->nfsl_lockflags & F_POSIX) == 0)
+		return;
+
+	fnd_it = 0;
+	mylfhp = NULL;
+	/*
+	 * First, search to see if this lock owner is already in the list.
+	 * If it is, then the associated process no longer exists.
+	 */
+	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
+		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
+		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
+		    lfhp->nfslfh_len))
+			mylfhp = lfhp;
+		LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
+			if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
+			    NFSV4CL_LOCKNAMELEN))
+				fnd_it = 1;
+	}
+	/* If not found, check if process still exists. */
+	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
+		return;
+
+	/* Move the lock owner over to the local list. */
+	if (mylfhp == NULL) {
+		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
+		    M_NOWAIT);
+		if (mylfhp == NULL)
+			return;
+		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
+		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
+		    mylfhp->nfslfh_len);
+		LIST_INIT(&mylfhp->nfslfh_lock);
+		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
+	}
+	LIST_REMOVE(lp, nfsl_list);
+	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
+}
+
+static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
+/*
+ * Called from nfs umount to free up the clientid.
+ */
+APPLESTATIC void
+nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
+{
+	struct nfsclclient *clp;
+	struct ucred *cred;
+	int igotlock;
+
+	/*
+	 * For the case that matters, this is the thread that set
+	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
+	 * done to ensure that any thread executing nfscl_getcl() after
+	 * this time will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
+	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
+	 * explanation, courtesy of Alan Cox.
+	 * What follows is a snippet from Alan Cox's email at:
+	 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
+	 * 
+	 * 1. Set MNTK_UNMOUNTF
+	 * 2. Acquire a standard FreeBSD mutex "m".
+	 * 3. Update some data structures.
+	 * 4. Release mutex "m".
+	 * 
+	 * Then, other threads that acquire "m" after step 4 has occurred will
+	 * see MNTK_UNMOUNTF as set.  But, other threads that beat thread X to
+	 * step 2 may or may not see MNTK_UNMOUNTF as set.
+	 */
+	NFSLOCKCLSTATE();
+	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
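+		/*
+		 * Steps 3 and 4: update a global, release the mutex, then
+		 * re-acquire it.
+		 */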
+		fake_global++;
+		NFSUNLOCKCLSTATE();
+		NFSLOCKCLSTATE();
+	}
+
+	clp = nmp->nm_clp;
+	if (clp != NULL) {
+		if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
+			panic("nfscl umount");
+	
+		/*
+		 * First, handshake with the nfscl renew thread, to terminate
+		 * it.
+		 */
+		clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
+		while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
+			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
+			    "nfsclumnt", hz);
+	
+		/*
+		 * Now, get the exclusive lock on the client state, so
+		 * that no uses of the state are still in progress.
+		 */
+		do {
+			igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
+			    NFSCLSTATEMUTEXPTR, NULL);
+		} while (!igotlock);
+		NFSUNLOCKCLSTATE();
+	
+		/*
+		 * Free up all the state. It will expire on the server, but
+		 * maybe we should do a SetClientId/SetClientIdConfirm so
+		 * the server throws it away?
+		 */
+		LIST_REMOVE(clp, nfsc_list);
+		nfscl_delegreturnall(clp, p);
+		cred = newnfs_getcred();
+		if (NFSHASNFSV4N(nmp)) {
+			(void)nfsrpc_destroysession(nmp, clp, cred, p);
+			(void)nfsrpc_destroyclient(nmp, clp, cred, p);
+		} else
+			(void)nfsrpc_setclient(nmp, clp, 0, cred, p);
+		nfscl_cleanclient(clp);
+		nmp->nm_clp = NULL;
+		NFSFREECRED(cred);
+		free(clp, M_NFSCLCLIENT);
+	} else
+		NFSUNLOCKCLSTATE();
+}
+
+/*
+ * This function is called when a server replies with NFSERR_STALECLIENTID
+ * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
+ * doing Opens and Locks with reclaim. If these fail, it deletes the
+ * corresponding state.
+ */
+static void
+nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclowner *owp, *nowp;
+	struct nfsclopen *op, *nop;
+	struct nfscllockowner *lp, *nlp;
+	struct nfscllock *lop, *nlop;
+	struct nfscldeleg *dp, *ndp, *tdp;
+	struct nfsmount *nmp;
+	struct ucred *tcred;
+	struct nfsclopenhead extra_open;
+	struct nfscldeleghead extra_deleg;
+	struct nfsreq *rep;
+	u_int64_t len;
+	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
+	int i, igotlock = 0, error, trycnt, firstlock;
+	struct nfscllayout *lyp, *nlyp;
+
+	/*
+	 * First, lock the client structure, so everyone else will
+	 * block when trying to use state.
+	 */
+	NFSLOCKCLSTATE();
+	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
+	do {
+		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
+		    NFSCLSTATEMUTEXPTR, NULL);
+	} while (!igotlock);
+	NFSUNLOCKCLSTATE();
+
+	nmp = clp->nfsc_nmp;
+	if (nmp == NULL)
+		panic("nfscl recover");
+
+	/*
+	 * For now, just get rid of all layouts. There may be a need
+	 * to do LayoutCommit Ops with reclaim == true later.
+	 */
+	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
+		nfscl_freelayout(lyp);
+	TAILQ_INIT(&clp->nfsc_layout);
+	for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
+		LIST_INIT(&clp->nfsc_layouthash[i]);
+
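+	/*
+	 * Re-establish the clientid, retrying a few times on stale
+	 * clientid or bad session replies.
+	 */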
+	trycnt = 5;
+	do {
+		error = nfsrpc_setclient(nmp, clp, 1, cred, p);
+	} while ((error == NFSERR_STALECLIENTID ||
+	     error == NFSERR_BADSESSION ||
+	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
+	if (error) {
+		NFSLOCKCLSTATE();
+		clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
+		    NFSCLFLAGS_RECVRINPROG);
+		wakeup(&clp->nfsc_flags);
+		nfsv4_unlock(&clp->nfsc_lock, 0);
+		NFSUNLOCKCLSTATE();
+		return;
+	}
+	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
+	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
+
+	/*
+	 * Mark requests already queued on the server, so that they don't
+	 * initiate another recovery cycle. Any requests already in the
+	 * queue that handle state information will have the old stale
+	 * clientid/stateid and will get a NFSERR_STALESTATEID,
+	 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
+	 * This will be translated to NFSERR_STALEDONTRECOVER when
+	 * R_DONTRECOVER is set.
+	 */
+	NFSLOCKREQ();
+	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
+		if (rep->r_nmp == nmp)
+			rep->r_flags |= R_DONTRECOVER;
+	}
+	NFSUNLOCKREQ();
+
+	/*
+	 * Now, mark all delegations "need reclaim".
+	 */
+	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
+		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
+
+	TAILQ_INIT(&extra_deleg);
+	LIST_INIT(&extra_open);
+	/*
+	 * Now traverse the state lists, doing Open and Lock Reclaims.
+	 */
+	tcred = newnfs_getcred();
+	owp = LIST_FIRST(&clp->nfsc_owner);
+	while (owp != NULL) {
+	    nowp = LIST_NEXT(owp, nfsow_list);
+	    owp->nfsow_seqid = 0;
+	    op = LIST_FIRST(&owp->nfsow_open);
+	    while (op != NULL) {
+		nop = LIST_NEXT(op, nfso_list);
+		if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
+		    /* Search for a delegation to reclaim with the open */
+		    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+			if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
+			    continue;
+			if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
+			    mode = NFSV4OPEN_ACCESSWRITE;
+			    delegtype = NFSV4OPEN_DELEGATEWRITE;
+			} else {
+			    mode = NFSV4OPEN_ACCESSREAD;
+			    delegtype = NFSV4OPEN_DELEGATEREAD;
+			}
+			if ((op->nfso_mode & mode) == mode &&
+			    op->nfso_fhlen == dp->nfsdl_fhlen &&
+			    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
+			    break;
+		    }
+		    ndp = dp;
+		    if (dp == NULL)
+			delegtype = NFSV4OPEN_DELEGATENONE;
+		    newnfs_copycred(&op->nfso_cred, tcred);
+		    error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
+			op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
+			op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
+			tcred, p);
+		    if (!error) {
+			/* Handle any replied delegation */
+			if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
+			    || NFSMNT_RDONLY(nmp->nm_mountp))) {
+			    if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
+				mode = NFSV4OPEN_ACCESSWRITE;
+			    else
+				mode = NFSV4OPEN_ACCESSREAD;
+			    TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+				if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
+				    continue;
+				if ((op->nfso_mode & mode) == mode &&
+				    op->nfso_fhlen == dp->nfsdl_fhlen &&
+				    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
+				    op->nfso_fhlen)) {
+				    dp->nfsdl_stateid = ndp->nfsdl_stateid;
+				    dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
+				    dp->nfsdl_ace = ndp->nfsdl_ace;
+				    dp->nfsdl_change = ndp->nfsdl_change;
+				    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
+				    if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
+					dp->nfsdl_flags |= NFSCLDL_RECALL;
+				    free(ndp, M_NFSCLDELEG);
+				    ndp = NULL;
+				    break;
+				}
+			    }
+			}
+			if (ndp != NULL)
+			    TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
+
+			/* and reclaim all byte range locks */
+			lp = LIST_FIRST(&op->nfso_lock);
+			while (lp != NULL) {
+			    nlp = LIST_NEXT(lp, nfsl_list);
+			    lp->nfsl_seqid = 0;
+			    firstlock = 1;
+			    lop = LIST_FIRST(&lp->nfsl_lock);
+			    while (lop != NULL) {
+				nlop = LIST_NEXT(lop, nfslo_list);
+				if (lop->nfslo_end == NFS64BITSSET)
+				    len = NFS64BITSSET;
+				else
+				    len = lop->nfslo_end - lop->nfslo_first;
+				error = nfscl_trylock(nmp, NULL,
+				    op->nfso_fh, op->nfso_fhlen, lp,
+				    firstlock, 1, lop->nfslo_first, len,
+				    lop->nfslo_type, tcred, p);
+				if (error != 0)
+				    nfscl_freelock(lop, 0);
+				else
+				    firstlock = 0;
+				lop = nlop;
+			    }
+			    /* If no locks, but a lockowner, just delete it. */
+			    if (LIST_EMPTY(&lp->nfsl_lock))
+				nfscl_freelockowner(lp, 0);
+			    lp = nlp;
+			}
+		    }
+		}
+		if (error != 0 && error != NFSERR_BADSESSION)
+		    nfscl_freeopen(op, 0);
+		op = nop;
+	    }
+	    owp = nowp;
+	}
+
+	/*
+	 * Now, try to get any delegations not yet reclaimed by cobbling
+	 * together an appropriate open.
+	 */
+	nowp = NULL;
+	dp = TAILQ_FIRST(&clp->nfsc_deleg);
+	while (dp != NULL) {
+	    ndp = TAILQ_NEXT(dp, nfsdl_list);
+	    if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
+		if (nowp == NULL) {
+		    nowp = malloc(
+			sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
+		    /*
+		     * Name must be as long as the largest possible
+		     * NFSV4CL_LOCKNAMELEN. 12 for now.
+		     */
+		    NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
+			NFSV4CL_LOCKNAMELEN);
+		    LIST_INIT(&nowp->nfsow_open);
+		    nowp->nfsow_clp = clp;
+		    nowp->nfsow_seqid = 0;
+		    nowp->nfsow_defunct = 0;
+		    nfscl_lockinit(&nowp->nfsow_rwlock);
+		}
+		nop = NULL;
+		if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
+		    nop = malloc(sizeof (struct nfsclopen) +
+			dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
+		    nop->nfso_own = nowp;
+		    if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
+			nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
+			delegtype = NFSV4OPEN_DELEGATEWRITE;
+		    } else {
+			nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
+			delegtype = NFSV4OPEN_DELEGATEREAD;
+		    }
+		    nop->nfso_opencnt = 0;
+		    nop->nfso_posixlock = 1;
+		    nop->nfso_fhlen = dp->nfsdl_fhlen;
+		    NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
+		    LIST_INIT(&nop->nfso_lock);
+		    nop->nfso_stateid.seqid = 0;
+		    nop->nfso_stateid.other[0] = 0;
+		    nop->nfso_stateid.other[1] = 0;
+		    nop->nfso_stateid.other[2] = 0;
+		    newnfs_copycred(&dp->nfsdl_cred, tcred);
+		    newnfs_copyincred(tcred, &nop->nfso_cred);
+		    tdp = NULL;
+		    error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
+			nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
+			nop->nfso_mode, nop, NULL, 0, &tdp, 1,
+			delegtype, tcred, p);
+		    if (tdp != NULL) {
+			if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
+			    mode = NFSV4OPEN_ACCESSWRITE;
+			else
+			    mode = NFSV4OPEN_ACCESSREAD;
+			if ((nop->nfso_mode & mode) == mode &&
+			    nop->nfso_fhlen == tdp->nfsdl_fhlen &&
+			    !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
+			    nop->nfso_fhlen)) {
+			    dp->nfsdl_stateid = tdp->nfsdl_stateid;
+			    dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
+			    dp->nfsdl_ace = tdp->nfsdl_ace;
+			    dp->nfsdl_change = tdp->nfsdl_change;
+			    dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
+			    if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
+				dp->nfsdl_flags |= NFSCLDL_RECALL;
+			    free(tdp, M_NFSCLDELEG);
+			} else {
+			    TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
+			}
+		    }
+		}
+		if (error) {
+		    if (nop != NULL)
+			free(nop, M_NFSCLOPEN);
+		    /*
+		     * Couldn't reclaim it, so throw the state
+		     * away. Ouch!!
+		     */
+		    nfscl_cleandeleg(dp);
+		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
+		} else {
+		    LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
+		}
+	    }
+	    dp = ndp;
+	}
+
+	/*
+	 * Now, get rid of extra Opens and Delegations.
+	 */
+	LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
+		do {
+			newnfs_copycred(&op->nfso_cred, tcred);
+			error = nfscl_tryclose(op, tcred, nmp, p);
+			if (error == NFSERR_GRACE)
+				(void) nfs_catnap(PZERO, error, "nfsexcls");
+		} while (error == NFSERR_GRACE);
+		LIST_REMOVE(op, nfso_list);
+		free(op, M_NFSCLOPEN);
+	}
+	if (nowp != NULL)
+		free(nowp, M_NFSCLOWNER);
+
+	TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
+		do {
+			newnfs_copycred(&dp->nfsdl_cred, tcred);
+			error = nfscl_trydelegreturn(dp, tcred, nmp, p);
+			if (error == NFSERR_GRACE)
+				(void) nfs_catnap(PZERO, error, "nfsexdlg");
+		} while (error == NFSERR_GRACE);
+		TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
+		free(dp, M_NFSCLDELEG);
+	}
+
+	/* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
+	if (NFSHASNFSV4N(nmp))
+		(void)nfsrpc_reclaimcomplete(nmp, cred, p);
+
+	NFSLOCKCLSTATE();
+	clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
+	wakeup(&clp->nfsc_flags);
+	nfsv4_unlock(&clp->nfsc_lock, 0);
+	NFSUNLOCKCLSTATE();
+	NFSFREECRED(tcred);
+}
+
+/*
+ * This function is called when a server replies with NFSERR_EXPIRED.
+ * It deletes all state for the client and does a fresh SetClientId/confirm.
+ * XXX Someday it should post a signal to the process(es) that hold the
+ * state, so they know that lock state has been lost.
+ */
+APPLESTATIC int
+nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
+{
+	struct nfsmount *nmp;
+	struct ucred *cred;
+	int igotlock = 0, error, trycnt;
+
+	/*
+	 * If the clientid has gone away or a new SetClientid has already
+	 * been done, just return ok.
+	 */
+	if (clp == NULL || clidrev != clp->nfsc_clientidrev)
+		return (0);
+
+	/*
+	 * First, lock the client structure, so everyone else will
+	 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
+	 * that only one thread does the work.
+	 */
+	NFSLOCKCLSTATE();
+	clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
+	do {
+		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
+		    NFSCLSTATEMUTEXPTR, NULL);
+	} while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
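+	/*
+	 * If NFSCLFLAGS_EXPIREIT was cleared while sleeping, another thread
+	 * has already handled the expired state, so just release and return.
+	 */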
+	if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
+		if (igotlock)
+			nfsv4_unlock(&clp->nfsc_lock, 0);
+		NFSUNLOCKCLSTATE();
+		return (0);
+	}
+	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
+	NFSUNLOCKCLSTATE();
+
+	nmp = clp->nfsc_nmp;
+	if (nmp == NULL)
+		panic("nfscl expired");
+	cred = newnfs_getcred();
+	trycnt = 5;
+	do {
+		error = nfsrpc_setclient(nmp, clp, 0, cred, p);
+	} while ((error == NFSERR_STALECLIENTID ||
+	     error == NFSERR_BADSESSION ||
+	     error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
+	if (error) {
+		NFSLOCKCLSTATE();
+		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
+	} else {
+		/*
+		 * Expire the state for the client.
+		 */
+		nfscl_expireclient(clp, nmp, cred, p);
+		NFSLOCKCLSTATE();
+		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
+		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
+	}
+	clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
+	wakeup(&clp->nfsc_flags);
+	nfsv4_unlock(&clp->nfsc_lock, 0);
+	NFSUNLOCKCLSTATE();
+	NFSFREECRED(cred);
+	return (error);
+}
+
+/*
+ * This function inserts a lock in the list after insert_lop.
+ */
+static void
+nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
+    struct nfscllock *insert_lop, int local)
+{
+
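+	/*
+	 * An insert_lop equal to lp (the list head, cast to a lock pointer)
+	 * means insert at the head of the list.
+	 */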
+	if ((struct nfscllockowner *)insert_lop == lp)
+		LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
+	else
+		LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
+	if (local)
+		nfsstatsv1.cllocallocks++;
+	else
+		nfsstatsv1.cllocks++;
+}
+
+/*
+ * This function updates the locking for a lock owner and given file. It
+ * maintains a list of lock ranges ordered on increasing file offset that
+ * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
+ * It always adds new_lop to the list and sometimes uses the one pointed
+ * at by other_lopp.
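+ * For example, a request that falls in the middle of an existing range of a
+ * different type splits the old range in two, using *other_lopp (or new_lop
+ * itself, for an unlock) for the second piece.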
+ * Returns 1 if the locks were modified, 0 otherwise.
+ */
+static int
+nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
+    struct nfscllock **other_lopp, int local)
+{
+	struct nfscllock *new_lop = *new_lopp;
+	struct nfscllock *lop, *tlop, *ilop;
+	struct nfscllock *other_lop;
+	int unlock = 0, modified = 0;
+	u_int64_t tmp;
+
+	/*
+	 * Work down the list until the lock is merged.
+	 */
+	if (new_lop->nfslo_type == F_UNLCK)
+		unlock = 1;
+	ilop = (struct nfscllock *)lp;
+	lop = LIST_FIRST(&lp->nfsl_lock);
+	while (lop != NULL) {
+	    /*
+	     * Only check locks for this file that aren't before the start of
+	     * the new lock's range.
+	     */
+	    if (lop->nfslo_end >= new_lop->nfslo_first) {
+		if (new_lop->nfslo_end < lop->nfslo_first) {
+		    /*
+		     * If the new lock ends before the start of the
+		     * current lock's range, no merge, just insert
+		     * the new lock.
+		     */
+		    break;
+		}
+		if (new_lop->nfslo_type == lop->nfslo_type ||
+		    (new_lop->nfslo_first <= lop->nfslo_first &&
+		     new_lop->nfslo_end >= lop->nfslo_end)) {
+		    /*
+		     * This lock can be absorbed by the new lock/unlock.
+		     * This happens when it covers the entire range
+		     * of the old lock or is contiguous
+		     * with the old lock and is of the same type or an
+		     * unlock.
+		     */
+		    if (new_lop->nfslo_type != lop->nfslo_type ||
+			new_lop->nfslo_first != lop->nfslo_first ||
+			new_lop->nfslo_end != lop->nfslo_end)
+			modified = 1;
+		    if (lop->nfslo_first < new_lop->nfslo_first)
+			new_lop->nfslo_first = lop->nfslo_first;
+		    if (lop->nfslo_end > new_lop->nfslo_end)
+			new_lop->nfslo_end = lop->nfslo_end;
+		    tlop = lop;
+		    lop = LIST_NEXT(lop, nfslo_list);
+		    nfscl_freelock(tlop, local);
+		    continue;
+		}
+
+		/*
+		 * All these cases are for contiguous locks that are not the
+		 * same type, so they can't be merged.
+		 */
+		if (new_lop->nfslo_first <= lop->nfslo_first) {
+		    /*
+		     * This case is where the new lock overlaps with the
+		     * first part of the old lock. Move the start of the
+		     * old lock to just past the end of the new lock. The
+		     * new lock will be inserted in front of the old, since
+		     * ilop hasn't been updated. (We are done now.)
+		     */
+		    if (lop->nfslo_first != new_lop->nfslo_end) {
+			lop->nfslo_first = new_lop->nfslo_end;
+			modified = 1;
+		    }
+		    break;
+		}
+		if (new_lop->nfslo_end >= lop->nfslo_end) {
+		    /*
+		     * This case is where the new lock overlaps with the
+		     * end of the old lock's range. Move the old lock's
+		     * end to just before the new lock's first and insert
+		     * the new lock after the old lock.
+		     * Might not be done yet, since the new lock could
+		     * overlap further locks with higher ranges.
+		     */
+		    if (lop->nfslo_end != new_lop->nfslo_first) {
+			lop->nfslo_end = new_lop->nfslo_first;
+			modified = 1;
+		    }
+		    ilop = lop;
+		    lop = LIST_NEXT(lop, nfslo_list);
+		    continue;
+		}
+		/*
+		 * The final case is where the new lock's range is in the
+		 * middle of the current lock's and splits the current lock
+		 * up. Use *other_lopp to handle the second part of the
+		 * split old lock range. (We are done now.)
+		 * For unlock, we use new_lop as other_lop and tmp, since
+		 * other_lop and new_lop are the same for this case.
+		 * We noted the unlock case above, so we don't need
+		 * new_lop->nfslo_type any longer.
+		 */
+		tmp = new_lop->nfslo_first;
+		if (unlock) {
+		    other_lop = new_lop;
+		    *new_lopp = NULL;
+		} else {
+		    other_lop = *other_lopp;
+		    *other_lopp = NULL;
+		}
+		other_lop->nfslo_first = new_lop->nfslo_end;
+		other_lop->nfslo_end = lop->nfslo_end;
+		other_lop->nfslo_type = lop->nfslo_type;
+		lop->nfslo_end = tmp;
+		nfscl_insertlock(lp, other_lop, lop, local);
+		ilop = lop;
+		modified = 1;
+		break;
+	    }
+	    ilop = lop;
+	    lop = LIST_NEXT(lop, nfslo_list);
+	    if (lop == NULL)
+		break;
+	}
+
+	/*
+	 * Insert the new lock in the list at the appropriate place.
+	 */
+	if (!unlock) {
+		nfscl_insertlock(lp, new_lop, ilop, local);
+		*new_lopp = NULL;
+		modified = 1;
+	}
+	return (modified);
+}
+
+/*
+ * This function must be run as a kernel thread.
+ * It does Renew Ops and recovery, when required.
+ */
+APPLESTATIC void
+nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
+{
+	struct nfsclowner *owp, *nowp;
+	struct nfsclopen *op;
+	struct nfscllockowner *lp, *nlp;
+	struct nfscldeleghead dh;
+	struct nfscldeleg *dp, *ndp;
+	struct ucred *cred;
+	u_int32_t clidrev;
+	int error, cbpathdown, islept, igotlock, ret, clearok;
+	uint32_t recover_done_time = 0;
+	time_t mytime;
+	static time_t prevsec = 0;
+	struct nfscllockownerfh *lfhp, *nlfhp;
+	struct nfscllockownerfhhead lfh;
+	struct nfscllayout *lyp, *nlyp;
+	struct nfscldevinfo *dip, *ndip;
+	struct nfscllayouthead rlh;
+	struct nfsclrecalllayout *recallp;
+	struct nfsclds *dsp;
+
+	cred = newnfs_getcred();
+	NFSLOCKCLSTATE();
+	clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
+	NFSUNLOCKCLSTATE();
+	for(;;) {
+		newnfs_setroot(cred);
+		cbpathdown = 0;
+		if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
+			/*
+			 * Only allow one recovery within 1/2 of the lease
+			 * duration (nfsc_renew).
+			 */
+			if (recover_done_time < NFSD_MONOSEC) {
+				recover_done_time = NFSD_MONOSEC +
+				    clp->nfsc_renew;
+				NFSCL_DEBUG(1, "Doing recovery..\n");
+				nfscl_recover(clp, cred, p);
+			} else {
+				NFSCL_DEBUG(1, "Clear Recovery dt=%u ms=%jd\n",
+				    recover_done_time, (intmax_t)NFSD_MONOSEC);
+				NFSLOCKCLSTATE();
+				clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
+				NFSUNLOCKCLSTATE();
+			}
+		}
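+		/*
+		 * Issue a Renew once the renewal time is reached, if a
+		 * clientid is held.
+		 */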
+		if (clp->nfsc_expire <= NFSD_MONOSEC &&
+		    (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
+			clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
+			clidrev = clp->nfsc_clientidrev;
+			error = nfsrpc_renew(clp, NULL, cred, p);
+			if (error == NFSERR_CBPATHDOWN)
+			    cbpathdown = 1;
+			else if (error == NFSERR_STALECLIENTID ||
+			    error == NFSERR_BADSESSION) {
+			    NFSLOCKCLSTATE();
+			    clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
+			    NFSUNLOCKCLSTATE();
+			} else if (error == NFSERR_EXPIRED)
+			    (void) nfscl_hasexpired(clp, clidrev, p);
+		}
+
+checkdsrenew:
+		if (NFSHASNFSV4N(clp->nfsc_nmp)) {
+			/* Do renews for any DS sessions. */
+			NFSLOCKMNT(clp->nfsc_nmp);
+			/* Skip first entry, since the MDS is handled above. */
+			dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
+			if (dsp != NULL)
+				dsp = TAILQ_NEXT(dsp, nfsclds_list);
+			while (dsp != NULL) {
+				if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
+				    dsp->nfsclds_sess.nfsess_defunct == 0) {
+					dsp->nfsclds_expire = NFSD_MONOSEC +
+					    clp->nfsc_renew;
+					NFSUNLOCKMNT(clp->nfsc_nmp);
+					(void)nfsrpc_renew(clp, dsp, cred, p);
+					goto checkdsrenew;
+				}
+				dsp = TAILQ_NEXT(dsp, nfsclds_list);
+			}
+			NFSUNLOCKMNT(clp->nfsc_nmp);
+		}
+
+		TAILQ_INIT(&dh);
+		NFSLOCKCLSTATE();
+		if (cbpathdown)
+			/* It's a Total Recall! */
+			nfscl_totalrecall(clp);
+
+		/*
+		 * Now, handle defunct owners.
+		 */
+		LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
+			if (LIST_EMPTY(&owp->nfsow_open)) {
+				if (owp->nfsow_defunct != 0)
+					nfscl_freeopenowner(owp, 0);
+			}
+		}
+
+		/*
+		 * Do the recall on any delegations. To avoid trouble, always
+		 * come back up here after having slept.
+		 */
+		igotlock = 0;
+tryagain:
+		dp = TAILQ_FIRST(&clp->nfsc_deleg);
+		while (dp != NULL) {
+			ndp = TAILQ_NEXT(dp, nfsdl_list);
+			if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
+				/*
+				 * Wait for outstanding I/O ops to be done.
+				 */
+				if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
+				    if (igotlock) {
+					nfsv4_unlock(&clp->nfsc_lock, 0);
+					igotlock = 0;
+				    }
+				    dp->nfsdl_rwlock.nfslock_lock |=
+					NFSV4LOCK_WANTED;
+				    (void) nfsmsleep(&dp->nfsdl_rwlock,
+					NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
+					NULL);
+				    goto tryagain;
+				}
+				while (!igotlock) {
+				    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
+					&islept, NFSCLSTATEMUTEXPTR, NULL);
+				    if (islept)
+					goto tryagain;
+				}
+				NFSUNLOCKCLSTATE();
+				newnfs_copycred(&dp->nfsdl_cred, cred);
+				ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
+				    NULL, cred, p, 1);
+				if (!ret) {
+				    nfscl_cleandeleg(dp);
+				    TAILQ_REMOVE(&clp->nfsc_deleg, dp,
+					nfsdl_list);
+				    LIST_REMOVE(dp, nfsdl_hash);
+				    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
+				    nfscl_delegcnt--;
+				    nfsstatsv1.cldelegates--;
+				}
+				NFSLOCKCLSTATE();
+			}
+			dp = ndp;
+		}
+
+		/*
+		 * Clear out old delegations, if we are above the high water
+		 * mark. Only clear out ones with no state related to them.
+		 * The tailq list is in LRU order.
+		 */
+		dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
+		while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
+		    ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
+		    if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
+			dp->nfsdl_rwlock.nfslock_lock == 0 &&
+			dp->nfsdl_timestamp < NFSD_MONOSEC &&
+			(dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
+			  NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
+			clearok = 1;
+			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
+			    op = LIST_FIRST(&owp->nfsow_open);
+			    if (op != NULL) {
+				clearok = 0;
+				break;
+			    }
+			}
+			if (clearok) {
+			    LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+				if (!LIST_EMPTY(&lp->nfsl_lock)) {
+				    clearok = 0;
+				    break;
+				}
+			    }
+			}
+			if (clearok) {
+			    TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
+			    LIST_REMOVE(dp, nfsdl_hash);
+			    TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
+			    nfscl_delegcnt--;
+			    nfsstatsv1.cldelegates--;
+			}
+		    }
+		    dp = ndp;
+		}
+		if (igotlock)
+			nfsv4_unlock(&clp->nfsc_lock, 0);
+
+		/*
+		 * Do the recall on any layouts. To avoid trouble, always
+		 * come back up here after having slept.
+		 */
+		TAILQ_INIT(&rlh);
+tryagain2:
+		TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
+			if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
+				/*
+				 * Wait for outstanding I/O ops to be done.
+				 */
+				if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
+				    (lyp->nfsly_lock.nfslock_lock &
+				     NFSV4LOCK_LOCK) != 0) {
+					lyp->nfsly_lock.nfslock_lock |=
+					    NFSV4LOCK_WANTED;
+					nfsmsleep(&lyp->nfsly_lock.nfslock_lock,
+					    NFSCLSTATEMUTEXPTR, PZERO, "nfslyp",
+					    NULL);
+					goto tryagain2;
+				}
+				/* Move the layout to the recall list. */
+				TAILQ_REMOVE(&clp->nfsc_layout, lyp,
+				    nfsly_list);
+				LIST_REMOVE(lyp, nfsly_hash);
+				TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);
+
+				/* Handle any layout commits. */
+				if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
+				    (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
+					lyp->nfsly_flags &= ~NFSLY_WRITTEN;
+					NFSUNLOCKCLSTATE();
+					NFSCL_DEBUG(3, "do layoutcommit\n");
+					nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
+					    cred, p);
+					NFSLOCKCLSTATE();
+					goto tryagain2;
+				}
+			}
+		}
+
+		/* Now, look for stale layouts. */
+		lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
+		while (lyp != NULL) {
+			nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
+			if (lyp->nfsly_timestamp < NFSD_MONOSEC &&
+			    (lyp->nfsly_flags & NFSLY_RECALL) == 0 &&
+			    lyp->nfsly_lock.nfslock_usecnt == 0 &&
+			    lyp->nfsly_lock.nfslock_lock == 0) {
+				NFSCL_DEBUG(4, "ret stale lay=%d\n",
+				    nfscl_layoutcnt);
+				recallp = malloc(sizeof(*recallp),
+				    M_NFSLAYRECALL, M_NOWAIT);
+				if (recallp == NULL)
+					break;
+				(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
+				    lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
+				    lyp->nfsly_stateid.seqid, 0, 0, NULL,
+				    recallp);
+			}
+			lyp = nlyp;
+		}
+
+		/*
+		 * Free up any unreferenced device info structures.
+		 */
+		LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
+			if (dip->nfsdi_layoutrefs == 0 &&
+			    dip->nfsdi_refcnt == 0) {
+				NFSCL_DEBUG(4, "freeing devinfo\n");
+				LIST_REMOVE(dip, nfsdi_list);
+				nfscl_freedevinfo(dip);
+			}
+		}
+		NFSUNLOCKCLSTATE();
+
+		/* Do layout return(s), as required. */
+		TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
+			TAILQ_REMOVE(&rlh, lyp, nfsly_list);
+			NFSCL_DEBUG(4, "ret layout\n");
+			nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
+			nfscl_freelayout(lyp);
+		}
+
+		/*
+		 * Delegreturn any delegations cleaned out or recalled.
+		 */
+		TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
+			newnfs_copycred(&dp->nfsdl_cred, cred);
+			(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
+			TAILQ_REMOVE(&dh, dp, nfsdl_list);
+			free(dp, M_NFSCLDELEG);
+		}
+
+		SLIST_INIT(&lfh);
+		/*
+		 * Call nfscl_cleanupkext() once per second to check for
+		 * open/lock owners where the process has exited.
+		 */
+		mytime = NFSD_MONOSEC;
+		if (prevsec != mytime) {
+			prevsec = mytime;
+			nfscl_cleanupkext(clp, &lfh);
+		}
+
+		/*
+		 * Do a ReleaseLockOwner for all lock owners where the
+		 * associated process no longer exists, as found by
+		 * nfscl_cleanupkext().
+		 */
+		newnfs_setroot(cred);
+		SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
+			LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
+			    nlp) {
+				(void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
+				    lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
+				    p);
+				nfscl_freelockowner(lp, 0);
+			}
+			free(lfhp, M_TEMP);
+		}
+		SLIST_INIT(&lfh);
+
+		NFSLOCKCLSTATE();
+		if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
+			(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
+			    hz);
+		if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
+			clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
+			NFSUNLOCKCLSTATE();
+			NFSFREECRED(cred);
+			wakeup((caddr_t)clp);
+			return;
+		}
+		NFSUNLOCKCLSTATE();
+	}
+}
+
+/*
+ * Initiate state recovery. Called when NFSERR_STALECLIENTID,
+ * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
+ */
+APPLESTATIC void
+nfscl_initiate_recovery(struct nfsclclient *clp)
+{
+
+	if (clp == NULL)
+		return;
+	NFSLOCKCLSTATE();
+	clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
+	NFSUNLOCKCLSTATE();
+	wakeup((caddr_t)clp);
+}
+
+/*
+ * Dump out the state stuff for debugging.
+ */
+APPLESTATIC void
+nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
+    int lockowner, int locks)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp;
+	struct nfsclopen *op;
+	struct nfscllockowner *lp;
+	struct nfscllock *lop;
+	struct nfscldeleg *dp;
+
+	clp = nmp->nm_clp;
+	if (clp == NULL) {
+		printf("nfscl dumpstate NULL clp\n");
+		return;
+	}
+	NFSLOCKCLSTATE();
+	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+	  LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
+	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
+		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
+		    owp->nfsow_owner[0], owp->nfsow_owner[1],
+		    owp->nfsow_owner[2], owp->nfsow_owner[3],
+		    owp->nfsow_seqid);
+	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+		if (opens)
+		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
+			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
+			op->nfso_stateid.other[2], op->nfso_opencnt,
+			op->nfso_fh[12]);
+		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+		    if (lockowner)
+			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
+			    lp->nfsl_owner[0], lp->nfsl_owner[1],
+			    lp->nfsl_owner[2], lp->nfsl_owner[3],
+			    lp->nfsl_seqid,
+			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
+			    lp->nfsl_stateid.other[2]);
+		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
+			if (locks)
+#ifdef __FreeBSD__
+			    printf("lck typ=%d fst=%ju end=%ju\n",
+				lop->nfslo_type, (intmax_t)lop->nfslo_first,
+				(intmax_t)lop->nfslo_end);
+#else
+			    printf("lck typ=%d fst=%qd end=%qd\n",
+				lop->nfslo_type, lop->nfslo_first,
+				lop->nfslo_end);
+#endif
+		    }
+		}
+	    }
+	  }
+	}
+	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+	    if (openowner && !LIST_EMPTY(&owp->nfsow_open))
+		printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
+		    owp->nfsow_owner[0], owp->nfsow_owner[1],
+		    owp->nfsow_owner[2], owp->nfsow_owner[3],
+		    owp->nfsow_seqid);
+	    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+		if (opens)
+		    printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
+			op->nfso_stateid.other[0], op->nfso_stateid.other[1],
+			op->nfso_stateid.other[2], op->nfso_opencnt,
+			op->nfso_fh[12]);
+		LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+		    if (lockowner)
+			printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
+			    lp->nfsl_owner[0], lp->nfsl_owner[1],
+			    lp->nfsl_owner[2], lp->nfsl_owner[3],
+			    lp->nfsl_seqid,
+			    lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
+			    lp->nfsl_stateid.other[2]);
+		    LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
+			if (locks)
+#ifdef __FreeBSD__
+			    printf("lck typ=%d fst=%ju end=%ju\n",
+				lop->nfslo_type, (intmax_t)lop->nfslo_first,
+				(intmax_t)lop->nfslo_end);
+#else
+			    printf("lck typ=%d fst=%qd end=%qd\n",
+				lop->nfslo_type, lop->nfslo_first,
+				lop->nfslo_end);
+#endif
+		    }
+		}
+	    }
+	}
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Check for duplicate open owners and opens.
+ * (Only used as a diagnostic aid.)
+ */
+APPLESTATIC void
+nfscl_dupopen(vnode_t vp, int dupopens)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp, *owp2;
+	struct nfsclopen *op, *op2;
+	struct nfsfh *nfhp;
+
+	clp = VFSTONFS(vnode_mount(vp))->nm_clp;
+	if (clp == NULL) {
+		printf("nfscl dupopen NULL clp\n");
+		return;
+	}
+	nfhp = VTONFS(vp)->n_fhp;
+	NFSLOCKCLSTATE();
+
+	/*
+	 * First, search for duplicate owners.
+	 * These should never happen!
+	 */
+	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
+	    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+		if (owp != owp2 &&
+		    !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
+		    NFSV4CL_LOCKNAMELEN)) {
+			NFSUNLOCKCLSTATE();
+			printf("DUP OWNER\n");
+			nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
+			return;
+		}
+	    }
+	}
+
+	/*
+	 * Now, search for duplicate stateids.
+	 * These shouldn't happen, either.
+	 */
+	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
+	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
+		LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+		    LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+			if (op != op2 &&
+			    (op->nfso_stateid.other[0] != 0 ||
+			     op->nfso_stateid.other[1] != 0 ||
+			     op->nfso_stateid.other[2] != 0) &&
+			    op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
+			    op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
+			    op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
+			    NFSUNLOCKCLSTATE();
+			    printf("DUP STATEID\n");
+			    nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
+				0);
+			    return;
+			}
+		    }
+		}
+	    }
+	}
+
+	/*
+	 * Now search for duplicate opens.
+	 * Duplicate opens for the same owner
+	 * should never occur. Other duplicates are
+	 * possible and are checked for if "dupopens"
+	 * is true.
+	 */
+	LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
+	    LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
+		if (nfhp->nfh_len == op2->nfso_fhlen &&
+		    !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
+		    LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+			    if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
+				!NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
+				(!NFSBCMP(op->nfso_own->nfsow_owner,
+				 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
+				 dupopens)) {
+				if (!NFSBCMP(op->nfso_own->nfsow_owner,
+				    op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
+				    NFSUNLOCKCLSTATE();
+				    printf("BADDUP OPEN\n");
+				} else {
+				    NFSUNLOCKCLSTATE();
+				    printf("DUP OPEN\n");
+				}
+				nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
+				    0, 0);
+				return;
+			    }
+			}
+		    }
+		}
+	    }
+	}
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * During close, find an open that needs to be dereferenced and
+ * dereference it. If there are no more opens for this file,
+ * log a message to that effect.
+ * Opens aren't actually Close'd until VOP_INACTIVE() is performed
+ * on the file's vnode.
+ * This is the safe way, since it is difficult to identify
+ * which open the close is for and I/O can be performed after the
+ * close(2) system call when a file is mmap'd.
+ * If it returns 0 for success, there will be a referenced
+ * clp returned via clpp.
+ */
+APPLESTATIC int
+nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp;
+	struct nfsclopen *op;
+	struct nfscldeleg *dp;
+	struct nfsfh *nfhp;
+	int error, notdecr;
+
+	error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
+	if (error)
+		return (error);
+	*clpp = clp;
+
+	nfhp = VTONFS(vp)->n_fhp;
+	notdecr = 1;
+	NFSLOCKCLSTATE();
+	/*
+	 * First, look for one under a delegation that was locally issued
+	 * and just decrement the opencnt for it. Since all my Opens against
+	 * the server are DENY_NONE, I don't see a problem with hanging
+	 * onto them. (It is much easier to use one of the extant Opens
+	 * that I already have on the server when a Delegation is recalled
+	 * than to do fresh Opens.) Someday, I might need to rethink this.
+	 */
+	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
+	if (dp != NULL) {
+		LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
+			op = LIST_FIRST(&owp->nfsow_open);
+			if (op != NULL) {
+				/*
+				 * Since a delegation is for a file, there
+				 * should never be more than one open for
+				 * each openowner.
+				 */
+				if (LIST_NEXT(op, nfso_list) != NULL)
+					panic("nfscdeleg opens");
+				if (notdecr && op->nfso_opencnt > 0) {
+					notdecr = 0;
+					op->nfso_opencnt--;
+					break;
+				}
+			}
+		}
+	}
+
+	/* Now process the opens against the server. */
+	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+			if (op->nfso_fhlen == nfhp->nfh_len &&
+			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
+			    nfhp->nfh_len)) {
+				/* Found an open, decrement cnt if possible */
+				if (notdecr && op->nfso_opencnt > 0) {
+					notdecr = 0;
+					op->nfso_opencnt--;
+				}
+				/*
+				 * There are more opens, so just return.
+				 */
+				if (op->nfso_opencnt > 0) {
+					NFSUNLOCKCLSTATE();
+					return (0);
+				}
+			}
+		}
+	}
+	NFSUNLOCKCLSTATE();
+	if (notdecr)
+		printf("nfscl: never fnd open\n");
+	return (0);
+}
+
+APPLESTATIC int
+nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp, *nowp;
+	struct nfsclopen *op;
+	struct nfscldeleg *dp;
+	struct nfsfh *nfhp;
+	struct nfsclrecalllayout *recallp;
+	int error;
+
+	error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
+	if (error)
+		return (error);
+	*clpp = clp;
+
+	nfhp = VTONFS(vp)->n_fhp;
+	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
+	NFSLOCKCLSTATE();
+	/*
+	 * First get rid of the local Open structures, which should be no
+	 * longer in use.
+	 */
+	dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
+	if (dp != NULL) {
+		LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
+			op = LIST_FIRST(&owp->nfsow_open);
+			if (op != NULL) {
+				KASSERT((op->nfso_opencnt == 0),
+				    ("nfscl: bad open cnt on deleg"));
+				nfscl_freeopen(op, 1);
+			}
+			nfscl_freeopenowner(owp, 1);
+		}
+	}
+
+	/* Return any layouts marked return on close. */
+	nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp);
+
+	/* Now process the opens against the server. */
+lookformore:
+	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+		op = LIST_FIRST(&owp->nfsow_open);
+		while (op != NULL) {
+			if (op->nfso_fhlen == nfhp->nfh_len &&
+			    !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
+			    nfhp->nfh_len)) {
+				/* Found an open, close it. */
+				KASSERT((op->nfso_opencnt == 0),
+				    ("nfscl: bad open cnt on server"));
+				NFSUNLOCKCLSTATE();
+				nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
+				    p);
+				NFSLOCKCLSTATE();
+				goto lookformore;
+			}
+			op = LIST_NEXT(op, nfso_list);
+		}
+	}
+	NFSUNLOCKCLSTATE();
+	/*
+	 * recallp has been set NULL by nfscl_retoncloselayout() if it was
+	 * used by the function, but calling free() with a NULL pointer is ok.
+	 */
+	free(recallp, M_NFSLAYRECALL);
+	return (0);
+}
+
+/*
+ * Return all delegations on this client.
+ * (Must be called with client sleep lock.)
+ */
+static void
+nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
+{
+	struct nfscldeleg *dp, *ndp;
+	struct ucred *cred;
+
+	cred = newnfs_getcred();
+	TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
+		nfscl_cleandeleg(dp);
+		(void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
+		nfscl_freedeleg(&clp->nfsc_deleg, dp);
+	}
+	NFSFREECRED(cred);
+}
+
+/*
+ * Do a callback RPC.
+ */
+APPLESTATIC void
+nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
+{
+	int clist, gotseq_ok, i, j, k, op, rcalls;
+	u_int32_t *tl;
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp = NULL;
+	int numops, taglen = -1, error = 0, trunc __unused;
+	u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
+	u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
+	vnode_t vp = NULL;
+	struct nfsnode *np;
+	struct vattr va;
+	struct nfsfh *nfhp;
+	mount_t mp;
+	nfsattrbit_t attrbits, rattrbits;
+	nfsv4stateid_t stateid;
+	uint32_t seqid, slotid = 0, highslot, cachethis __unused;
+	uint8_t sessionid[NFSX_V4SESSIONID];
+	struct mbuf *rep;
+	struct nfscllayout *lyp;
+	uint64_t filesid[2], len, off;
+	int changed, gotone, laytype, recalltype;
+	uint32_t iomode;
+	struct nfsclrecalllayout *recallp = NULL;
+	struct nfsclsession *tsep;
+
+	gotseq_ok = 0;
+	nfsrvd_rephead(nd);
+	NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+	taglen = fxdr_unsigned(int, *tl);
+	if (taglen < 0) {
+		error = EBADRPC;
+		goto nfsmout;
+	}
+	if (taglen <= NFSV4_SMALLSTR)
+		tagstr = tag;
+	else
+		tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
+	error = nfsrv_mtostr(nd, tagstr, taglen);
+	if (error) {
+		if (taglen > NFSV4_SMALLSTR)
+			free(tagstr, M_TEMP);
+		taglen = -1;
+		goto nfsmout;
+	}
+	(void) nfsm_strtom(nd, tag, taglen);
+	if (taglen > NFSV4_SMALLSTR) {
+		free(tagstr, M_TEMP);
+	}
+	NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
+	NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+	minorvers = fxdr_unsigned(u_int32_t, *tl++);
+	if (minorvers != NFSV4_MINORVERSION && minorvers != NFSV41_MINORVERSION)
+		nd->nd_repstat = NFSERR_MINORVERMISMATCH;
+	cbident = fxdr_unsigned(u_int32_t, *tl++);
+	if (nd->nd_repstat)
+		numops = 0;
+	else
+		numops = fxdr_unsigned(int, *tl);
+	/*
+	 * Loop around doing the sub ops.
+	 */
+	for (i = 0; i < numops; i++) {
+		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+		NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
+		*repp++ = *tl;
+		op = fxdr_unsigned(int, *tl);
+		if (op < NFSV4OP_CBGETATTR ||
+		   (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
+		   (op > NFSV4OP_CBNOTIFYDEVID &&
+		    minorvers == NFSV41_MINORVERSION)) {
+		    nd->nd_repstat = NFSERR_OPILLEGAL;
+		    *repp = nfscl_errmap(nd, minorvers);
+		    retops++;
+		    break;
+		}
+		nd->nd_procnum = op;
+		if (op < NFSV41_CBNOPS)
+			nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
+		switch (op) {
+		case NFSV4OP_CBGETATTR:
+			NFSCL_DEBUG(4, "cbgetattr\n");
+			mp = NULL;
+			vp = NULL;
+			error = nfsm_getfh(nd, &nfhp);
+			if (!error)
+				error = nfsrv_getattrbits(nd, &attrbits,
+				    NULL, NULL);
+			if (error == 0 && i == 0 &&
+			    minorvers != NFSV4_MINORVERSION)
+				error = NFSERR_OPNOTINSESS;
+			if (!error) {
+				mp = nfscl_getmnt(minorvers, sessionid, cbident,
+				    &clp);
+				if (mp == NULL)
+					error = NFSERR_SERVERFAULT;
+			}
+			if (!error) {
+				error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
+				    nfhp->nfh_len, p, &np);
+				if (!error)
+					vp = NFSTOV(np);
+			}
+			if (!error) {
+				NFSZERO_ATTRBIT(&rattrbits);
+				NFSLOCKCLSTATE();
+				dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
+				    nfhp->nfh_len);
+				if (dp != NULL) {
+					if (NFSISSET_ATTRBIT(&attrbits,
+					    NFSATTRBIT_SIZE)) {
+						if (vp != NULL)
+							va.va_size = np->n_size;
+						else
+							va.va_size =
+							    dp->nfsdl_size;
+						NFSSETBIT_ATTRBIT(&rattrbits,
+						    NFSATTRBIT_SIZE);
+					}
+					if (NFSISSET_ATTRBIT(&attrbits,
+					    NFSATTRBIT_CHANGE)) {
+						va.va_filerev =
+						    dp->nfsdl_change;
+						if (vp == NULL ||
+						    (np->n_flag & NDELEGMOD))
+							va.va_filerev++;
+						NFSSETBIT_ATTRBIT(&rattrbits,
+						    NFSATTRBIT_CHANGE);
+					}
+				} else
+					error = NFSERR_SERVERFAULT;
+				NFSUNLOCKCLSTATE();
+			}
+			if (vp != NULL)
+				vrele(vp);
+			if (mp != NULL)
+				vfs_unbusy(mp);
+			if (nfhp != NULL)
+				free(nfhp, M_NFSFH);
+			if (!error)
+				(void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
+				    NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
+				    (uint64_t)0, NULL);
+			break;
+		case NFSV4OP_CBRECALL:
+			NFSCL_DEBUG(4, "cbrecall\n");
+			NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
+			    NFSX_UNSIGNED);
+			stateid.seqid = *tl++;
+			NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
+			    NFSX_STATEIDOTHER);
+			tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
+			trunc = fxdr_unsigned(int, *tl);
+			error = nfsm_getfh(nd, &nfhp);
+			if (error == 0 && i == 0 &&
+			    minorvers != NFSV4_MINORVERSION)
+				error = NFSERR_OPNOTINSESS;
+			if (!error) {
+				NFSLOCKCLSTATE();
+				if (minorvers == NFSV4_MINORVERSION)
+					clp = nfscl_getclnt(cbident);
+				else
+					clp = nfscl_getclntsess(sessionid);
+				if (clp != NULL) {
+					dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
+					    nfhp->nfh_len);
+					if (dp != NULL && (dp->nfsdl_flags &
+					    NFSCLDL_DELEGRET) == 0) {
+						dp->nfsdl_flags |=
+						    NFSCLDL_RECALL;
+						wakeup((caddr_t)clp);
+					}
+				} else {
+					error = NFSERR_SERVERFAULT;
+				}
+				NFSUNLOCKCLSTATE();
+			}
+			if (nfhp != NULL)
+				free(nfhp, M_NFSFH);
+			break;
+		case NFSV4OP_CBLAYOUTRECALL:
+			NFSCL_DEBUG(4, "cblayrec\n");
+			nfhp = NULL;
+			NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
+			laytype = fxdr_unsigned(int, *tl++);
+			iomode = fxdr_unsigned(uint32_t, *tl++);
+			if (newnfs_true == *tl++)
+				changed = 1;
+			else
+				changed = 0;
+			recalltype = fxdr_unsigned(int, *tl);
+			NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n",
+			    laytype, iomode, changed, recalltype);
+			recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
+			    M_WAITOK);
+			if (laytype != NFSLAYOUT_NFSV4_1_FILES &&
+			    laytype != NFSLAYOUT_FLEXFILE)
+				error = NFSERR_NOMATCHLAYOUT;
+			else if (recalltype == NFSLAYOUTRETURN_FILE) {
+				error = nfsm_getfh(nd, &nfhp);
+				NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
+				if (error != 0)
+					goto nfsmout;
+				NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
+				    NFSX_STATEID);
+				off = fxdr_hyper(tl); tl += 2;
+				len = fxdr_hyper(tl); tl += 2;
+				stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
+				NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
+				if (minorvers == NFSV4_MINORVERSION)
+					error = NFSERR_NOTSUPP;
+				else if (i == 0)
+					error = NFSERR_OPNOTINSESS;
+				NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n",
+				    (uintmax_t)off, (uintmax_t)len,
+				    stateid.seqid, error);
+				if (error == 0) {
+					NFSLOCKCLSTATE();
+					clp = nfscl_getclntsess(sessionid);
+					NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
+					if (clp != NULL) {
+						lyp = nfscl_findlayout(clp,
+						    nfhp->nfh_fh,
+						    nfhp->nfh_len);
+						NFSCL_DEBUG(4, "cblyp=%p\n",
+						    lyp);
+						if (lyp != NULL &&
+						    (lyp->nfsly_flags &
+						     (NFSLY_FILES |
+						      NFSLY_FLEXFILE)) != 0 &&
+						    !NFSBCMP(stateid.other,
+						    lyp->nfsly_stateid.other,
+						    NFSX_STATEIDOTHER)) {
+							error =
+							    nfscl_layoutrecall(
+							    recalltype,
+							    lyp, iomode, off,
+							    len, stateid.seqid,
+							    0, 0, NULL,
+							    recallp);
+							recallp = NULL;
+							wakeup(clp);
+							NFSCL_DEBUG(4,
+							    "aft layrcal=%d\n",
+							    error);
+						} else
+							error =
+							  NFSERR_NOMATCHLAYOUT;
+					} else
+						error = NFSERR_NOMATCHLAYOUT;
+					NFSUNLOCKCLSTATE();
+				}
+				free(nfhp, M_NFSFH);
+			} else if (recalltype == NFSLAYOUTRETURN_FSID) {
+				NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
+				filesid[0] = fxdr_hyper(tl); tl += 2;
+				filesid[1] = fxdr_hyper(tl); tl += 2;
+				gotone = 0;
+				NFSLOCKCLSTATE();
+				clp = nfscl_getclntsess(sessionid);
+				if (clp != NULL) {
+					TAILQ_FOREACH(lyp, &clp->nfsc_layout,
+					    nfsly_list) {
+						if (lyp->nfsly_filesid[0] ==
+						    filesid[0] &&
+						    lyp->nfsly_filesid[1] ==
+						    filesid[1]) {
+							error =
+							    nfscl_layoutrecall(
+							    recalltype,
+							    lyp, iomode, 0,
+							    UINT64_MAX,
+							    lyp->nfsly_stateid.seqid,
+							    0, 0, NULL,
+							    recallp);
+							recallp = NULL;
+							gotone = 1;
+						}
+					}
+					if (gotone != 0)
+						wakeup(clp);
+					else
+						error = NFSERR_NOMATCHLAYOUT;
+				} else
+					error = NFSERR_NOMATCHLAYOUT;
+				NFSUNLOCKCLSTATE();
+			} else if (recalltype == NFSLAYOUTRETURN_ALL) {
+				gotone = 0;
+				NFSLOCKCLSTATE();
+				clp = nfscl_getclntsess(sessionid);
+				if (clp != NULL) {
+					TAILQ_FOREACH(lyp, &clp->nfsc_layout,
+					    nfsly_list) {
+						error = nfscl_layoutrecall(
+						    recalltype, lyp, iomode, 0,
+						    UINT64_MAX,
+						    lyp->nfsly_stateid.seqid,
+						    0, 0, NULL, recallp);
+						recallp = NULL;
+						gotone = 1;
+					}
+					if (gotone != 0)
+						wakeup(clp);
+					else
+						error = NFSERR_NOMATCHLAYOUT;
+				} else
+					error = NFSERR_NOMATCHLAYOUT;
+				NFSUNLOCKCLSTATE();
+			} else
+				error = NFSERR_NOMATCHLAYOUT;
+			if (recallp != NULL) {
+				free(recallp, M_NFSLAYRECALL);
+				recallp = NULL;
+			}
+			break;
+		case NFSV4OP_CBSEQUENCE:
+			NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
+			    5 * NFSX_UNSIGNED);
+			bcopy(tl, sessionid, NFSX_V4SESSIONID);
+			tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
+			seqid = fxdr_unsigned(uint32_t, *tl++);
+			slotid = fxdr_unsigned(uint32_t, *tl++);
+			highslot = fxdr_unsigned(uint32_t, *tl++);
+			cachethis = *tl++;
+			/* Throw away the referring call stuff. */
+			clist = fxdr_unsigned(int, *tl);
+			for (j = 0; j < clist; j++) {
+				NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
+				    NFSX_UNSIGNED);
+				tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
+				rcalls = fxdr_unsigned(int, *tl);
+				for (k = 0; k < rcalls; k++) {
+					NFSM_DISSECT(tl, uint32_t *,
+					    2 * NFSX_UNSIGNED);
+				}
+			}
+			NFSLOCKCLSTATE();
+			if (i == 0) {
+				clp = nfscl_getclntsess(sessionid);
+				if (clp == NULL)
+					error = NFSERR_SERVERFAULT;
+			} else
+				error = NFSERR_SEQUENCEPOS;
+			if (error == 0) {
+				tsep = nfsmnt_mdssession(clp->nfsc_nmp);
+				error = nfsv4_seqsession(seqid, slotid,
+				    highslot, tsep->nfsess_cbslots, &rep,
+				    tsep->nfsess_backslots);
+			}
+			NFSUNLOCKCLSTATE();
+			if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
+				gotseq_ok = 1;
+				if (rep != NULL) {
+					/*
+					 * Handle a reply for a retried
+					 * callback.  The reply will be
+					 * re-inserted in the session cache
+					 * by the nfsv4_seqsess_cacherep() call
+					 * after out:
+					 */
+					KASSERT(error == NFSERR_REPLYFROMCACHE,
+					    ("cbsequence: non-NULL rep"));
+					NFSCL_DEBUG(4, "Got cbretry\n");
+					m_freem(nd->nd_mreq);
+					nd->nd_mreq = rep;
+					rep = NULL;
+					goto out;
+				}
+				NFSM_BUILD(tl, uint32_t *,
+				    NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
+				bcopy(sessionid, tl, NFSX_V4SESSIONID);
+				tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
+				*tl++ = txdr_unsigned(seqid);
+				*tl++ = txdr_unsigned(slotid);
+				*tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
+				*tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
+			}
+			break;
+		default:
+			if (i == 0 && minorvers == NFSV41_MINORVERSION)
+				error = NFSERR_OPNOTINSESS;
+			else {
+				NFSCL_DEBUG(1, "unsupp callback %d\n", op);
+				error = NFSERR_NOTSUPP;
+			}
+			break;
+		}
+		if (error) {
+			if (error == EBADRPC || error == NFSERR_BADXDR) {
+				nd->nd_repstat = NFSERR_BADXDR;
+			} else {
+				nd->nd_repstat = error;
+			}
+			error = 0;
+		}
+		retops++;
+		if (nd->nd_repstat) {
+			*repp = nfscl_errmap(nd, minorvers);
+			break;
+		} else
+			*repp = 0;	/* NFS4_OK */
+	}
+nfsmout:
+	if (recallp != NULL)
+		free(recallp, M_NFSLAYRECALL);
+	if (error) {
+		if (error == EBADRPC || error == NFSERR_BADXDR)
+			nd->nd_repstat = NFSERR_BADXDR;
+		else
+			printf("nfsv4 comperr1=%d\n", error);
+	}
+	if (taglen == -1) {
+		NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+		*tl++ = 0;
+		*tl = 0;
+	} else {
+		*retopsp = txdr_unsigned(retops);
+	}
+	*nd->nd_errp = nfscl_errmap(nd, minorvers);
+out:
+	if (gotseq_ok != 0) {
+		rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
+		NFSLOCKCLSTATE();
+		clp = nfscl_getclntsess(sessionid);
+		if (clp != NULL) {
+			tsep = nfsmnt_mdssession(clp->nfsc_nmp);
+			nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
+			    NFSERR_OK, &rep);
+			NFSUNLOCKCLSTATE();
+		} else {
+			NFSUNLOCKCLSTATE();
+			m_freem(rep);
+		}
+	}
+}
+
+/*
+ * Generate the next cbident value. Basically just increment a static value
+ * and then check that it isn't already in the list, if it has wrapped around.
+ */
+static u_int32_t
+nfscl_nextcbident(void)
+{
+	struct nfsclclient *clp;
+	int matched;
+	static u_int32_t nextcbident = 0;
+	static int haswrapped = 0;
+
+	nextcbident++;
+	if (nextcbident == 0)
+		haswrapped = 1;
+	if (haswrapped) {
+		/*
+		 * Search the clientid list for one already using this cbident.
+		 */
+		do {
+			matched = 0;
+			NFSLOCKCLSTATE();
+			LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
+				if (clp->nfsc_cbident == nextcbident) {
+					matched = 1;
+					break;
+				}
+			}
+			NFSUNLOCKCLSTATE();
+			if (matched == 1)
+				nextcbident++;
+		} while (matched);
+	}
+	return (nextcbident);
+}
+
+/*
+ * Get the mount point related to a given cbident or session and busy it.
+ */
+static mount_t
+nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
+    struct nfsclclient **clpp)
+{
+	struct nfsclclient *clp;
+	mount_t mp;
+	int error;
+	struct nfsclsession *tsep;
+
+	*clpp = NULL;
+	NFSLOCKCLSTATE();
+	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
+		tsep = nfsmnt_mdssession(clp->nfsc_nmp);
+		if (minorvers == NFSV4_MINORVERSION) {
+			if (clp->nfsc_cbident == cbident)
+				break;
+		} else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
+		    NFSX_V4SESSIONID))
+			break;
+	}
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return (NULL);
+	}
+	mp = clp->nfsc_nmp->nm_mountp;
+	vfs_ref(mp);
+	NFSUNLOCKCLSTATE();
+	error = vfs_busy(mp, 0);
+	vfs_rel(mp);
+	if (error != 0)
+		return (NULL);
+	*clpp = clp;
+	return (mp);
+}
+
+/*
+ * Get the clientid pointer related to a given cbident.
+ */
+static struct nfsclclient *
+nfscl_getclnt(u_int32_t cbident)
+{
+	struct nfsclclient *clp;
+
+	LIST_FOREACH(clp, &nfsclhead, nfsc_list)
+		if (clp->nfsc_cbident == cbident)
+			break;
+	return (clp);
+}
+
+/*
+ * Get the clientid pointer related to a given sessionid.
+ */
+static struct nfsclclient *
+nfscl_getclntsess(uint8_t *sessionid)
+{
+	struct nfsclclient *clp;
+	struct nfsclsession *tsep;
+
+	LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
+		tsep = nfsmnt_mdssession(clp->nfsc_nmp);
+		if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
+		    NFSX_V4SESSIONID))
+			break;
+	}
+	return (clp);
+}
+
+/*
+ * Search for a lock conflict locally on the client. A conflict occurs if
+ * - the owners differ, the byte ranges overlap and at least one of the
+ *   locks is a write lock or this is an unlock.
+ */
+static int
+nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
+    struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
+    struct nfscllock **lopp)
+{
+	struct nfsclowner *owp;
+	struct nfsclopen *op;
+	int ret;
+
+	if (dp != NULL) {
+		ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
+		if (ret)
+			return (ret);
+	}
+	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+			if (op->nfso_fhlen == fhlen &&
+			    !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
+				ret = nfscl_checkconflict(&op->nfso_lock, nlop,
+				    own, lopp);
+				if (ret)
+					return (ret);
+			}
+		}
+	}
+	return (0);
+}
+
+static int
+nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
+    u_int8_t *own, struct nfscllock **lopp)
+{
+	struct nfscllockowner *lp;
+	struct nfscllock *lop;
+
+	LIST_FOREACH(lp, lhp, nfsl_list) {
+		if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
+			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
+				if (lop->nfslo_first >= nlop->nfslo_end)
+					break;
+				if (lop->nfslo_end <= nlop->nfslo_first)
+					continue;
+				if (lop->nfslo_type == F_WRLCK ||
+				    nlop->nfslo_type == F_WRLCK ||
+				    nlop->nfslo_type == F_UNLCK) {
+					if (lopp != NULL)
+						*lopp = lop;
+					return (NFSERR_DENIED);
+				}
+			}
+		}
+	}
+	return (0);
+}
+
+/*
+ * Check for a local conflicting lock.
+ */
+APPLESTATIC int
+nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
+    u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
+{
+	struct nfscllock *lop, nlck;
+	struct nfscldeleg *dp;
+	struct nfsnode *np;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN];
+	int error;
+
+	nlck.nfslo_type = fl->l_type;
+	nlck.nfslo_first = off;
+	if (len == NFS64BITSSET) {
+		nlck.nfslo_end = NFS64BITSSET;
+	} else {
+		nlck.nfslo_end = off + len;
+		if (nlck.nfslo_end <= nlck.nfslo_first)
+			return (NFSERR_INVAL);
+	}
+	np = VTONFS(vp);
+	nfscl_filllockowner(id, own, flags);
+	NFSLOCKCLSTATE();
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
+	    &nlck, own, dp, &lop);
+	if (error != 0) {
+		fl->l_whence = SEEK_SET;
+		fl->l_start = lop->nfslo_first;
+		if (lop->nfslo_end == NFS64BITSSET)
+			fl->l_len = 0;
+		else
+			fl->l_len = lop->nfslo_end - lop->nfslo_first;
+		fl->l_pid = (pid_t)0;
+		fl->l_type = lop->nfslo_type;
+		error = -1;			/* no RPC required */
+	} else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
+	    fl->l_type == F_RDLCK)) {
+		/*
+		 * The delegation ensures that there isn't a conflicting
+		 * lock on the server, so return -1 to indicate an RPC
+		 * isn't required.
+		 */
+		fl->l_type = F_UNLCK;
+		error = -1;
+	}
+	NFSUNLOCKCLSTATE();
+	return (error);
+}
+
+/*
+ * Handle Recall of a delegation.
+ * The clp must be exclusive locked when this is called.
+ */
+static int
+nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
+    struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
+    int called_from_renewthread)
+{
+	struct nfsclowner *owp, *lowp, *nowp;
+	struct nfsclopen *op, *lop;
+	struct nfscllockowner *lp;
+	struct nfscllock *lckp;
+	struct nfsnode *np;
+	int error = 0, ret, gotvp = 0;
+
+	if (vp == NULL) {
+		/*
+		 * First, get a vnode for the file. This is needed to do RPCs.
+		 */
+		ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
+		    dp->nfsdl_fhlen, p, &np);
+		if (ret) {
+			/*
+			 * File isn't open, so nothing to move over to the
+			 * server.
+			 */
+			return (0);
+		}
+		vp = NFSTOV(np);
+		gotvp = 1;
+	} else {
+		np = VTONFS(vp);
+	}
+	dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
+
+	/*
+	 * Ok, if it's a write delegation, flush data to the server, so
+	 * that close/open consistency is retained.
+	 */
+	ret = 0;
+	NFSLOCKNODE(np);
+	if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
+		np->n_flag |= NDELEGRECALL;
+		NFSUNLOCKNODE(np);
+		ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
+		NFSLOCKNODE(np);
+		np->n_flag &= ~NDELEGRECALL;
+	}
+	NFSINVALATTRCACHE(np);
+	NFSUNLOCKNODE(np);
+	if (ret == EIO && called_from_renewthread != 0) {
+		/*
+		 * If the flush failed with EIO for the renew thread,
+		 * return now, so that the dirty buffer will be flushed
+		 * later.
+		 */
+		if (gotvp != 0)
+			vrele(vp);
+		return (ret);
+	}
+
+	/*
+	 * Now, for each openowner with opens issued locally, move them
+	 * over to state against the server.
+	 */
+	LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
+		lop = LIST_FIRST(&lowp->nfsow_open);
+		if (lop != NULL) {
+			if (LIST_NEXT(lop, nfso_list) != NULL)
+				panic("nfsdlg mult opens");
+			/*
+			 * Look for the same openowner against the server.
+			 */
+			LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+				if (!NFSBCMP(lowp->nfsow_owner,
+				    owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
+					newnfs_copycred(&dp->nfsdl_cred, cred);
+					ret = nfscl_moveopen(vp, clp, nmp, lop,
+					    owp, dp, cred, p);
+					if (ret == NFSERR_STALECLIENTID ||
+					    ret == NFSERR_STALEDONTRECOVER ||
+					    ret == NFSERR_BADSESSION) {
+						if (gotvp)
+							vrele(vp);
+						return (ret);
+					}
+					if (ret) {
+						nfscl_freeopen(lop, 1);
+						if (!error)
+							error = ret;
+					}
+					break;
+				}
+			}
+
+			/*
+			 * If no openowner found, create one and get an open
+			 * for it.
+			 */
+			if (owp == NULL) {
+				nowp = malloc(
+				    sizeof (struct nfsclowner), M_NFSCLOWNER,
+				    M_WAITOK);
+				nfscl_newopen(clp, NULL, &owp, &nowp, &op, 
+				    NULL, lowp->nfsow_owner, dp->nfsdl_fh,
+				    dp->nfsdl_fhlen, NULL, NULL);
+				newnfs_copycred(&dp->nfsdl_cred, cred);
+				ret = nfscl_moveopen(vp, clp, nmp, lop,
+				    owp, dp, cred, p);
+				if (ret) {
+					nfscl_freeopenowner(owp, 0);
+					if (ret == NFSERR_STALECLIENTID ||
+					    ret == NFSERR_STALEDONTRECOVER ||
+					    ret == NFSERR_BADSESSION) {
+						if (gotvp)
+							vrele(vp);
+						return (ret);
+					}
+					if (ret) {
+						nfscl_freeopen(lop, 1);
+						if (!error)
+							error = ret;
+					}
+				}
+			}
+		}
+	}
+
+	/*
+	 * Now, get byte range locks for any locks done locally.
+	 */
+	LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+		LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
+			newnfs_copycred(&dp->nfsdl_cred, cred);
+			ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
+			if (ret == NFSERR_STALESTATEID ||
+			    ret == NFSERR_STALEDONTRECOVER ||
+			    ret == NFSERR_STALECLIENTID ||
+			    ret == NFSERR_BADSESSION) {
+				if (gotvp)
+					vrele(vp);
+				return (ret);
+			}
+			if (ret && !error)
+				error = ret;
+		}
+	}
+	if (gotvp)
+		vrele(vp);
+	return (error);
+}
+
+/*
+ * Move a locally issued open over to an owner on the state list.
+ * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
+ * returns with it unlocked.
+ */
+static int
+nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
+    struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclopen *op, *nop;
+	struct nfscldeleg *ndp;
+	struct nfsnode *np;
+	int error = 0, newone;
+
+	/*
+	 * First, look for an appropriate open. If found, just increment the
+	 * opencnt in it.
+	 */
+	LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+		if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
+		    op->nfso_fhlen == lop->nfso_fhlen &&
+		    !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
+			op->nfso_opencnt += lop->nfso_opencnt;
+			nfscl_freeopen(lop, 1);
+			return (0);
+		}
+	}
+
+	/* No appropriate open, so we have to do one against the server. */
+	np = VTONFS(vp);
+	nop = malloc(sizeof (struct nfsclopen) +
+	    lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
+	newone = 0;
+	nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
+	    lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
+	ndp = dp;
+	error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
+	    lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
+	    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
+	if (error) {
+		if (newone)
+			nfscl_freeopen(op, 0);
+	} else {
+		op->nfso_mode |= lop->nfso_mode;
+		op->nfso_opencnt += lop->nfso_opencnt;
+		nfscl_freeopen(lop, 1);
+	}
+	if (nop != NULL)
+		free(nop, M_NFSCLOPEN);
+	if (ndp != NULL) {
+		/*
+		 * What should I do with the returned delegation, since the
+		 * delegation is being recalled? For now, just printf and
+		 * throw it away.
+		 */
+		printf("Moveopen returned deleg\n");
+		free(ndp, M_NFSCLDELEG);
+	}
+	return (error);
+}
+
+/*
+ * Recall all delegations on this client.
+ */
+static void
+nfscl_totalrecall(struct nfsclclient *clp)
+{
+	struct nfscldeleg *dp;
+
+	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+		if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
+			dp->nfsdl_flags |= NFSCLDL_RECALL;
+	}
+}
+
+/*
+ * Relock byte ranges. Called for delegation recall and state expiry.
+ */
+static int
+nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
+    struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
+    NFSPROC_T *p)
+{
+	struct nfscllockowner *nlp;
+	struct nfsfh *nfhp;
+	u_int64_t off, len;
+	int error, newone, donelocally;
+
+	off = lop->nfslo_first;
+	len = lop->nfslo_end - lop->nfslo_first;
+	error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
+	    clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
+	    lp->nfsl_openowner, &nlp, &newone, &donelocally);
+	if (error || donelocally)
+		return (error);
+	nfhp = VTONFS(vp)->n_fhp;
+	error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
+	    nfhp->nfh_len, nlp, newone, 0, off,
+	    len, lop->nfslo_type, cred, p);
+	if (error)
+		nfscl_freelockowner(nlp, 0);
+	return (error);
+}
+
+/*
+ * Called to re-open a file. Basically get a vnode for the file handle
+ * and then call nfsrpc_openrpc() to do the rest.
+ */
+static int
+nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
+    u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsnode *np;
+	vnode_t vp;
+	int error;
+
+	error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
+	if (error)
+		return (error);
+	vp = NFSTOV(np);
+	if (np->n_v4 != NULL) {
+		error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
+		    np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
+		    NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
+		    cred, p);
+	} else {
+		error = EINVAL;
+	}
+	vrele(vp);
+	return (error);
+}
+
+/*
+ * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
+ * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
+ * fail.
+ */
+static int
+nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
+    u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
+    u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
+    int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
+{
+	int error;
+
+	do {
+		error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
+		    mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
+		    0, 0);
+		if (error == NFSERR_DELAY)
+			(void) nfs_catnap(PZERO, error, "nfstryop");
+	} while (error == NFSERR_DELAY);
+	if (error == EAUTH || error == EACCES) {
+		/* Try again using system credentials */
+		newnfs_setroot(cred);
+		do {
+		    error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
+			newfhlen, mode, op, name, namelen, ndpp, reclaim,
+			delegtype, cred, p, 1, 0);
+		    if (error == NFSERR_DELAY)
+			(void) nfs_catnap(PZERO, error, "nfstryop");
+		} while (error == NFSERR_DELAY);
+	}
+	return (error);
+}
+
+/*
+ * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
+ * NFSERR_DELAY. Also, retry with system credentials, if the provided
+ * cred doesn't work.
+ */
+static int
+nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
+    int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
+    u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+
+	do {
+		error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
+		    reclaim, off, len, type, cred, p, 0);
+		if (!error && nd->nd_repstat == NFSERR_DELAY)
+			(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
+			    "nfstrylck");
+	} while (!error && nd->nd_repstat == NFSERR_DELAY);
+	if (!error)
+		error = nd->nd_repstat;
+	if (error == EAUTH || error == EACCES) {
+		/* Try again using root credentials */
+		newnfs_setroot(cred);
+		do {
+			error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
+			    newone, reclaim, off, len, type, cred, p, 1);
+			if (!error && nd->nd_repstat == NFSERR_DELAY)
+				(void) nfs_catnap(PZERO, (int)nd->nd_repstat,
+				    "nfstrylck");
+		} while (!error && nd->nd_repstat == NFSERR_DELAY);
+		if (!error)
+			error = nd->nd_repstat;
+	}
+	return (error);
+}
+
+/*
+ * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
+ * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
+ * credentials fail.
+ */
+static int
+nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
+    struct nfsmount *nmp, NFSPROC_T *p)
+{
+	int error;
+
+	do {
+		error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
+		if (error == NFSERR_DELAY)
+			(void) nfs_catnap(PZERO, error, "nfstrydp");
+	} while (error == NFSERR_DELAY);
+	if (error == EAUTH || error == EACCES) {
+		/* Try again using system credentials */
+		newnfs_setroot(cred);
+		do {
+			error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
+			if (error == NFSERR_DELAY)
+				(void) nfs_catnap(PZERO, error, "nfstrydp");
+		} while (error == NFSERR_DELAY);
+	}
+	return (error);
+}
+
+/*
+ * Try a close against the server. Just call nfsrpc_closerpc(),
+ * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
+ * credentials fail.
+ */
+APPLESTATIC int
+nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
+    struct nfsmount *nmp, NFSPROC_T *p)
+{
+	struct nfsrv_descript nfsd, *nd = &nfsd;
+	int error;
+
+	do {
+		error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
+		if (error == NFSERR_DELAY)
+			(void) nfs_catnap(PZERO, error, "nfstrycl");
+	} while (error == NFSERR_DELAY);
+	if (error == EAUTH || error == EACCES) {
+		/* Try again using system credentials */
+		newnfs_setroot(cred);
+		do {
+			error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
+			if (error == NFSERR_DELAY)
+				(void) nfs_catnap(PZERO, error, "nfstrycl");
+		} while (error == NFSERR_DELAY);
+	}
+	return (error);
+}
+
+/*
+ * Decide if a delegation on a file permits close without flushing writes
+ * to the server. This might be a big performance win in some environments.
+ * (Not useful until the client does caching on local stable storage.)
+ */
+APPLESTATIC int
+nfscl_mustflush(vnode_t vp)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsnode *np;
+	struct nfsmount *nmp;
+
+	np = VTONFS(vp);
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (!NFSHASNFSV4(nmp))
+		return (1);
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return (1);
+	}
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (dp != NULL && (dp->nfsdl_flags &
+	    (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
+	     NFSCLDL_WRITE &&
+	    (dp->nfsdl_sizelimit >= np->n_size ||
+	     !NFSHASSTRICT3530(nmp))) {
+		NFSUNLOCKCLSTATE();
+		return (0);
+	}
+	NFSUNLOCKCLSTATE();
+	return (1);
+}
+
+/*
+ * See if a (write) delegation exists for this file.
+ */
+APPLESTATIC int
+nfscl_nodeleg(vnode_t vp, int writedeleg)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsnode *np;
+	struct nfsmount *nmp;
+
+	np = VTONFS(vp);
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (!NFSHASNFSV4(nmp))
+		return (1);
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return (1);
+	}
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (dp != NULL &&
+	    (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
+	    (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
+	     NFSCLDL_WRITE)) {
+		NFSUNLOCKCLSTATE();
+		return (0);
+	}
+	NFSUNLOCKCLSTATE();
+	return (1);
+}
+
+/*
+ * Look for an associated delegation that should be DelegReturned.
+ */
+APPLESTATIC int
+nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsclowner *owp;
+	struct nfscllockowner *lp;
+	struct nfsmount *nmp;
+	struct ucred *cred;
+	struct nfsnode *np;
+	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	np = VTONFS(vp);
+	NFSLOCKCLSTATE();
+	/*
+	 * Loop around waiting for:
+	 * - outstanding I/O operations on delegations to complete
+	 * - for a delegation on vp that has state, lock the client and
+	 *   do a recall
+	 * - return delegation with no state
+	 */
+	while (1) {
+		clp = nfscl_findcl(nmp);
+		if (clp == NULL) {
+			NFSUNLOCKCLSTATE();
+			return (retcnt);
+		}
+		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
+		    np->n_fhp->nfh_len);
+		if (dp != NULL) {
+		    /*
+		     * Wait for outstanding I/O ops to be done.
+		     */
+		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
+			if (igotlock) {
+			    nfsv4_unlock(&clp->nfsc_lock, 0);
+			    igotlock = 0;
+			}
+			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
+			(void) nfsmsleep(&dp->nfsdl_rwlock,
+			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
+			continue;
+		    }
+		    needsrecall = 0;
+		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
+			if (!LIST_EMPTY(&owp->nfsow_open)) {
+			    needsrecall = 1;
+			    break;
+			}
+		    }
+		    if (!needsrecall) {
+			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
+				needsrecall = 1;
+				break;
+			    }
+			}
+		    }
+		    if (needsrecall && !triedrecall) {
+			dp->nfsdl_flags |= NFSCLDL_DELEGRET;
+			islept = 0;
+			while (!igotlock) {
+			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
+				&islept, NFSCLSTATEMUTEXPTR, NULL);
+			    if (islept)
+				break;
+			}
+			if (islept)
+			    continue;
+			NFSUNLOCKCLSTATE();
+			cred = newnfs_getcred();
+			newnfs_copycred(&dp->nfsdl_cred, cred);
+			(void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
+			NFSFREECRED(cred);
+			triedrecall = 1;
+			NFSLOCKCLSTATE();
+			nfsv4_unlock(&clp->nfsc_lock, 0);
+			igotlock = 0;
+			continue;
+		    }
+		    *stp = dp->nfsdl_stateid;
+		    retcnt = 1;
+		    nfscl_cleandeleg(dp);
+		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
+		}
+		if (igotlock)
+		    nfsv4_unlock(&clp->nfsc_lock, 0);
+		NFSUNLOCKCLSTATE();
+		return (retcnt);
+	}
+}
+
+/*
+ * Look for associated delegation(s) that should be DelegReturned.
+ */
+APPLESTATIC int
+nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
+    nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsclowner *owp;
+	struct nfscllockowner *lp;
+	struct nfsmount *nmp;
+	struct ucred *cred;
+	struct nfsnode *np;
+	int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
+
+	nmp = VFSTONFS(vnode_mount(fvp));
+	*gotfdp = 0;
+	*gottdp = 0;
+	NFSLOCKCLSTATE();
+	/*
+	 * Loop around waiting for:
+	 * - outstanding I/O operations on delegations to complete
+	 * - for a delegation on fvp that has state, lock the client and
+	 *   do a recall
+	 * - return delegation(s) with no state.
+	 */
+	while (1) {
+		clp = nfscl_findcl(nmp);
+		if (clp == NULL) {
+			NFSUNLOCKCLSTATE();
+			return (retcnt);
+		}
+		np = VTONFS(fvp);
+		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
+		    np->n_fhp->nfh_len);
+		if (dp != NULL && *gotfdp == 0) {
+		    /*
+		     * Wait for outstanding I/O ops to be done.
+		     */
+		    if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
+			if (igotlock) {
+			    nfsv4_unlock(&clp->nfsc_lock, 0);
+			    igotlock = 0;
+			}
+			dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
+			(void) nfsmsleep(&dp->nfsdl_rwlock,
+			    NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
+			continue;
+		    }
+		    needsrecall = 0;
+		    LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
+			if (!LIST_EMPTY(&owp->nfsow_open)) {
+			    needsrecall = 1;
+			    break;
+			}
+		    }
+		    if (!needsrecall) {
+			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
+				needsrecall = 1;
+				break;
+			    }
+			}
+		    }
+		    if (needsrecall && !triedrecall) {
+			dp->nfsdl_flags |= NFSCLDL_DELEGRET;
+			islept = 0;
+			while (!igotlock) {
+			    igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
+				&islept, NFSCLSTATEMUTEXPTR, NULL);
+			    if (islept)
+				break;
+			}
+			if (islept)
+			    continue;
+			NFSUNLOCKCLSTATE();
+			cred = newnfs_getcred();
+			newnfs_copycred(&dp->nfsdl_cred, cred);
+			(void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
+			NFSFREECRED(cred);
+			triedrecall = 1;
+			NFSLOCKCLSTATE();
+			nfsv4_unlock(&clp->nfsc_lock, 0);
+			igotlock = 0;
+			continue;
+		    }
+		    *fstp = dp->nfsdl_stateid;
+		    retcnt++;
+		    *gotfdp = 1;
+		    nfscl_cleandeleg(dp);
+		    nfscl_freedeleg(&clp->nfsc_deleg, dp);
+		}
+		if (igotlock) {
+		    nfsv4_unlock(&clp->nfsc_lock, 0);
+		    igotlock = 0;
+		}
+		if (tvp != NULL) {
+		    np = VTONFS(tvp);
+		    dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
+			np->n_fhp->nfh_len);
+		    if (dp != NULL && *gottdp == 0) {
+			/*
+			 * Wait for outstanding I/O ops to be done.
+			 */
+			if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
+			    dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
+			    (void) nfsmsleep(&dp->nfsdl_rwlock,
+				NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
+			    continue;
+			}
+			LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
+			    if (!LIST_EMPTY(&owp->nfsow_open)) {
+				NFSUNLOCKCLSTATE();
+				return (retcnt);
+			    }
+			}
+			LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+			    if (!LIST_EMPTY(&lp->nfsl_lock)) {
+				NFSUNLOCKCLSTATE();
+				return (retcnt);
+			    }
+			}
+			*tstp = dp->nfsdl_stateid;
+			retcnt++;
+			*gottdp = 1;
+			nfscl_cleandeleg(dp);
+			nfscl_freedeleg(&clp->nfsc_deleg, dp);
+		    }
+		}
+		NFSUNLOCKCLSTATE();
+		return (retcnt);
+	}
+}
+
+/*
+ * Get a reference on the clientid associated with the mount point.
+ * Return 1 on success, 0 otherwise.
+ */
+APPLESTATIC int
+nfscl_getref(struct nfsmount *nmp)
+{
+	struct nfsclclient *clp;
+
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return (0);
+	}
+	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
+	NFSUNLOCKCLSTATE();
+	return (1);
+}
+
+/*
+ * Release a reference on a clientid acquired with the above call.
+ */
+APPLESTATIC void
+nfscl_relref(struct nfsmount *nmp)
+{
+	struct nfsclclient *clp;
+
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return;
+	}
+	nfsv4_relref(&clp->nfsc_lock);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Save the size attribute in the delegation, since the nfsnode
+ * is going away.
+ */
+APPLESTATIC void
+nfscl_reclaimnode(vnode_t vp)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (!NFSHASNFSV4(nmp))
+		return;
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return;
+	}
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
+		dp->nfsdl_size = np->n_size;
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Get the saved size attribute in the delegation, since it is a
+ * newly allocated nfsnode.
+ */
+APPLESTATIC void
+nfscl_newnode(vnode_t vp)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (!NFSHASNFSV4(nmp))
+		return;
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return;
+	}
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
+		np->n_size = dp->nfsdl_size;
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * If there is a valid write delegation for this file, set the modtime
+ * to the local clock time.
+ */
+APPLESTATIC void
+nfscl_delegmodtime(vnode_t vp)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (!NFSHASNFSV4(nmp))
+		return;
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return;
+	}
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
+		nanotime(&dp->nfsdl_modtime);
+		dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
+	}
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * If there is a valid write delegation for this file with a modtime set,
+ * put that modtime in mtime.
+ */
+APPLESTATIC void
+nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
+{
+	struct nfsclclient *clp;
+	struct nfscldeleg *dp;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	if (!NFSHASNFSV4(nmp))
+		return;
+	NFSLOCKCLSTATE();
+	clp = nfscl_findcl(nmp);
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return;
+	}
+	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (dp != NULL &&
+	    (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
+	    (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
+		*mtime = dp->nfsdl_modtime;
+	NFSUNLOCKCLSTATE();
+}
+
+static int
+nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
+{
+	short *defaulterrp, *errp;
+
+	if (!nd->nd_repstat)
+		return (0);
+	if (nd->nd_procnum == NFSPROC_NOOP)
+		return (txdr_unsigned(nd->nd_repstat & 0xffff));
+	if (nd->nd_repstat == EBADRPC)
+		return (txdr_unsigned(NFSERR_BADXDR));
+	if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
+	    nd->nd_repstat == NFSERR_OPILLEGAL)
+		return (txdr_unsigned(nd->nd_repstat));
+	if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
+	    minorvers > NFSV4_MINORVERSION) {
+		/* NFSv4.n error. */
+		return (txdr_unsigned(nd->nd_repstat));
+	}
+	if (nd->nd_procnum < NFSV4OP_CBNOPS)
+		errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
+	else
+		return (txdr_unsigned(nd->nd_repstat));
+	while (*++errp)
+		if (*errp == (short)nd->nd_repstat)
+			return (txdr_unsigned(nd->nd_repstat));
+	return (txdr_unsigned(*defaulterrp));
+}
+
+/*
+ * Called to find/add a layout to a client.
+ * This function returns the layout with a refcnt (shared lock) upon
+ * success (returns 0) or with no lock/refcnt on the layout when an
+ * error is returned.
+ * If a layout is passed in via lypp, it is locked (exclusively locked).
+ */
+APPLESTATIC int
+nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
+    nfsv4stateid_t *stateidp, int layouttype, int retonclose,
+    struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclclient *clp;
+	struct nfscllayout *lyp, *tlyp;
+	struct nfsclflayout *flp;
+	struct nfsnode *np = VTONFS(vp);
+	mount_t mp;
+	int layout_passed_in;
+
+	mp = nmp->nm_mountp;
+	layout_passed_in = 1;
+	tlyp = NULL;
+	lyp = *lypp;
+	if (lyp == NULL) {
+		layout_passed_in = 0;
+		tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
+		    M_WAITOK | M_ZERO);
+	}
+
+	NFSLOCKCLSTATE();
+	clp = nmp->nm_clp;
+	if (clp == NULL) {
+		if (layout_passed_in != 0)
+			nfsv4_unlock(&lyp->nfsly_lock, 0);
+		NFSUNLOCKCLSTATE();
+		if (tlyp != NULL)
+			free(tlyp, M_NFSLAYOUT);
+		return (EPERM);
+	}
+	if (lyp == NULL) {
+		/*
+		 * Although no lyp was passed in, another thread might have
+		 * allocated one. If one is found, just increment its ref
+		 * count and return it.
+		 */
+		lyp = nfscl_findlayout(clp, fhp, fhlen);
+		if (lyp == NULL) {
+			lyp = tlyp;
+			tlyp = NULL;
+			lyp->nfsly_stateid.seqid = stateidp->seqid;
+			lyp->nfsly_stateid.other[0] = stateidp->other[0];
+			lyp->nfsly_stateid.other[1] = stateidp->other[1];
+			lyp->nfsly_stateid.other[2] = stateidp->other[2];
+			lyp->nfsly_lastbyte = 0;
+			LIST_INIT(&lyp->nfsly_flayread);
+			LIST_INIT(&lyp->nfsly_flayrw);
+			LIST_INIT(&lyp->nfsly_recall);
+			lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
+			lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
+			lyp->nfsly_clp = clp;
+			if (layouttype == NFSLAYOUT_FLEXFILE)
+				lyp->nfsly_flags = NFSLY_FLEXFILE;
+			else
+				lyp->nfsly_flags = NFSLY_FILES;
+			if (retonclose != 0)
+				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
+			lyp->nfsly_fhlen = fhlen;
+			NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
+			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
+			LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
+			    nfsly_hash);
+			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
+			nfscl_layoutcnt++;
+		} else {
+			if (retonclose != 0)
+				lyp->nfsly_flags |= NFSLY_RETONCLOSE;
+			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
+			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
+			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
+		}
+		nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
+		if (NFSCL_FORCEDISM(mp)) {
+			NFSUNLOCKCLSTATE();
+			if (tlyp != NULL)
+				free(tlyp, M_NFSLAYOUT);
+			return (EPERM);
+		}
+		*lypp = lyp;
+	} else
+		lyp->nfsly_stateid.seqid = stateidp->seqid;
+
+	/* Merge the new list of File Layouts into the list. */
+	flp = LIST_FIRST(fhlp);
+	if (flp != NULL) {
+		if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
+			nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
+		else
+			nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
+	}
+	if (layout_passed_in != 0)
+		nfsv4_unlock(&lyp->nfsly_lock, 1);
+	NFSUNLOCKCLSTATE();
+	if (tlyp != NULL)
+		free(tlyp, M_NFSLAYOUT);
+	return (0);
+}
+
+/*
+ * Search for a layout by MDS file handle.
+ * If one is found, it is returned with a refcnt (shared lock) iff
+ * retflpp returned non-NULL and locked (exclusively locked) iff retflpp is
+ * returned NULL.
+ */
+struct nfscllayout *
+nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
+    uint64_t off, struct nfsclflayout **retflpp, int *recalledp)
+{
+	struct nfscllayout *lyp;
+	mount_t mp;
+	int error, igotlock;
+
+	mp = clp->nfsc_nmp->nm_mountp;
+	*recalledp = 0;
+	*retflpp = NULL;
+	NFSLOCKCLSTATE();
+	lyp = nfscl_findlayout(clp, fhp, fhlen);
+	if (lyp != NULL) {
+		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
+			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
+			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
+			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
+			error = nfscl_findlayoutforio(lyp, off,
+			    NFSV4OPEN_ACCESSREAD, retflpp);
+			if (error == 0)
+				nfsv4_getref(&lyp->nfsly_lock, NULL,
+				    NFSCLSTATEMUTEXPTR, mp);
+			else {
+				do {
+					igotlock = nfsv4_lock(&lyp->nfsly_lock,
+					    1, NULL, NFSCLSTATEMUTEXPTR, mp);
+				} while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
+				*retflpp = NULL;
+			}
+			if (NFSCL_FORCEDISM(mp)) {
+				lyp = NULL;
+				*recalledp = 1;
+			}
+		} else {
+			lyp = NULL;
+			*recalledp = 1;
+		}
+	}
+	NFSUNLOCKCLSTATE();
+	return (lyp);
+}
+
+/*
+ * Search for a layout by MDS file handle. If one is found, mark it to be
+ * recalled, if it is already marked "return on close".
+ */
+static void
+nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
+    int fhlen, struct nfsclrecalllayout **recallpp)
+{
+	struct nfscllayout *lyp;
+	uint32_t iomode;
+
+	if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vnode_mount(vp))) ||
+	    nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
+	    (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
+		return;
+	lyp = nfscl_findlayout(clp, fhp, fhlen);
+	if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE |
+	    NFSLY_RECALL)) == NFSLY_RETONCLOSE) {
+		iomode = 0;
+		if (!LIST_EMPTY(&lyp->nfsly_flayread))
+			iomode |= NFSLAYOUTIOMODE_READ;
+		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
+			iomode |= NFSLAYOUTIOMODE_RW;
+		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
+		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
+		    *recallpp);
+		NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
+		*recallpp = NULL;
+	}
+}
+
+/*
+ * Mark the layout to be recalled and with an error.
+ * Also, disable the dsp from further use.
+ */
+void
+nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
+    struct nfscllayout *lyp, struct nfsclds *dsp)
+{
+	struct nfsclrecalllayout *recallp;
+	uint32_t iomode;
+
+	printf("DS being disabled, error=%d\n", stat);
+	/* Set up the return of the layout. */
+	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
+	iomode = 0;
+	NFSLOCKCLSTATE();
+	if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
+		if (!LIST_EMPTY(&lyp->nfsly_flayread))
+			iomode |= NFSLAYOUTIOMODE_READ;
+		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
+			iomode |= NFSLAYOUTIOMODE_RW;
+		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
+		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
+		    dp->nfsdi_deviceid, recallp);
+		NFSUNLOCKCLSTATE();
+		NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
+	} else {
+		NFSUNLOCKCLSTATE();
+		free(recallp, M_NFSLAYRECALL);
+	}
+
+	/* And shut the TCP connection down. */
+	nfscl_cancelreqs(dsp);
+}
+
+/*
+ * Cancel all RPCs for this "dsp" by closing the connection.
+ * Also, mark the session as defunct.
+ * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
+ * cannot be shut down.
+ */
+APPLESTATIC void
+nfscl_cancelreqs(struct nfsclds *dsp)
+{
+	struct __rpc_client *cl;
+	static int non_event;
+
+	NFSLOCKDS(dsp);
+	if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
+	    dsp->nfsclds_sockp != NULL &&
+	    dsp->nfsclds_sockp->nr_client != NULL) {
+		dsp->nfsclds_flags |= NFSCLDS_CLOSED;
+		cl = dsp->nfsclds_sockp->nr_client;
+		dsp->nfsclds_sess.nfsess_defunct = 1;
+		NFSUNLOCKDS(dsp);
+		CLNT_CLOSE(cl);
+		/*
+		 * This 1sec sleep is done to reduce the number of reconnect
+		 * attempts made on the DS while it has failed.
+		 */
+		tsleep(&non_event, PVFS, "ndscls", hz);
+		return;
+	}
+	NFSUNLOCKDS(dsp);
+}
+
+/*
+ * Dereference a layout.
+ */
+void
+nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
+{
+
+	NFSLOCKCLSTATE();
+	if (exclocked != 0)
+		nfsv4_unlock(&lyp->nfsly_lock, 0);
+	else
+		nfsv4_relref(&lyp->nfsly_lock);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Search for a devinfo by deviceid. If one is found, return it after
+ * acquiring a reference count on it.
+ */
+struct nfscldevinfo *
+nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
+    struct nfscldevinfo *dip)
+{
+
+	NFSLOCKCLSTATE();
+	if (dip == NULL)
+		dip = nfscl_finddevinfo(clp, deviceid);
+	if (dip != NULL)
+		dip->nfsdi_refcnt++;
+	NFSUNLOCKCLSTATE();
+	return (dip);
+}
+
+/*
+ * Dereference a devinfo structure.
+ */
+static void
+nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
+{
+
+	dip->nfsdi_refcnt--;
+	if (dip->nfsdi_refcnt == 0)
+		wakeup(&dip->nfsdi_refcnt);
+}
+
+/*
+ * Dereference a devinfo structure.
+ */
+void
+nfscl_reldevinfo(struct nfscldevinfo *dip)
+{
+
+	NFSLOCKCLSTATE();
+	nfscl_reldevinfo_locked(dip);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Find a layout for this file handle. Return NULL upon failure.
+ */
+static struct nfscllayout *
+nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
+{
+	struct nfscllayout *lyp;
+
+	LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
+		if (lyp->nfsly_fhlen == fhlen &&
+		    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
+			break;
+	return (lyp);
+}
+
+/*
+ * Find a devinfo for this deviceid. Return NULL upon failure.
+ */
+static struct nfscldevinfo *
+nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
+{
+	struct nfscldevinfo *dip;
+
+	LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
+		if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
+		    == 0)
+			break;
+	return (dip);
+}
+
+/*
+ * Merge the new file layout list into the main one, maintaining it in
+ * increasing offset order.
+ */
+static void
+nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
+    struct nfsclflayouthead *newfhlp)
+{
+	struct nfsclflayout *flp, *nflp, *prevflp, *tflp;
+
+	flp = LIST_FIRST(fhlp);
+	prevflp = NULL;
+	LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
+		while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
+			prevflp = flp;
+			flp = LIST_NEXT(flp, nfsfl_list);
+		}
+		if (prevflp == NULL)
+			LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
+		else
+			LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
+		prevflp = nflp;
+	}
+}
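
For readers unfamiliar with the queue(3)-style merge above, the same ordered
insertion can be sketched on a plain singly linked list; the struct, helper
name and offsets below are hypothetical and exist only to show the
increasing-offset invariant, they are not part of the imported code.

	#include <stdio.h>

	struct seg {
		long		off;
		struct seg	*next;
	};

	/* Insert "n" so the offsets in the list stay in increasing order. */
	static void
	insert_sorted(struct seg **headp, struct seg *n)
	{
		struct seg *cur, *prev;

		prev = NULL;
		for (cur = *headp; cur != NULL && cur->off < n->off;
		    cur = cur->next)
			prev = cur;
		if (prev == NULL) {
			n->next = *headp;
			*headp = n;
		} else {
			n->next = prev->next;
			prev->next = n;
		}
	}

	int
	main(void)
	{
		struct seg a = { 0, NULL }, b = { 8192, NULL };
		struct seg c = { 4096, NULL };
		struct seg *head = NULL, *s;

		insert_sorted(&head, &a);
		insert_sorted(&head, &b);
		insert_sorted(&head, &c);
		for (s = head; s != NULL; s = s->next)
			printf("%ld\n", s->off);	/* prints 0, 4096, 8192 */
		return (0);
	}
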
+
+/*
+ * Add this nfscldevinfo to the client, if it doesn't already exist.
+ * This function consumes the structure pointed at by dip, if not NULL.
+ */
+APPLESTATIC int
+nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
+    struct nfsclflayout *flp)
+{
+	struct nfsclclient *clp;
+	struct nfscldevinfo *tdip;
+	uint8_t *dev;
+
+	NFSLOCKCLSTATE();
+	clp = nmp->nm_clp;
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		if (dip != NULL)
+			free(dip, M_NFSDEVINFO);
+		return (ENODEV);
+	}
+	if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
+		dev = flp->nfsfl_dev;
+	else
+		dev = flp->nfsfl_ffm[ind].dev;
+	tdip = nfscl_finddevinfo(clp, dev);
+	if (tdip != NULL) {
+		tdip->nfsdi_layoutrefs++;
+		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
+			flp->nfsfl_devp = tdip;
+		else
+			flp->nfsfl_ffm[ind].devp = tdip;
+		nfscl_reldevinfo_locked(tdip);
+		NFSUNLOCKCLSTATE();
+		if (dip != NULL)
+			free(dip, M_NFSDEVINFO);
+		return (0);
+	}
+	if (dip != NULL) {
+		LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
+		dip->nfsdi_layoutrefs = 1;
+		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
+			flp->nfsfl_devp = dip;
+		else
+			flp->nfsfl_ffm[ind].devp = dip;
+	}
+	NFSUNLOCKCLSTATE();
+	if (dip == NULL)
+		return (ENODEV);
+	return (0);
+}
+
+/*
+ * Free up a layout structure and associated file layout structure(s).
+ */
+APPLESTATIC void
+nfscl_freelayout(struct nfscllayout *layp)
+{
+	struct nfsclflayout *flp, *nflp;
+	struct nfsclrecalllayout *rp, *nrp;
+
+	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
+		LIST_REMOVE(flp, nfsfl_list);
+		nfscl_freeflayout(flp);
+	}
+	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
+		LIST_REMOVE(flp, nfsfl_list);
+		nfscl_freeflayout(flp);
+	}
+	LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
+		LIST_REMOVE(rp, nfsrecly_list);
+		free(rp, M_NFSLAYRECALL);
+	}
+	nfscl_layoutcnt--;
+	free(layp, M_NFSLAYOUT);
+}
+
+/*
+ * Free up a file layout structure.
+ */
+APPLESTATIC void
+nfscl_freeflayout(struct nfsclflayout *flp)
+{
+	int i, j;
+
+	if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
+		for (i = 0; i < flp->nfsfl_fhcnt; i++)
+			free(flp->nfsfl_fh[i], M_NFSFH);
+		if (flp->nfsfl_devp != NULL)
+			flp->nfsfl_devp->nfsdi_layoutrefs--;
+	}
+	if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
+		for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
+			for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
+				free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
+			if (flp->nfsfl_ffm[i].devp != NULL)	
+				flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;	
+		}
+	free(flp, M_NFSFLAYOUT);
+}
+
+/*
+ * Free up a file layout devinfo structure.
+ */
+APPLESTATIC void
+nfscl_freedevinfo(struct nfscldevinfo *dip)
+{
+
+	free(dip, M_NFSDEVINFO);
+}
+
+/*
+ * Mark any layouts that match as recalled.
+ */
+static int
+nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
+    uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
+    char *devid, struct nfsclrecalllayout *recallp)
+{
+	struct nfsclrecalllayout *rp, *orp;
+
+	recallp->nfsrecly_recalltype = recalltype;
+	recallp->nfsrecly_iomode = iomode;
+	recallp->nfsrecly_stateseqid = stateseqid;
+	recallp->nfsrecly_off = off;
+	recallp->nfsrecly_len = len;
+	recallp->nfsrecly_stat = stat;
+	recallp->nfsrecly_op = op;
+	if (devid != NULL)
+		NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
+	/*
+	 * Order the list with file returns first, followed by fsid and
+	 * "all" returns, both in increasing stateseqid order.
+	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
+	 * (I'm not sure this is correct because I find RFC5661 confusing
+	 *  on this, but hopefully it will work ok.)
+	 */
+	orp = NULL;
+	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
+		orp = rp;
+		if ((recalltype == NFSLAYOUTRETURN_FILE &&
+		     (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
+		      nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
+		    (recalltype != NFSLAYOUTRETURN_FILE &&
+		     rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
+		     nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
+			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
+			break;
+		}
+
+		/*
+		 * Put any error return on all the file returns that will
+		 * precede this one.
+		 */
+		if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
+		   stat != 0 && rp->nfsrecly_stat == 0) {
+			rp->nfsrecly_stat = stat;
+			rp->nfsrecly_op = op;
+			if (devid != NULL)
+				NFSBCOPY(devid, rp->nfsrecly_devid,
+				    NFSX_V4DEVICEID);
+		}
+	}
+	if (rp == NULL) {
+		if (orp == NULL)
+			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
+			    nfsrecly_list);
+		else
+			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
+	}
+	lyp->nfsly_flags |= NFSLY_RECALL;
+	wakeup(lyp->nfsly_clp);
+	return (0);
+}
+
+/*
+ * Compare the two seqids for ordering. The trick is that the seqids can
+ * wrap around from 0xffffffff->0, so check for the cases where one
+ * has wrapped around.
+ * Return 1 if seqid1 comes before seqid2, 0 otherwise.
+ */
+static int
+nfscl_seq(uint32_t seqid1, uint32_t seqid2)
+{
+
+	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
+		/* seqid2 has wrapped around. */
+		return (0);
+	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
+		/* seqid1 has wrapped around. */
+		return (1);
+	if (seqid1 <= seqid2)
+		return (1);
+	return (0);
+}
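
A minimal user-space sketch of the same wraparound comparison follows; the
helper name, the chosen seqid values and the assert()-based checks are
illustrative only and are not part of the imported code.

	#include <assert.h>
	#include <stdint.h>

	/* Return 1 if "a" is ordered before "b", allowing 32-bit wraparound. */
	static int
	seq_before(uint32_t a, uint32_t b)
	{

		if (b > a && (b - a) >= 0x7fffffff)
			return (0);	/* a has wrapped past b, so a is newer */
		if (a > b && (a - b) >= 0x7fffffff)
			return (1);	/* b has wrapped past a, so a is older */
		return (a <= b);
	}

	int
	main(void)
	{

		assert(seq_before(1, 2) == 1);		 /* ordinary case */
		assert(seq_before(0xfffffff0U, 5) == 1); /* 5 lies just past the wrap */
		assert(seq_before(5, 0xfffffff0U) == 0);
		return (0);
	}
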
+
+/*
+ * Do a layout return for each of the recalls.
+ */
+static void
+nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclrecalllayout *rp;
+	nfsv4stateid_t stateid;
+	int layouttype;
+
+	NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
+	stateid.seqid = lyp->nfsly_stateid.seqid;
+	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
+		layouttype = NFSLAYOUT_NFSV4_1_FILES;
+	else
+		layouttype = NFSLAYOUT_FLEXFILE;
+	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
+		(void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
+		    lyp->nfsly_fhlen, 0, layouttype,
+		    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
+		    rp->nfsrecly_off, rp->nfsrecly_len,
+		    &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
+		    rp->nfsrecly_devid);
+	}
+}
+
+/*
+ * Do the layout commit for a file layout.
+ */
+static void
+nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
+    struct ucred *cred, NFSPROC_T *p)
+{
+	struct nfsclflayout *flp;
+	uint64_t len;
+	int error, layouttype;
+
+	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
+		layouttype = NFSLAYOUT_NFSV4_1_FILES;
+	else
+		layouttype = NFSLAYOUT_FLEXFILE;
+	LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
+		if (layouttype == NFSLAYOUT_FLEXFILE &&
+		    (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
+			NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
+			/* If not supported, don't bother doing it. */
+			NFSLOCKMNT(nmp);
+			nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
+			NFSUNLOCKMNT(nmp);
+			break;
+		} else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
+			len = flp->nfsfl_end - flp->nfsfl_off;
+			error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
+			    lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
+			    lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
+			    layouttype, cred, p, NULL);
+			NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
+			if (error == NFSERR_NOTSUPP) {
+				/* If not supported, don't bother doing it. */
+				NFSLOCKMNT(nmp);
+				nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
+				NFSUNLOCKMNT(nmp);
+				break;
+			}
+		}
+	}
+}
+
+/*
+ * Commit all layouts for a file (vnode).
+ */
+int
+nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
+{
+	struct nfsclclient *clp;
+	struct nfscllayout *lyp;
+	struct nfsnode *np = VTONFS(vp);
+	mount_t mp;
+	struct nfsmount *nmp;
+
+	mp = vnode_mount(vp);
+	nmp = VFSTONFS(mp);
+	if (NFSHASNOLAYOUTCOMMIT(nmp))
+		return (0);
+	NFSLOCKCLSTATE();
+	clp = nmp->nm_clp;
+	if (clp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return (EPERM);
+	}
+	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+	if (lyp == NULL) {
+		NFSUNLOCKCLSTATE();
+		return (EPERM);
+	}
+	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
+	if (NFSCL_FORCEDISM(mp)) {
+		NFSUNLOCKCLSTATE();
+		return (EPERM);
+	}
+tryagain:
+	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
+		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
+		NFSUNLOCKCLSTATE();
+		NFSCL_DEBUG(4, "do layoutcommit2\n");
+		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
+		NFSLOCKCLSTATE();
+		goto tryagain;
+	}
+	nfsv4_relref(&lyp->nfsly_lock);
+	NFSUNLOCKCLSTATE();
+	return (0);
+}
+
diff --git a/freebsd/sys/fs/nfsclient/nfs_clsubs.c b/freebsd/sys/fs/nfsclient/nfs_clsubs.c
new file mode 100644
index 0000000..1c69d93
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clsubs.c
@@ -0,0 +1,391 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from nfs_subs.c  8.8 (Berkeley) 5/22/95
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * These functions support the macros and help fiddle mbuf chains for
+ * the nfs op functions. They do things like create the rpc header and
+ * copy data between mbuf chains and uio lists.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/proc.h>
+#include <sys/mount.h>
+#include <sys/vnode.h>
+#include <sys/namei.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/malloc.h>
+#include <sys/sysent.h>
+#include <sys/syscall.h>
+#include <sys/sysproto.h>
+#include <sys/taskqueue.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/uma.h>
+
+#include <fs/nfs/nfsport.h>
+#include <fs/nfsclient/nfsnode.h>
+#include <fs/nfsclient/nfsmount.h>
+#include <fs/nfsclient/nfs.h>
+#include <fs/nfsclient/nfs_kdtrace.h>
+
+#include <netinet/in.h>
+
+/*
+ * Note that stdarg.h and the ANSI style va_start macro are used for both
+ * ANSI and traditional C compilers.
+ */
+#include <machine/stdarg.h>
+
+extern struct mtx ncl_iod_mutex;
+extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
+extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
+extern int ncl_numasync;
+extern unsigned int ncl_iodmax;
+extern struct nfsstatsv1 nfsstatsv1;
+
+struct task	ncl_nfsiodnew_task;
+
+int
+ncl_uninit(struct vfsconf *vfsp)
+{
+	/*
+	 * XXX: Unloading of nfscl module is unsupported.
+	 */
+#if 0
+	int i;
+
+	/*
+	 * Tell all nfsiod processes to exit. Clear ncl_iodmax, and wakeup
+	 * any sleeping nfsiods so they check ncl_iodmax and exit.
+	 */
+	NFSLOCKIOD();
+	ncl_iodmax = 0;
+	for (i = 0; i < ncl_numasync; i++)
+		if (ncl_iodwant[i] == NFSIOD_AVAILABLE)
+			wakeup(&ncl_iodwant[i]);
+	/* The last nfsiod to exit will wake us up when ncl_numasync hits 0 */
+	while (ncl_numasync)
+		msleep(&ncl_numasync, &ncl_iod_mutex, PWAIT, "ioddie", 0);
+	NFSUNLOCKIOD();
+	ncl_nhuninit();
+	return (0);
+#else
+	return (EOPNOTSUPP);
+#endif
+}
+
+void 
+ncl_dircookie_lock(struct nfsnode *np)
+{
+	NFSLOCKNODE(np);
+	while (np->n_flag & NDIRCOOKIELK)
+		(void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0);
+	np->n_flag |= NDIRCOOKIELK;
+	NFSUNLOCKNODE(np);
+}
+
+void 
+ncl_dircookie_unlock(struct nfsnode *np)
+{
+	NFSLOCKNODE(np);
+	np->n_flag &= ~NDIRCOOKIELK;
+	wakeup(&np->n_flag);
+	NFSUNLOCKNODE(np);
+}
+
+bool
+ncl_excl_start(struct vnode *vp)
+{
+	struct nfsnode *np;
+	int vn_lk;
+
+	ASSERT_VOP_LOCKED(vp, "ncl_excl_start");
+	vn_lk = NFSVOPISLOCKED(vp);
+	if (vn_lk == LK_EXCLUSIVE)
+		return (false);
+	KASSERT(vn_lk == LK_SHARED,
+	    ("ncl_excl_start: wrong vnode lock %d", vn_lk));
+	/* Ensure exclusive access, this might block */
+	np = VTONFS(vp);
+	lockmgr(&np->n_excl, LK_EXCLUSIVE, NULL);
+	return (true);
+}
+
+void
+ncl_excl_finish(struct vnode *vp, bool old_lock)
+{
+	struct nfsnode *np;
+
+	if (!old_lock)
+		return;
+	np = VTONFS(vp);
+	lockmgr(&np->n_excl, LK_RELEASE, NULL);
+}
+
+#ifdef NFS_ACDEBUG
+#include <sys/sysctl.h>
+SYSCTL_DECL(_vfs_nfs);
+static int nfs_acdebug;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0, "");
+#endif
+
+/*
+ * Check the time stamp.
+ * If the cache is valid, copy the cached attributes to *vaper and return 0;
+ * otherwise return an error.
+ */
+int
+ncl_getattrcache(struct vnode *vp, struct vattr *vaper)
+{
+	struct nfsnode *np;
+	struct vattr *vap;
+	struct nfsmount *nmp;
+	int timeo, mustflush;
+	u_quad_t nsize;
+	bool setnsize;
+	
+	np = VTONFS(vp);
+	vap = &np->n_vattr.na_vattr;
+	nmp = VFSTONFS(vp->v_mount);
+	mustflush = nfscl_mustflush(vp);	/* must be before mtx_lock() */
+	NFSLOCKNODE(np);
+	/* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
+	timeo = (time_second - np->n_mtime.tv_sec) / 10;
+
+#ifdef NFS_ACDEBUG
+	if (nfs_acdebug>1)
+		printf("ncl_getattrcache: initial timeo = %d\n", timeo);
+#endif
+
+	if (vap->va_type == VDIR) {
+		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin)
+			timeo = nmp->nm_acdirmin;
+		else if (timeo > nmp->nm_acdirmax)
+			timeo = nmp->nm_acdirmax;
+	} else {
+		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin)
+			timeo = nmp->nm_acregmin;
+		else if (timeo > nmp->nm_acregmax)
+			timeo = nmp->nm_acregmax;
+	}
+
+#ifdef NFS_ACDEBUG
+	if (nfs_acdebug > 2)
+		printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n",
+		    nmp->nm_acregmin, nmp->nm_acregmax,
+		    nmp->nm_acdirmin, nmp->nm_acdirmax);
+
+	if (nfs_acdebug)
+		printf("ncl_getattrcache: age = %d; final timeo = %d\n",
+		    (time_second - np->n_attrstamp), timeo);
+#endif
+
+	if ((time_second - np->n_attrstamp) >= timeo &&
+	    (mustflush != 0 || np->n_attrstamp == 0)) {
+		nfsstatsv1.attrcache_misses++;
+		NFSUNLOCKNODE(np);
+		KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
+		return( ENOENT);
+	}
+	nfsstatsv1.attrcache_hits++;
+	setnsize = false;
+	if (vap->va_size != np->n_size) {
+		if (vap->va_type == VREG) {
+			if (np->n_flag & NMODIFIED) {
+				if (vap->va_size < np->n_size)
+					vap->va_size = np->n_size;
+				else
+					np->n_size = vap->va_size;
+			} else {
+				np->n_size = vap->va_size;
+			}
+			setnsize = ncl_pager_setsize(vp, &nsize);
+		} else {
+			np->n_size = vap->va_size;
+		}
+	}
+	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
+	if (np->n_flag & NCHG) {
+		if (np->n_flag & NACC)
+			vaper->va_atime = np->n_atim;
+		if (np->n_flag & NUPD)
+			vaper->va_mtime = np->n_mtim;
+	}
+	NFSUNLOCKNODE(np);
+	if (setnsize)
+		vnode_pager_setsize(vp, nsize);
+	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);
+	return (0);
+}
+
+static nfsuint64 nfs_nullcookie = { { 0, 0 } };
+/*
+ * This function finds the directory cookie that corresponds to the
+ * logical byte offset given.
+ */
+nfsuint64 *
+ncl_getcookie(struct nfsnode *np, off_t off, int add)
+{
+	struct nfsdmap *dp, *dp2;
+	int pos;
+	nfsuint64 *retval = NULL;
+	
+	pos = (uoff_t)off / NFS_DIRBLKSIZ;
+	if (pos == 0 || off < 0) {
+		KASSERT(!add, ("nfs getcookie add at <= 0"));
+		return (&nfs_nullcookie);
+	}
+	pos--;
+	dp = LIST_FIRST(&np->n_cookies);
+	if (!dp) {
+		if (add) {
+			dp = malloc(sizeof (struct nfsdmap),
+				M_NFSDIROFF, M_WAITOK);
+			dp->ndm_eocookie = 0;
+			LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list);
+		} else
+			goto out;
+	}
+	while (pos >= NFSNUMCOOKIES) {
+		pos -= NFSNUMCOOKIES;
+		if (LIST_NEXT(dp, ndm_list)) {
+			if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
+			    pos >= dp->ndm_eocookie)
+				goto out;
+			dp = LIST_NEXT(dp, ndm_list);
+		} else if (add) {
+			dp2 = malloc(sizeof (struct nfsdmap),
+				M_NFSDIROFF, M_WAITOK);
+			dp2->ndm_eocookie = 0;
+			LIST_INSERT_AFTER(dp, dp2, ndm_list);
+			dp = dp2;
+		} else
+			goto out;
+	}
+	if (pos >= dp->ndm_eocookie) {
+		if (add)
+			dp->ndm_eocookie = pos + 1;
+		else
+			goto out;
+	}
+	retval = &dp->ndm_cookies[pos];
+out:
+	return (retval);
+}
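
The offset-to-cookie arithmetic walked by the loop above can be shown with
assumed constants; the NFS_DIRBLKSIZ of 8192 and the per-nfsdmap cookie count
of 31 are assumptions made for this sketch, not values taken from the import.

	#include <stdio.h>

	int
	main(void)
	{
		const long dirblksiz = 8192;	/* assumed NFS_DIRBLKSIZ */
		const long percookies = 31;	/* assumed NFSNUMCOOKIES */
		long off = 40 * dirblksiz;	/* logical byte offset of block 40 */
		long pos = off / dirblksiz;	/* 40 */

		if (pos == 0) {
			printf("offset 0 maps to the null cookie\n");
			return (0);
		}
		pos--;				/* block n is read via cookie n-1 */
		printf("nfsdmap block %ld, slot %ld\n",
		    pos / percookies, pos % percookies);	/* block 1, slot 8 */
		return (0);
	}
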
+
+/*
+ * Invalidate cached directory information, except for the actual directory
+ * blocks (which are invalidated separately).
+ * Done mainly to avoid the use of stale offset cookies.
+ */
+void
+ncl_invaldir(struct vnode *vp)
+{
+	struct nfsnode *np = VTONFS(vp);
+
+	KASSERT(vp->v_type == VDIR, ("nfs: invaldir not dir"));
+	ncl_dircookie_lock(np);
+	np->n_direofoffset = 0;
+	np->n_cookieverf.nfsuquad[0] = 0;
+	np->n_cookieverf.nfsuquad[1] = 0;
+	if (LIST_FIRST(&np->n_cookies))
+		LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0;
+	ncl_dircookie_unlock(np);
+}
+
+/*
+ * The write verifier has changed (probably due to a server reboot), so all
+ * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
+ * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
+ * and B_CLUSTEROK flags.  Once done the new write verifier can be set for the
+ * mount point.
+ *
+ * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data
+ * writes are not clusterable.
+ */
+void
+ncl_clearcommit(struct mount *mp)
+{
+	struct vnode *vp, *nvp;
+	struct buf *bp, *nbp;
+	struct bufobj *bo;
+
+	MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
+		bo = &vp->v_bufobj;
+		vholdl(vp);
+		VI_UNLOCK(vp);
+		BO_LOCK(bo);
+		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
+			if (!BUF_ISLOCKED(bp) &&
+			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
+				== (B_DELWRI | B_NEEDCOMMIT))
+				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
+		}
+		BO_UNLOCK(bo);
+		vdrop(vp);
+	}
+}
+
+/*
+ * Called once to initialize data structures...
+ */
+int
+ncl_init(struct vfsconf *vfsp)
+{
+	int i;
+
+	/* Ensure async daemons disabled */
+	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
+		ncl_iodwant[i] = NFSIOD_NOT_AVAILABLE;
+		ncl_iodmount[i] = NULL;
+	}
+	TASK_INIT(&ncl_nfsiodnew_task, 0, ncl_nfsiodnew_tq, NULL);
+	ncl_nhinit();			/* Init the nfsnode table */
+
+	return (0);
+}
+
diff --git a/freebsd/sys/fs/nfsclient/nfs_clvfsops.c b/freebsd/sys/fs/nfsclient/nfs_clvfsops.c
new file mode 100644
index 0000000..348a64a
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clvfsops.c
@@ -0,0 +1,2051 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993, 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from nfs_vfsops.c	8.12 (Berkeley) 5/20/95
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "opt_bootp.h"
+#include "opt_nfsroot.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/clock.h>
+#include <sys/jail.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/mount.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/vnode.h>
+#include <sys/signalvar.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/uma.h>
+
+#include <net/if.h>
+#include <net/route.h>
+#include <netinet/in.h>
+
+#include <fs/nfs/nfsport.h>
+#include <fs/nfsclient/nfsnode.h>
+#include <fs/nfsclient/nfsmount.h>
+#include <fs/nfsclient/nfs.h>
+#include <nfs/nfsdiskless.h>
+
+FEATURE(nfscl, "NFSv4 client");
+
+extern int nfscl_ticks;
+extern struct timeval nfsboottime;
+extern int nfsrv_useacl;
+extern int nfscl_debuglevel;
+extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
+extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
+extern struct mtx ncl_iod_mutex;
+NFSCLSTATEMUTEX;
+extern struct mtx nfsrv_dslock_mtx;
+
+MALLOC_DEFINE(M_NEWNFSREQ, "newnfsclient_req", "NFS request header");
+MALLOC_DEFINE(M_NEWNFSMNT, "newnfsmnt", "NFS mount struct");
+
+SYSCTL_DECL(_vfs_nfs);
+static int nfs_ip_paranoia = 1;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_ip_paranoia, CTLFLAG_RW,
+    &nfs_ip_paranoia, 0, "");
+static int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY;
+SYSCTL_INT(_vfs_nfs, NFS_TPRINTF_INITIAL_DELAY,
+        downdelayinitial, CTLFLAG_RW, &nfs_tprintf_initial_delay, 0, "");
+/* how long between console messages "nfs server foo not responding" */
+static int nfs_tprintf_delay = NFS_TPRINTF_DELAY;
+SYSCTL_INT(_vfs_nfs, NFS_TPRINTF_DELAY,
+        downdelayinterval, CTLFLAG_RW, &nfs_tprintf_delay, 0, "");
+#ifdef NFS_DEBUG
+int nfs_debug;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, debug, CTLFLAG_RW, &nfs_debug, 0,
+    "Toggle debug flag");
+#endif
+
+static int	nfs_mountroot(struct mount *);
+static void	nfs_sec_name(char *, int *);
+static void	nfs_decode_args(struct mount *mp, struct nfsmount *nmp,
+		    struct nfs_args *argp, const char *, struct ucred *,
+		    struct thread *);
+static int	mountnfs(struct nfs_args *, struct mount *,
+		    struct sockaddr *, char *, u_char *, int, u_char *, int,
+		    u_char *, int, struct vnode **, struct ucred *,
+		    struct thread *, int, int, int);
+static void	nfs_getnlminfo(struct vnode *, uint8_t *, size_t *,
+		    struct sockaddr_storage *, int *, off_t *,
+		    struct timeval *);
+static vfs_mount_t nfs_mount;
+static vfs_cmount_t nfs_cmount;
+static vfs_unmount_t nfs_unmount;
+static vfs_root_t nfs_root;
+static vfs_statfs_t nfs_statfs;
+static vfs_sync_t nfs_sync;
+static vfs_sysctl_t nfs_sysctl;
+static vfs_purge_t nfs_purge;
+
+/*
+ * nfs vfs operations.
+ */
+static struct vfsops nfs_vfsops = {
+	.vfs_init =		ncl_init,
+	.vfs_mount =		nfs_mount,
+	.vfs_cmount =		nfs_cmount,
+	.vfs_root =		nfs_root,
+	.vfs_statfs =		nfs_statfs,
+	.vfs_sync =		nfs_sync,
+	.vfs_uninit =		ncl_uninit,
+	.vfs_unmount =		nfs_unmount,
+	.vfs_sysctl =		nfs_sysctl,
+	.vfs_purge =		nfs_purge,
+};
+VFS_SET(nfs_vfsops, nfs, VFCF_NETWORK | VFCF_SBDRY);
+
+/* So that loader and kldload(2) can find us, wherever we are.. */
+MODULE_VERSION(nfs, 1);
+MODULE_DEPEND(nfs, nfscommon, 1, 1, 1);
+MODULE_DEPEND(nfs, krpc, 1, 1, 1);
+MODULE_DEPEND(nfs, nfssvc, 1, 1, 1);
+MODULE_DEPEND(nfs, nfslock, 1, 1, 1);
+
+/*
+ * This structure is now defined in sys/nfs/nfs_diskless.c so that it
+ * can be shared by both NFS clients. It is declared here so that it
+ * will be defined for kernels built without NFS_ROOT, although it
+ * isn't used in that case.
+ */
+#if !defined(NFS_ROOT)
+struct nfs_diskless	nfs_diskless = { { { 0 } } };
+struct nfsv3_diskless	nfsv3_diskless = { { { 0 } } };
+int			nfs_diskless_valid = 0;
+#endif
+
+SYSCTL_INT(_vfs_nfs, OID_AUTO, diskless_valid, CTLFLAG_RD,
+    &nfs_diskless_valid, 0,
+    "Has the diskless struct been filled correctly");
+
+SYSCTL_STRING(_vfs_nfs, OID_AUTO, diskless_rootpath, CTLFLAG_RD,
+    nfsv3_diskless.root_hostnam, 0, "Path to nfs root");
+
+SYSCTL_OPAQUE(_vfs_nfs, OID_AUTO, diskless_rootaddr, CTLFLAG_RD,
+    &nfsv3_diskless.root_saddr, sizeof(nfsv3_diskless.root_saddr),
+    "%Ssockaddr_in", "Diskless root nfs address");
+
+
+void		newnfsargs_ntoh(struct nfs_args *);
+static int	nfs_mountdiskless(char *,
+		    struct sockaddr_in *, struct nfs_args *,
+		    struct thread *, struct vnode **, struct mount *);
+static void	nfs_convert_diskless(void);
+static void	nfs_convert_oargs(struct nfs_args *args,
+		    struct onfs_args *oargs);
+
+int
+newnfs_iosize(struct nfsmount *nmp)
+{
+	int iosize, maxio;
+
+	/* First, set the upper limit for iosize */
+	if (nmp->nm_flag & NFSMNT_NFSV4) {
+		maxio = NFS_MAXBSIZE;
+	} else if (nmp->nm_flag & NFSMNT_NFSV3) {
+		if (nmp->nm_sotype == SOCK_DGRAM)
+			maxio = NFS_MAXDGRAMDATA;
+		else
+			maxio = NFS_MAXBSIZE;
+	} else {
+		maxio = NFS_V2MAXDATA;
+	}
+	if (nmp->nm_rsize > maxio || nmp->nm_rsize == 0)
+		nmp->nm_rsize = maxio;
+	if (nmp->nm_rsize > NFS_MAXBSIZE)
+		nmp->nm_rsize = NFS_MAXBSIZE;
+	if (nmp->nm_readdirsize > maxio || nmp->nm_readdirsize == 0)
+		nmp->nm_readdirsize = maxio;
+	if (nmp->nm_readdirsize > nmp->nm_rsize)
+		nmp->nm_readdirsize = nmp->nm_rsize;
+	if (nmp->nm_wsize > maxio || nmp->nm_wsize == 0)
+		nmp->nm_wsize = maxio;
+	if (nmp->nm_wsize > NFS_MAXBSIZE)
+		nmp->nm_wsize = NFS_MAXBSIZE;
+
+	/*
+	 * Calculate the size used for io buffers.  Use the larger
+	 * of the two sizes to minimise nfs requests but make sure
+	 * that it is at least one VM page to avoid wasting buffer
+	 * space.  It must also be at least NFS_DIRBLKSIZ, since
+	 * that is the buffer size used for directories.
+	 */
+	iosize = imax(nmp->nm_rsize, nmp->nm_wsize);
+	iosize = imax(iosize, PAGE_SIZE);
+	iosize = imax(iosize, NFS_DIRBLKSIZ);
+	nmp->nm_mountp->mnt_stat.f_iosize = iosize;
+	return (iosize);
+}
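
A small stand-alone illustration of the buffer-size selection described in the
comment above; the page size, directory block size and imax() stand-in are
assumed values chosen for the example, not the kernel's definitions.

	#include <stdio.h>

	#define EX_PAGE_SIZE	4096	/* assumed VM page size */
	#define EX_DIRBLKSIZ	8192	/* assumed NFS_DIRBLKSIZ */

	static int
	ex_imax(int a, int b)
	{

		return (a > b ? a : b);
	}

	int
	main(void)
	{
		int rsize = 8192, wsize = 32768, iosize;

		iosize = ex_imax(rsize, wsize);		/* larger transfer size */
		iosize = ex_imax(iosize, EX_PAGE_SIZE);	/* at least one VM page */
		iosize = ex_imax(iosize, EX_DIRBLKSIZ);	/* at least a dir block */
		printf("f_iosize = %d\n", iosize);	/* prints 32768 */
		return (0);
	}
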
+
+static void
+nfs_convert_oargs(struct nfs_args *args, struct onfs_args *oargs)
+{
+
+	args->version = NFS_ARGSVERSION;
+	args->addr = oargs->addr;
+	args->addrlen = oargs->addrlen;
+	args->sotype = oargs->sotype;
+	args->proto = oargs->proto;
+	args->fh = oargs->fh;
+	args->fhsize = oargs->fhsize;
+	args->flags = oargs->flags;
+	args->wsize = oargs->wsize;
+	args->rsize = oargs->rsize;
+	args->readdirsize = oargs->readdirsize;
+	args->timeo = oargs->timeo;
+	args->retrans = oargs->retrans;
+	args->readahead = oargs->readahead;
+	args->hostname = oargs->hostname;
+}
+
+static void
+nfs_convert_diskless(void)
+{
+
+	bcopy(&nfs_diskless.myif, &nfsv3_diskless.myif,
+		sizeof(struct ifaliasreq));
+	bcopy(&nfs_diskless.mygateway, &nfsv3_diskless.mygateway,
+		sizeof(struct sockaddr_in));
+	nfs_convert_oargs(&nfsv3_diskless.root_args,&nfs_diskless.root_args);
+	if (nfsv3_diskless.root_args.flags & NFSMNT_NFSV3) {
+		nfsv3_diskless.root_fhsize = NFSX_MYFH;
+		bcopy(nfs_diskless.root_fh, nfsv3_diskless.root_fh, NFSX_MYFH);
+	} else {
+		nfsv3_diskless.root_fhsize = NFSX_V2FH;
+		bcopy(nfs_diskless.root_fh, nfsv3_diskless.root_fh, NFSX_V2FH);
+	}
+	bcopy(&nfs_diskless.root_saddr,&nfsv3_diskless.root_saddr,
+		sizeof(struct sockaddr_in));
+	bcopy(nfs_diskless.root_hostnam, nfsv3_diskless.root_hostnam, MNAMELEN);
+	nfsv3_diskless.root_time = nfs_diskless.root_time;
+	bcopy(nfs_diskless.my_hostnam, nfsv3_diskless.my_hostnam,
+		MAXHOSTNAMELEN);
+	nfs_diskless_valid = 3;
+}
+
+/*
+ * nfs statfs call
+ */
+static int
+nfs_statfs(struct mount *mp, struct statfs *sbp)
+{
+	struct vnode *vp;
+	struct thread *td;
+	struct nfsmount *nmp = VFSTONFS(mp);
+	struct nfsvattr nfsva;
+	struct nfsfsinfo fs;
+	struct nfsstatfs sb;
+	int error = 0, attrflag, gotfsinfo = 0, ret;
+	struct nfsnode *np;
+
+	td = curthread;
+
+	error = vfs_busy(mp, MBF_NOWAIT);
+	if (error)
+		return (error);
+	error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
+	if (error) {
+		vfs_unbusy(mp);
+		return (error);
+	}
+	vp = NFSTOV(np);
+	mtx_lock(&nmp->nm_mtx);
+	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp)) {
+		mtx_unlock(&nmp->nm_mtx);
+		error = nfsrpc_fsinfo(vp, &fs, td->td_ucred, td, &nfsva,
+		    &attrflag, NULL);
+		if (!error)
+			gotfsinfo = 1;
+	} else
+		mtx_unlock(&nmp->nm_mtx);
+	if (!error)
+		error = nfsrpc_statfs(vp, &sb, &fs, td->td_ucred, td, &nfsva,
+		    &attrflag, NULL);
+	if (error != 0)
+		NFSCL_DEBUG(2, "statfs=%d\n", error);
+	if (attrflag == 0) {
+		ret = nfsrpc_getattrnovp(nmp, nmp->nm_fh, nmp->nm_fhsize, 1,
+		    td->td_ucred, td, &nfsva, NULL, NULL);
+		if (ret) {
+			/*
+			 * Just set default values to get things going.
+			 */
+			NFSBZERO((caddr_t)&nfsva, sizeof (struct nfsvattr));
+			nfsva.na_vattr.va_type = VDIR;
+			nfsva.na_vattr.va_mode = 0777;
+			nfsva.na_vattr.va_nlink = 100;
+			nfsva.na_vattr.va_uid = (uid_t)0;
+			nfsva.na_vattr.va_gid = (gid_t)0;
+			nfsva.na_vattr.va_fileid = 2;
+			nfsva.na_vattr.va_gen = 1;
+			nfsva.na_vattr.va_blocksize = NFS_FABLKSIZE;
+			nfsva.na_vattr.va_size = 512 * 1024;
+		}
+	}
+	(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+	if (!error) {
+	    mtx_lock(&nmp->nm_mtx);
+	    if (gotfsinfo || (nmp->nm_flag & NFSMNT_NFSV4))
+		nfscl_loadfsinfo(nmp, &fs);
+	    nfscl_loadsbinfo(nmp, &sb, sbp);
+	    sbp->f_iosize = newnfs_iosize(nmp);
+	    mtx_unlock(&nmp->nm_mtx);
+	    if (sbp != &mp->mnt_stat) {
+		bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
+		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
+	    }
+	    strncpy(&sbp->f_fstypename[0], mp->mnt_vfc->vfc_name, MFSNAMELEN);
+	} else if (NFS_ISV4(vp)) {
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	}
+	vput(vp);
+	vfs_unbusy(mp);
+	return (error);
+}
+
+/*
+ * nfs version 3 fsinfo rpc call
+ */
+int
+ncl_fsinfo(struct nfsmount *nmp, struct vnode *vp, struct ucred *cred,
+    struct thread *td)
+{
+	struct nfsfsinfo fs;
+	struct nfsvattr nfsva;
+	int error, attrflag;
+	
+	error = nfsrpc_fsinfo(vp, &fs, cred, td, &nfsva, &attrflag, NULL);
+	if (!error) {
+		if (attrflag)
+			(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
+			    1);
+		mtx_lock(&nmp->nm_mtx);
+		nfscl_loadfsinfo(nmp, &fs);
+		mtx_unlock(&nmp->nm_mtx);
+	}
+	return (error);
+}
+
+/*
+ * Mount a remote root fs via nfs. This depends on the info in the
+ * nfs_diskless structure that has been filled in properly by some primary
+ * bootstrap.
+ * It goes something like this:
+ * - do enough of "ifconfig" by calling ifioctl() so that the system
+ *   can talk to the server
+ * - If nfs_diskless.mygateway is filled in, use that address as
+ *   a default gateway.
+ * - build the rootfs mount point and call mountnfs() to do the rest.
+ *
+ * It is assumed to be safe to read, modify, and write the nfsv3_diskless
+ * structure, as well as other global NFS client variables here, as
+ * nfs_mountroot() will be called once in the boot before any other NFS
+ * client activity occurs.
+ */
+static int
+nfs_mountroot(struct mount *mp)
+{
+	struct thread *td = curthread;
+	struct nfsv3_diskless *nd = &nfsv3_diskless;
+	struct socket *so;
+	struct vnode *vp;
+	struct ifreq ir;
+	int error;
+	u_long l;
+	char buf[128];
+	char *cp;
+
+#if defined(BOOTP_NFSROOT) && defined(BOOTP)
+	bootpc_init();		/* use bootp to get nfs_diskless filled in */
+#elif defined(NFS_ROOT)
+	nfs_setup_diskless();
+#endif
+
+	if (nfs_diskless_valid == 0)
+		return (-1);
+	if (nfs_diskless_valid == 1)
+		nfs_convert_diskless();
+
+	/*
+	 * Do enough of ifconfig(8) so that the critical net interface can
+	 * talk to the server.
+	 */
+	error = socreate(nd->myif.ifra_addr.sa_family, &so, nd->root_args.sotype, 0,
+	    td->td_ucred, td);
+	if (error)
+		panic("nfs_mountroot: socreate(%04x): %d",
+			nd->myif.ifra_addr.sa_family, error);
+
+#if 0 /* XXX Bad idea */
+	/*
+	 * We might not have been told the right interface, so we pass
+	 * over the first ten interfaces of the same kind, until we get
+	 * one of them configured.
+	 */
+
+	for (i = strlen(nd->myif.ifra_name) - 1;
+		nd->myif.ifra_name[i] >= '0' &&
+		nd->myif.ifra_name[i] <= '9';
+		nd->myif.ifra_name[i] ++) {
+		error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, td);
+		if(!error)
+			break;
+	}
+#endif
+	error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, td);
+	if (error)
+		panic("nfs_mountroot: SIOCAIFADDR: %d", error);
+	if ((cp = kern_getenv("boot.netif.mtu")) != NULL) {
+		ir.ifr_mtu = strtol(cp, NULL, 10);
+		bcopy(nd->myif.ifra_name, ir.ifr_name, IFNAMSIZ);
+		freeenv(cp);
+		error = ifioctl(so, SIOCSIFMTU, (caddr_t)&ir, td);
+		if (error)
+			printf("nfs_mountroot: SIOCSIFMTU: %d", error);
+	}
+	soclose(so);
+
+	/*
+	 * If the gateway field is filled in, set it as the default route.
+	 * Note that pxeboot will set a default route of 0 if the route
+	 * is not set by the DHCP server.  Check also for a value of 0
+	 * to avoid panicking inappropriately in that situation.
+	 */
+	if (nd->mygateway.sin_len != 0 &&
+	    nd->mygateway.sin_addr.s_addr != 0) {
+		struct sockaddr_in mask, sin;
+
+		bzero((caddr_t)&mask, sizeof(mask));
+		sin = mask;
+		sin.sin_family = AF_INET;
+		sin.sin_len = sizeof(sin);
+                /* XXX MRT use table 0 for this sort of thing */
+		CURVNET_SET(TD_TO_VNET(td));
+		error = rtrequest_fib(RTM_ADD, (struct sockaddr *)&sin,
+		    (struct sockaddr *)&nd->mygateway,
+		    (struct sockaddr *)&mask,
+		    RTF_UP | RTF_GATEWAY, NULL, RT_DEFAULT_FIB);
+		CURVNET_RESTORE();
+		if (error)
+			panic("nfs_mountroot: RTM_ADD: %d", error);
+	}
+
+	/*
+	 * Create the rootfs mount point.
+	 */
+	nd->root_args.fh = nd->root_fh;
+	nd->root_args.fhsize = nd->root_fhsize;
+	l = ntohl(nd->root_saddr.sin_addr.s_addr);
+	snprintf(buf, sizeof(buf), "%ld.%ld.%ld.%ld:%s",
+		(l >> 24) & 0xff, (l >> 16) & 0xff,
+		(l >>  8) & 0xff, (l >>  0) & 0xff, nd->root_hostnam);
+	printf("NFS ROOT: %s\n", buf);
+	nd->root_args.hostname = buf;
+	if ((error = nfs_mountdiskless(buf,
+	    &nd->root_saddr, &nd->root_args, td, &vp, mp)) != 0) {
+		return (error);
+	}
+
+	/*
+	 * This is not really an nfs issue, but it is much easier to
+	 * set hostname here and then let the "/etc/rc.xxx" files
+	 * mount the right /var based upon its preset value.
+	 */
+	mtx_lock(&prison0.pr_mtx);
+	strlcpy(prison0.pr_hostname, nd->my_hostnam,
+	    sizeof(prison0.pr_hostname));
+	mtx_unlock(&prison0.pr_mtx);
+	inittodr(ntohl(nd->root_time));
+	return (0);
+}
+
+/*
+ * Internal version of mount system call for diskless setup.
+ */
+static int
+nfs_mountdiskless(char *path,
+    struct sockaddr_in *sin, struct nfs_args *args, struct thread *td,
+    struct vnode **vpp, struct mount *mp)
+{
+	struct sockaddr *nam;
+	int dirlen, error;
+	char *dirpath;
+
+	/*
+	 * Find the directory path in "path", which also has the server's
+	 * name/ip address in it.
+	 */
+	dirpath = strchr(path, ':');
+	if (dirpath != NULL)
+		dirlen = strlen(++dirpath);
+	else
+		dirlen = 0;
+	nam = sodupsockaddr((struct sockaddr *)sin, M_WAITOK);
+	if ((error = mountnfs(args, mp, nam, path, NULL, 0, dirpath, dirlen,
+	    NULL, 0, vpp, td->td_ucred, td, NFS_DEFAULT_NAMETIMEO, 
+	    NFS_DEFAULT_NEGNAMETIMEO, 0)) != 0) {
+		printf("nfs_mountroot: mount %s on /: %d\n", path, error);
+		return (error);
+	}
+	return (0);
+}
+
+static void
+nfs_sec_name(char *sec, int *flagsp)
+{
+	if (!strcmp(sec, "krb5"))
+		*flagsp |= NFSMNT_KERB;
+	else if (!strcmp(sec, "krb5i"))
+		*flagsp |= (NFSMNT_KERB | NFSMNT_INTEGRITY);
+	else if (!strcmp(sec, "krb5p"))
+		*flagsp |= (NFSMNT_KERB | NFSMNT_PRIVACY);
+}
+
+static void
+nfs_decode_args(struct mount *mp, struct nfsmount *nmp, struct nfs_args *argp,
+    const char *hostname, struct ucred *cred, struct thread *td)
+{
+	int adjsock;
+	char *p;
+
+	/*
+	 * Set read-only flag if requested; otherwise, clear it if this is
+	 * an update.  If this is not an update, then either the read-only
+	 * flag is already clear, or this is a root mount and it was set
+	 * intentionally at some previous point.
+	 */
+	if (vfs_getopt(mp->mnt_optnew, "ro", NULL, NULL) == 0) {
+		MNT_ILOCK(mp);
+		mp->mnt_flag |= MNT_RDONLY;
+		MNT_IUNLOCK(mp);
+	} else if (mp->mnt_flag & MNT_UPDATE) {
+		MNT_ILOCK(mp);
+		mp->mnt_flag &= ~MNT_RDONLY;
+		MNT_IUNLOCK(mp);
+	}
+
+	/*
+	 * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes
+	 * no sense in that context.  Also, set up appropriate retransmit
+	 * and soft timeout behavior.
+	 */
+	if (argp->sotype == SOCK_STREAM) {
+		nmp->nm_flag &= ~NFSMNT_NOCONN;
+		nmp->nm_timeo = NFS_MAXTIMEO;
+		if ((argp->flags & NFSMNT_NFSV4) != 0)
+			nmp->nm_retry = INT_MAX;
+		else
+			nmp->nm_retry = NFS_RETRANS_TCP;
+	}
+
+	/* Also clear RDIRPLUS if NFSv2, it crashes some servers */
+	if ((argp->flags & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) == 0) {
+		argp->flags &= ~NFSMNT_RDIRPLUS;
+		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
+	}
+
+	/* Clear ONEOPENOWN for NFSv2, 3 and 4.0. */
+	if (nmp->nm_minorvers == 0) {
+		argp->flags &= ~NFSMNT_ONEOPENOWN;
+		nmp->nm_flag &= ~NFSMNT_ONEOPENOWN;
+	}
+
+	/* Re-bind if rsrvd port requested and wasn't on one */
+	adjsock = !(nmp->nm_flag & NFSMNT_RESVPORT)
+		  && (argp->flags & NFSMNT_RESVPORT);
+	/* Also re-bind if we're switching to/from a connected UDP socket */
+	adjsock |= ((nmp->nm_flag & NFSMNT_NOCONN) !=
+		    (argp->flags & NFSMNT_NOCONN));
+
+	/* Update flags atomically.  Don't change the lock bits. */
+	nmp->nm_flag = argp->flags | nmp->nm_flag;
+
+	if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) {
+		nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10;
+		if (nmp->nm_timeo < NFS_MINTIMEO)
+			nmp->nm_timeo = NFS_MINTIMEO;
+		else if (nmp->nm_timeo > NFS_MAXTIMEO)
+			nmp->nm_timeo = NFS_MAXTIMEO;
+	}
+
+	if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) {
+		nmp->nm_retry = argp->retrans;
+		if (nmp->nm_retry > NFS_MAXREXMIT)
+			nmp->nm_retry = NFS_MAXREXMIT;
+	}
+
+	if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) {
+		nmp->nm_wsize = argp->wsize;
+		/*
+		 * Clip at the power of 2 below the size. There is an
+		 * issue (not isolated) that causes intermittent page
+		 * faults if this is not done.
+		 */
+		if (nmp->nm_wsize > NFS_FABLKSIZE)
+			nmp->nm_wsize = 1 << (fls(nmp->nm_wsize) - 1);
+		else
+			nmp->nm_wsize = NFS_FABLKSIZE;
+	}
+
+	if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) {
+		nmp->nm_rsize = argp->rsize;
+		/*
+		 * Clip at the power of 2 below the size. There is an
+		 * issue (not isolated) that causes intermittent page
+		 * faults if this is not done.
+		 */
+		if (nmp->nm_rsize > NFS_FABLKSIZE)
+			nmp->nm_rsize = 1 << (fls(nmp->nm_rsize) - 1);
+		else
+			nmp->nm_rsize = NFS_FABLKSIZE;
+	}
+
+	if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) {
+		nmp->nm_readdirsize = argp->readdirsize;
+	}
+
+	if ((argp->flags & NFSMNT_ACREGMIN) && argp->acregmin >= 0)
+		nmp->nm_acregmin = argp->acregmin;
+	else
+		nmp->nm_acregmin = NFS_MINATTRTIMO;
+	if ((argp->flags & NFSMNT_ACREGMAX) && argp->acregmax >= 0)
+		nmp->nm_acregmax = argp->acregmax;
+	else
+		nmp->nm_acregmax = NFS_MAXATTRTIMO;
+	if ((argp->flags & NFSMNT_ACDIRMIN) && argp->acdirmin >= 0)
+		nmp->nm_acdirmin = argp->acdirmin;
+	else
+		nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
+	if ((argp->flags & NFSMNT_ACDIRMAX) && argp->acdirmax >= 0)
+		nmp->nm_acdirmax = argp->acdirmax;
+	else
+		nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
+	if (nmp->nm_acdirmin > nmp->nm_acdirmax)
+		nmp->nm_acdirmin = nmp->nm_acdirmax;
+	if (nmp->nm_acregmin > nmp->nm_acregmax)
+		nmp->nm_acregmin = nmp->nm_acregmax;
+
+	if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0) {
+		if (argp->readahead <= NFS_MAXRAHEAD)
+			nmp->nm_readahead = argp->readahead;
+		else
+			nmp->nm_readahead = NFS_MAXRAHEAD;
+	}
+	if ((argp->flags & NFSMNT_WCOMMITSIZE) && argp->wcommitsize >= 0) {
+		if (argp->wcommitsize < nmp->nm_wsize)
+			nmp->nm_wcommitsize = nmp->nm_wsize;
+		else
+			nmp->nm_wcommitsize = argp->wcommitsize;
+	}
+
+	adjsock |= ((nmp->nm_sotype != argp->sotype) ||
+		    (nmp->nm_soproto != argp->proto));
+
+	if (nmp->nm_client != NULL && adjsock) {
+		int haslock = 0, error = 0;
+
+		if (nmp->nm_sotype == SOCK_STREAM) {
+			error = newnfs_sndlock(&nmp->nm_sockreq.nr_lock);
+			if (!error)
+				haslock = 1;
+		}
+		if (!error) {
+		    newnfs_disconnect(&nmp->nm_sockreq);
+		    if (haslock)
+			newnfs_sndunlock(&nmp->nm_sockreq.nr_lock);
+		    nmp->nm_sotype = argp->sotype;
+		    nmp->nm_soproto = argp->proto;
+		    if (nmp->nm_sotype == SOCK_DGRAM)
+			while (newnfs_connect(nmp, &nmp->nm_sockreq,
+			    cred, td, 0)) {
+				printf("newnfs_args: retrying connect\n");
+				(void) nfs_catnap(PSOCK, 0, "nfscon");
+			}
+		}
+	} else {
+		nmp->nm_sotype = argp->sotype;
+		nmp->nm_soproto = argp->proto;
+	}
+
+	if (hostname != NULL) {
+		strlcpy(nmp->nm_hostname, hostname,
+		    sizeof(nmp->nm_hostname));
+		p = strchr(nmp->nm_hostname, ':');
+		if (p != NULL)
+			*p = '\0';
+	}
+}
+
+static const char *nfs_opts[] = { "from", "nfs_args",
+    "noac", "noatime", "noexec", "suiddir", "nosuid", "nosymfollow", "union",
+    "noclusterr", "noclusterw", "multilabel", "acls", "force", "update",
+    "async", "noconn", "nolockd", "conn", "lockd", "intr", "rdirplus",
+    "readdirsize", "soft", "hard", "mntudp", "tcp", "udp", "wsize", "rsize",
+    "retrans", "actimeo", "acregmin", "acregmax", "acdirmin", "acdirmax",
+    "resvport", "readahead", "hostname", "timeo", "timeout", "addr", "fh",
+    "nfsv3", "sec", "principal", "nfsv4", "gssname", "allgssname", "dirpath",
+    "minorversion", "nametimeo", "negnametimeo", "nocto", "noncontigwr",
+    "pnfs", "wcommitsize", "oneopenown",
+    NULL };
+
+/*
+ * Parse the "from" mountarg, passed by the generic mount(8) program
+ * or the mountroot code.  This is used when rerooting into NFS.
+ *
+ * Note that the "hostname" is actually a "hostname:/share/path" string.
+ */
+static int
+nfs_mount_parse_from(struct vfsoptlist *opts, char **hostnamep,
+    struct sockaddr_in **sinp, char *dirpath, size_t dirpathsize, int *dirlenp)
+{
+	char *nam, *delimp, *hostp, *spec;
+	int error, have_bracket = 0, offset, rv, speclen;
+	struct sockaddr_in *sin;
+	size_t len;
+
+	error = vfs_getopt(opts, "from", (void **)&spec, &speclen);
+	if (error != 0)
+		return (error);
+	nam = malloc(MNAMELEN + 1, M_TEMP, M_WAITOK);
+
+	/*
+	 * This part comes from sbin/mount_nfs/mount_nfs.c:getnfsargs().
+	 */
+	if (*spec == '[' && (delimp = strchr(spec + 1, ']')) != NULL &&
+	    *(delimp + 1) == ':') {
+		hostp = spec + 1;
+		spec = delimp + 2;
+		have_bracket = 1;
+	} else if ((delimp = strrchr(spec, ':')) != NULL) {
+		hostp = spec;
+		spec = delimp + 1;
+	} else if ((delimp = strrchr(spec, '@')) != NULL) {
+		printf("%s: path at server syntax is deprecated, "
+		    "use server:path\n", __func__);
+		hostp = delimp + 1;
+	} else {
+		printf("%s: no <host>:<dirpath> nfs-name\n", __func__);
+		free(nam, M_TEMP);
+		return (EINVAL);
+	}
+	*delimp = '\0';
+
+	/*
+	 * Strip any trailing slashes from the path: if one is left in at
+	 * mount time, some mountd implementations fail to remove the mount
+	 * entries from their mountlist while unmounting.
+	 */
+	for (speclen = strlen(spec);
+	    speclen > 1 && spec[speclen - 1] == '/';
+	    speclen--)
+		spec[speclen - 1] = '\0';
+	if (strlen(hostp) + strlen(spec) + 1 > MNAMELEN) {
+		printf("%s: %s:%s: name too long", __func__, hostp, spec);
+		free(nam, M_TEMP);
+		return (EINVAL);
+	}
+	/* Make both '@' and ':' notations equal */
+	if (*hostp != '\0') {
+		len = strlen(hostp);
+		offset = 0;
+		if (have_bracket)
+			nam[offset++] = '[';
+		memmove(nam + offset, hostp, len);
+		if (have_bracket)
+			nam[len + offset++] = ']';
+		nam[len + offset++] = ':';
+		memmove(nam + len + offset, spec, speclen);
+		nam[len + speclen + offset] = '\0';
+	} else
+		nam[0] = '\0';
+
+	/*
+	 * XXX: IPv6
+	 */
+	sin = malloc(sizeof(*sin), M_SONAME, M_WAITOK);
+	rv = inet_pton(AF_INET, hostp, &sin->sin_addr);
+	if (rv != 1) {
+		printf("%s: cannot parse '%s', inet_pton() returned %d\n",
+		    __func__, hostp, rv);
+		free(nam, M_TEMP);
+		free(sin, M_SONAME);
+		return (EINVAL);
+	}
+
+	sin->sin_len = sizeof(*sin);
+	sin->sin_family = AF_INET;
+	/*
+	 * XXX: hardcoded port number.
+	 */
+	sin->sin_port = htons(2049);
+
+	*hostnamep = strdup(nam, M_NEWNFSMNT);
+	*sinp = sin;
+	strlcpy(dirpath, spec, dirpathsize);
+	*dirlenp = strlen(dirpath);
+
+	free(nam, M_TEMP);
+	return (0);
+}
+
+/*
+ * VFS Operations.
+ *
+ * mount system call
+ * It seems a bit dumb to copyinstr() the host and path here and then
+ * bcopy() them in mountnfs(), but I wanted to detect errors before
+ * doing the getsockaddr() call because getsockaddr() allocates an mbuf and
+ * an error after that means that I have to release the mbuf.
+ */
+/* ARGSUSED */
+static int
+nfs_mount(struct mount *mp)
+{
+	struct nfs_args args = {
+	    .version = NFS_ARGSVERSION,
+	    .addr = NULL,
+	    .addrlen = sizeof (struct sockaddr_in),
+	    .sotype = SOCK_STREAM,
+	    .proto = 0,
+	    .fh = NULL,
+	    .fhsize = 0,
+	    .flags = NFSMNT_RESVPORT,
+	    .wsize = NFS_WSIZE,
+	    .rsize = NFS_RSIZE,
+	    .readdirsize = NFS_READDIRSIZE,
+	    .timeo = 10,
+	    .retrans = NFS_RETRANS,
+	    .readahead = NFS_DEFRAHEAD,
+	    .wcommitsize = 0,			/* was: NQ_DEFLEASE */
+	    .hostname = NULL,
+	    .acregmin = NFS_MINATTRTIMO,
+	    .acregmax = NFS_MAXATTRTIMO,
+	    .acdirmin = NFS_MINDIRATTRTIMO,
+	    .acdirmax = NFS_MAXDIRATTRTIMO,
+	};
+	int error = 0, ret, len;
+	struct sockaddr *nam = NULL;
+	struct vnode *vp;
+	struct thread *td;
+	char *hst;
+	u_char nfh[NFSX_FHMAX], krbname[100], dirpath[100], srvkrbname[100];
+	char *cp, *opt, *name, *secname;
+	int nametimeo = NFS_DEFAULT_NAMETIMEO;
+	int negnametimeo = NFS_DEFAULT_NEGNAMETIMEO;
+	int minvers = 0;
+	int dirlen, has_nfs_args_opt, has_nfs_from_opt,
+	    krbnamelen, srvkrbnamelen;
+	size_t hstlen;
+
+	has_nfs_args_opt = 0;
+	has_nfs_from_opt = 0;
+	hst = malloc(MNAMELEN, M_TEMP, M_WAITOK);
+	if (vfs_filteropt(mp->mnt_optnew, nfs_opts)) {
+		error = EINVAL;
+		goto out;
+	}
+
+	td = curthread;
+	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_UPDATE)) == MNT_ROOTFS &&
+	    nfs_diskless_valid != 0) {
+		error = nfs_mountroot(mp);
+		goto out;
+	}
+
+	nfscl_init();
+
+	/*
+	 * The old mount_nfs program passed the struct nfs_args
+	 * from userspace to kernel.  The new mount_nfs program
+	 * passes string options via nmount() from userspace to kernel
+	 * and we populate the struct nfs_args in the kernel.
+	 */
+	if (vfs_getopt(mp->mnt_optnew, "nfs_args", NULL, NULL) == 0) {
+		error = vfs_copyopt(mp->mnt_optnew, "nfs_args", &args,
+		    sizeof(args));
+		if (error != 0)
+			goto out;
+
+		if (args.version != NFS_ARGSVERSION) {
+			error = EPROGMISMATCH;
+			goto out;
+		}
+		has_nfs_args_opt = 1;
+	}
+
+	/* Handle the new style options. */
+	if (vfs_getopt(mp->mnt_optnew, "noac", NULL, NULL) == 0) {
+		args.acdirmin = args.acdirmax =
+		    args.acregmin = args.acregmax = 0;
+		args.flags |= NFSMNT_ACDIRMIN | NFSMNT_ACDIRMAX |
+		    NFSMNT_ACREGMIN | NFSMNT_ACREGMAX;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "noconn", NULL, NULL) == 0)
+		args.flags |= NFSMNT_NOCONN;
+	if (vfs_getopt(mp->mnt_optnew, "conn", NULL, NULL) == 0)
+		args.flags &= ~NFSMNT_NOCONN;
+	if (vfs_getopt(mp->mnt_optnew, "nolockd", NULL, NULL) == 0)
+		args.flags |= NFSMNT_NOLOCKD;
+	if (vfs_getopt(mp->mnt_optnew, "lockd", NULL, NULL) == 0)
+		args.flags &= ~NFSMNT_NOLOCKD;
+	if (vfs_getopt(mp->mnt_optnew, "intr", NULL, NULL) == 0)
+		args.flags |= NFSMNT_INT;
+	if (vfs_getopt(mp->mnt_optnew, "rdirplus", NULL, NULL) == 0)
+		args.flags |= NFSMNT_RDIRPLUS;
+	if (vfs_getopt(mp->mnt_optnew, "resvport", NULL, NULL) == 0)
+		args.flags |= NFSMNT_RESVPORT;
+	if (vfs_getopt(mp->mnt_optnew, "noresvport", NULL, NULL) == 0)
+		args.flags &= ~NFSMNT_RESVPORT;
+	if (vfs_getopt(mp->mnt_optnew, "soft", NULL, NULL) == 0)
+		args.flags |= NFSMNT_SOFT;
+	if (vfs_getopt(mp->mnt_optnew, "hard", NULL, NULL) == 0)
+		args.flags &= ~NFSMNT_SOFT;
+	if (vfs_getopt(mp->mnt_optnew, "mntudp", NULL, NULL) == 0)
+		args.sotype = SOCK_DGRAM;
+	if (vfs_getopt(mp->mnt_optnew, "udp", NULL, NULL) == 0)
+		args.sotype = SOCK_DGRAM;
+	if (vfs_getopt(mp->mnt_optnew, "tcp", NULL, NULL) == 0)
+		args.sotype = SOCK_STREAM;
+	if (vfs_getopt(mp->mnt_optnew, "nfsv3", NULL, NULL) == 0)
+		args.flags |= NFSMNT_NFSV3;
+	if (vfs_getopt(mp->mnt_optnew, "nfsv4", NULL, NULL) == 0) {
+		args.flags |= NFSMNT_NFSV4;
+		args.sotype = SOCK_STREAM;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "allgssname", NULL, NULL) == 0)
+		args.flags |= NFSMNT_ALLGSSNAME;
+	if (vfs_getopt(mp->mnt_optnew, "nocto", NULL, NULL) == 0)
+		args.flags |= NFSMNT_NOCTO;
+	if (vfs_getopt(mp->mnt_optnew, "noncontigwr", NULL, NULL) == 0)
+		args.flags |= NFSMNT_NONCONTIGWR;
+	if (vfs_getopt(mp->mnt_optnew, "pnfs", NULL, NULL) == 0)
+		args.flags |= NFSMNT_PNFS;
+	if (vfs_getopt(mp->mnt_optnew, "oneopenown", NULL, NULL) == 0)
+		args.flags |= NFSMNT_ONEOPENOWN;
+	if (vfs_getopt(mp->mnt_optnew, "readdirsize", (void **)&opt, NULL) == 0) {
+		if (opt == NULL) { 
+			vfs_mount_error(mp, "illegal readdirsize");
+			error = EINVAL;
+			goto out;
+		}
+		ret = sscanf(opt, "%d", &args.readdirsize);
+		if (ret != 1 || args.readdirsize <= 0) {
+			vfs_mount_error(mp, "illegal readdirsize: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_READDIRSIZE;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "readahead", (void **)&opt, NULL) == 0) {
+		if (opt == NULL) { 
+			vfs_mount_error(mp, "illegal readahead");
+			error = EINVAL;
+			goto out;
+		}
+		ret = sscanf(opt, "%d", &args.readahead);
+		if (ret != 1 || args.readahead <= 0) {
+			vfs_mount_error(mp, "illegal readahead: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_READAHEAD;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "wsize", (void **)&opt, NULL) == 0) {
+		if (opt == NULL) { 
+			vfs_mount_error(mp, "illegal wsize");
+			error = EINVAL;
+			goto out;
+		}
+		ret = sscanf(opt, "%d", &args.wsize);
+		if (ret != 1 || args.wsize <= 0) {
+			vfs_mount_error(mp, "illegal wsize: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_WSIZE;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "rsize", (void **)&opt, NULL) == 0) {
+		if (opt == NULL) { 
+			vfs_mount_error(mp, "illegal rsize");
+			error = EINVAL;
+			goto out;
+		}
+		ret = sscanf(opt, "%d", &args.rsize);
+		if (ret != 1 || args.rsize <= 0) {
+			vfs_mount_error(mp, "illegal rsize: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_RSIZE;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "retrans", (void **)&opt, NULL) == 0) {
+		if (opt == NULL) { 
+			vfs_mount_error(mp, "illegal retrans");
+			error = EINVAL;
+			goto out;
+		}
+		ret = sscanf(opt, "%d", &args.retrans);
+		if (ret != 1 || args.retrans <= 0) {
+			vfs_mount_error(mp, "illegal retrans: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_RETRANS;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "actimeo", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.acregmin);
+		if (ret != 1 || args.acregmin < 0) {
+			vfs_mount_error(mp, "illegal actimeo: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.acdirmin = args.acdirmax = args.acregmax = args.acregmin;
+		args.flags |= NFSMNT_ACDIRMIN | NFSMNT_ACDIRMAX |
+		    NFSMNT_ACREGMIN | NFSMNT_ACREGMAX;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "acregmin", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.acregmin);
+		if (ret != 1 || args.acregmin < 0) {
+			vfs_mount_error(mp, "illegal acregmin: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_ACREGMIN;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "acregmax", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.acregmax);
+		if (ret != 1 || args.acregmax < 0) {
+			vfs_mount_error(mp, "illegal acregmax: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_ACREGMAX;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "acdirmin", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.acdirmin);
+		if (ret != 1 || args.acdirmin < 0) {
+			vfs_mount_error(mp, "illegal acdirmin: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_ACDIRMIN;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "acdirmax", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.acdirmax);
+		if (ret != 1 || args.acdirmax < 0) {
+			vfs_mount_error(mp, "illegal acdirmax: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_ACDIRMAX;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "wcommitsize", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.wcommitsize);
+		if (ret != 1 || args.wcommitsize < 0) {
+			vfs_mount_error(mp, "illegal wcommitsize: %s", opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_WCOMMITSIZE;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "timeo", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.timeo);
+		if (ret != 1 || args.timeo <= 0) {
+			vfs_mount_error(mp, "illegal timeo: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_TIMEO;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "timeout", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &args.timeo);
+		if (ret != 1 || args.timeo <= 0) {
+			vfs_mount_error(mp, "illegal timeout: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+		args.flags |= NFSMNT_TIMEO;
+	}
+	if (vfs_getopt(mp->mnt_optnew, "nametimeo", (void **)&opt, NULL) == 0) {
+		ret = sscanf(opt, "%d", &nametimeo);
+		if (ret != 1 || nametimeo < 0) {
+			vfs_mount_error(mp, "illegal nametimeo: %s", opt);
+			error = EINVAL;
+			goto out;
+		}
+	}
+	if (vfs_getopt(mp->mnt_optnew, "negnametimeo", (void **)&opt, NULL)
+	    == 0) {
+		ret = sscanf(opt, "%d", &negnametimeo);
+		if (ret != 1 || negnametimeo < 0) {
+			vfs_mount_error(mp, "illegal negnametimeo: %s",
+			    opt);
+			error = EINVAL;
+			goto out;
+		}
+	}
+	if (vfs_getopt(mp->mnt_optnew, "minorversion", (void **)&opt, NULL) ==
+	    0) {
+		ret = sscanf(opt, "%d", &minvers);
+		if (ret != 1 || minvers < 0 || minvers > 1 ||
+		    (args.flags & NFSMNT_NFSV4) == 0) {
+			vfs_mount_error(mp, "illegal minorversion: %s", opt);
+			error = EINVAL;
+			goto out;
+		}
+	}
+	if (vfs_getopt(mp->mnt_optnew, "sec",
+		(void **) &secname, NULL) == 0)
+		nfs_sec_name(secname, &args.flags);
+
+	if (mp->mnt_flag & MNT_UPDATE) {
+		struct nfsmount *nmp = VFSTONFS(mp);
+
+		if (nmp == NULL) {
+			error = EIO;
+			goto out;
+		}
+
+		/*
+		 * If a change from TCP->UDP is done and there are thread(s)
+		 * that have I/O RPC(s) in progress with a transfer size
+		 * greater than NFS_MAXDGRAMDATA, those thread(s) will be
+		 * hung, retrying the RPC(s) forever. Usually these threads
+		 * will be seen doing an uninterruptible sleep on wait channel
+		 * "nfsreq".
+		 */
+		if (args.sotype == SOCK_DGRAM && nmp->nm_sotype == SOCK_STREAM)
+			tprintf(td->td_proc, LOG_WARNING,
+	"Warning: mount -u that changes TCP->UDP can result in hung threads\n");
+
+		/*
+		 * When doing an update, we can't change version,
+		 * security, switch lockd strategies, change cookie
+		 * translation or switch oneopenown.
+		 */
+		args.flags = (args.flags &
+		    ~(NFSMNT_NFSV3 |
+		      NFSMNT_NFSV4 |
+		      NFSMNT_KERB |
+		      NFSMNT_INTEGRITY |
+		      NFSMNT_PRIVACY |
+		      NFSMNT_ONEOPENOWN |
+		      NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/)) |
+		    (nmp->nm_flag &
+			(NFSMNT_NFSV3 |
+			 NFSMNT_NFSV4 |
+			 NFSMNT_KERB |
+			 NFSMNT_INTEGRITY |
+			 NFSMNT_PRIVACY |
+			 NFSMNT_ONEOPENOWN |
+			 NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/));
+		nfs_decode_args(mp, nmp, &args, NULL, td->td_ucred, td);
+		goto out;
+	}
+
+	/*
+	 * Make the nfs_ip_paranoia sysctl serve as the default connection
+	 * or no-connection mode for those protocols that support 
+	 * no-connection mode (the flag will be cleared later for protocols
+	 * that do not support no-connection mode).  This will allow a client
+	 * to receive replies from a different IP than the request was
+	 * sent to.  Note: default value for nfs_ip_paranoia is 1 (paranoid),
+	 * not 0.
+	 */
+	if (nfs_ip_paranoia == 0)
+		args.flags |= NFSMNT_NOCONN;
+
+	if (has_nfs_args_opt != 0) {
+		/*
+		 * In the 'nfs_args' case, the pointers in the args
+		 * structure are in userland - we copy them in here.
+		 */
+		if (args.fhsize < 0 || args.fhsize > NFSX_V3FHMAX) {
+			vfs_mount_error(mp, "Bad file handle");
+			error = EINVAL;
+			goto out;
+		}
+		error = copyin((caddr_t)args.fh, (caddr_t)nfh,
+		    args.fhsize);
+		if (error != 0)
+			goto out;
+		error = copyinstr(args.hostname, hst, MNAMELEN - 1, &hstlen);
+		if (error != 0)
+			goto out;
+		bzero(&hst[hstlen], MNAMELEN - hstlen);
+		args.hostname = hst;
+		/* getsockaddr() call must be after above copyin() calls */
+		error = getsockaddr(&nam, (caddr_t)args.addr,
+		    args.addrlen);
+		if (error != 0)
+			goto out;
+	} else if (nfs_mount_parse_from(mp->mnt_optnew,
+	    &args.hostname, (struct sockaddr_in **)&nam, dirpath,
+	    sizeof(dirpath), &dirlen) == 0) {
+		has_nfs_from_opt = 1;
+		bcopy(args.hostname, hst, MNAMELEN);
+		hst[MNAMELEN - 1] = '\0';
+
+		/*
+		 * This only works with NFSv4 for now.
+		 */
+		args.fhsize = 0;
+		args.flags |= NFSMNT_NFSV4;
+		args.sotype = SOCK_STREAM;
+	} else {
+		if (vfs_getopt(mp->mnt_optnew, "fh", (void **)&args.fh,
+		    &args.fhsize) == 0) {
+			if (args.fhsize < 0 || args.fhsize > NFSX_FHMAX) {
+				vfs_mount_error(mp, "Bad file handle");
+				error = EINVAL;
+				goto out;
+			}
+			bcopy(args.fh, nfh, args.fhsize);
+		} else {
+			args.fhsize = 0;
+		}
+		(void) vfs_getopt(mp->mnt_optnew, "hostname",
+		    (void **)&args.hostname, &len);
+		if (args.hostname == NULL) {
+			vfs_mount_error(mp, "Invalid hostname");
+			error = EINVAL;
+			goto out;
+		}
+		if (len >= MNAMELEN) {
+			vfs_mount_error(mp, "Hostname too long");
+			error = EINVAL;
+			goto out;
+		}
+		bcopy(args.hostname, hst, len);
+		hst[len] = '\0';
+	}
+
+	if (vfs_getopt(mp->mnt_optnew, "principal", (void **)&name, NULL) == 0)
+		strlcpy(srvkrbname, name, sizeof (srvkrbname));
+	else {
+		snprintf(srvkrbname, sizeof (srvkrbname), "nfs@%s", hst);
+		cp = strchr(srvkrbname, ':');
+		if (cp != NULL)
+			*cp = '\0';
+	}
+	srvkrbnamelen = strlen(srvkrbname);
+
+	if (vfs_getopt(mp->mnt_optnew, "gssname", (void **)&name, NULL) == 0)
+		strlcpy(krbname, name, sizeof (krbname));
+	else
+		krbname[0] = '\0';
+	krbnamelen = strlen(krbname);
+
+	if (has_nfs_from_opt == 0) {
+		if (vfs_getopt(mp->mnt_optnew,
+		    "dirpath", (void **)&name, NULL) == 0)
+			strlcpy(dirpath, name, sizeof (dirpath));
+		else
+			dirpath[0] = '\0';
+		dirlen = strlen(dirpath);
+	}
+
+	if (has_nfs_args_opt == 0 && has_nfs_from_opt == 0) {
+		if (vfs_getopt(mp->mnt_optnew, "addr",
+		    (void **)&args.addr, &args.addrlen) == 0) {
+			if (args.addrlen > SOCK_MAXADDRLEN) {
+				error = ENAMETOOLONG;
+				goto out;
+			}
+			nam = malloc(args.addrlen, M_SONAME, M_WAITOK);
+			bcopy(args.addr, nam, args.addrlen);
+			nam->sa_len = args.addrlen;
+		} else {
+			vfs_mount_error(mp, "No server address");
+			error = EINVAL;
+			goto out;
+		}
+	}
+
+	args.fh = nfh;
+	error = mountnfs(&args, mp, nam, hst, krbname, krbnamelen, dirpath,
+	    dirlen, srvkrbname, srvkrbnamelen, &vp, td->td_ucred, td,
+	    nametimeo, negnametimeo, minvers);
+out:
+	if (!error) {
+		MNT_ILOCK(mp);
+		mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_NO_IOPF |
+		    MNTK_USES_BCACHE;
+		if ((VFSTONFS(mp)->nm_flag & NFSMNT_NFSV4) != 0)
+			mp->mnt_kern_flag |= MNTK_NULL_NOCACHE;
+		MNT_IUNLOCK(mp);
+	}
+	free(hst, M_TEMP);
+	return (error);
+}
+
+
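The vfs_getopt() chain in nfs_mount() above is what translates textual mount options into NFSMNT_* flags and transport settings. For context, a minimal sketch of how a FreeBSD-style userland caller could hand such options to this code via nmount(2); the mount point and server path are illustrative only, and an RTEMS application would typically go through the RTEMS mount() interface rather than this call:

	#include <sys/param.h>
	#include <sys/mount.h>
	#include <sys/uio.h>
	#include <stdio.h>
	#include <string.h>

	/* Append one "name"/"value" pair; boolean options pass a NULL value. */
	static void
	set_opt(struct iovec *iov, int *cnt, const char *name, const char *val)
	{
		iov[*cnt].iov_base = __DECONST(char *, name);
		iov[*cnt].iov_len = strlen(name) + 1;
		(*cnt)++;
		iov[*cnt].iov_base = __DECONST(char *, val);
		iov[*cnt].iov_len = (val != NULL) ? strlen(val) + 1 : 0;
		(*cnt)++;
	}

	int
	main(void)
	{
		struct iovec iov[12];
		int cnt = 0;

		set_opt(iov, &cnt, "fstype", "nfs");
		set_opt(iov, &cnt, "fspath", "/mnt");         /* illustrative mount point */
		set_opt(iov, &cnt, "from", "server:/export"); /* handled by nfs_mount_parse_from(), implies NFSv4 */
		set_opt(iov, &cnt, "tcp", NULL);              /* boolean option: present, no value */
		set_opt(iov, &cnt, "soft", NULL);

		if (nmount(iov, cnt, 0) != 0) {
			perror("nmount");
			return (1);
		}
		return (0);
	}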
+/*
+ * VFS Operations.
+ *
+ * mount system call
+ * It seems a bit dumb to copyinstr() the host and path here and then
+ * bcopy() them in mountnfs(), but I wanted to detect errors before
+ * doing the getsockaddr() call because getsockaddr() allocates an mbuf and
+ * an error after that means that I have to release the mbuf.
+ */
+/* ARGSUSED */
+static int
+nfs_cmount(struct mntarg *ma, void *data, uint64_t flags)
+{
+	int error;
+	struct nfs_args args;
+
+	error = copyin(data, &args, sizeof (struct nfs_args));
+	if (error)
+		return error;
+
+	ma = mount_arg(ma, "nfs_args", &args, sizeof args);
+
+	error = kernel_mount(ma, flags);
+	return (error);
+}
+
+/*
+ * Common code for mount and mountroot
+ */
+static int
+mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam,
+    char *hst, u_char *krbname, int krbnamelen, u_char *dirpath, int dirlen,
+    u_char *srvkrbname, int srvkrbnamelen, struct vnode **vpp,
+    struct ucred *cred, struct thread *td, int nametimeo, int negnametimeo,
+    int minvers)
+{
+	struct nfsmount *nmp;
+	struct nfsnode *np;
+	int error, trycnt, ret;
+	struct nfsvattr nfsva;
+	struct nfsclclient *clp;
+	struct nfsclds *dsp, *tdsp;
+	uint32_t lease;
+	static u_int64_t clval = 0;
+
+	NFSCL_DEBUG(3, "in mnt\n");
+	clp = NULL;
+	if (mp->mnt_flag & MNT_UPDATE) {
+		nmp = VFSTONFS(mp);
+		printf("%s: MNT_UPDATE is no longer handled here\n", __func__);
+		free(nam, M_SONAME);
+		return (0);
+	} else {
+		nmp = malloc(sizeof (struct nfsmount) +
+		    krbnamelen + dirlen + srvkrbnamelen + 2,
+		    M_NEWNFSMNT, M_WAITOK | M_ZERO);
+		TAILQ_INIT(&nmp->nm_bufq);
+		TAILQ_INIT(&nmp->nm_sess);
+		if (clval == 0)
+			clval = (u_int64_t)nfsboottime.tv_sec;
+		nmp->nm_clval = clval++;
+		nmp->nm_krbnamelen = krbnamelen;
+		nmp->nm_dirpathlen = dirlen;
+		nmp->nm_srvkrbnamelen = srvkrbnamelen;
+		if (td->td_ucred->cr_uid != (uid_t)0) {
+			/*
+			 * nm_uid is used to get KerberosV credentials for
+			 * the nfsv4 state handling operations if there is
+			 * no host based principal set. Use the uid of
+			 * this user if not root, since they are doing the
+			 * mount. I don't think setting this for root will
+			 * work, since root normally does not have user
+			 * credentials in a credentials cache.
+			 */
+			nmp->nm_uid = td->td_ucred->cr_uid;
+		} else {
+			/*
+			 * Just set to -1, so it won't be used.
+			 */
+			nmp->nm_uid = (uid_t)-1;
+		}
+
+		/* Copy and null terminate all the names */
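+		/*
+		 * The three names are packed back to back in nm_name[] as
+		 * "<krbname>\0<dirpath>\0<srvkrbname>\0"; NFSMNT_DIRPATH()
+		 * and NFSMNT_SRVKRBNAME() index into this buffer.
+		 */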
+		if (nmp->nm_krbnamelen > 0) {
+			bcopy(krbname, nmp->nm_krbname, nmp->nm_krbnamelen);
+			nmp->nm_name[nmp->nm_krbnamelen] = '\0';
+		}
+		if (nmp->nm_dirpathlen > 0) {
+			bcopy(dirpath, NFSMNT_DIRPATH(nmp),
+			    nmp->nm_dirpathlen);
+			nmp->nm_name[nmp->nm_krbnamelen + nmp->nm_dirpathlen
+			    + 1] = '\0';
+		}
+		if (nmp->nm_srvkrbnamelen > 0) {
+			bcopy(srvkrbname, NFSMNT_SRVKRBNAME(nmp),
+			    nmp->nm_srvkrbnamelen);
+			nmp->nm_name[nmp->nm_krbnamelen + nmp->nm_dirpathlen
+			    + nmp->nm_srvkrbnamelen + 2] = '\0';
+		}
+		nmp->nm_sockreq.nr_cred = crhold(cred);
+		mtx_init(&nmp->nm_sockreq.nr_mtx, "nfssock", NULL, MTX_DEF);
+		mp->mnt_data = nmp;
+		nmp->nm_getinfo = nfs_getnlminfo;
+		nmp->nm_vinvalbuf = ncl_vinvalbuf;
+	}
+	vfs_getnewfsid(mp);
+	nmp->nm_mountp = mp;
+	mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF | MTX_DUPOK);
+
+	/*
+	 * Since nfs_decode_args() might optionally set them, these
+	 * need to be set to defaults before the call, so that the
+	 * optional settings aren't overwritten.
+	 */
+	nmp->nm_nametimeo = nametimeo;
+	nmp->nm_negnametimeo = negnametimeo;
+	nmp->nm_timeo = NFS_TIMEO;
+	nmp->nm_retry = NFS_RETRANS;
+	nmp->nm_readahead = NFS_DEFRAHEAD;
+
+	/* This is an empirical approximation of sqrt(hibufspace) * 256. */
+	nmp->nm_wcommitsize = NFS_MAXBSIZE / 256;
+	while ((long)nmp->nm_wcommitsize * nmp->nm_wcommitsize < hibufspace)
+		nmp->nm_wcommitsize *= 2;
+	nmp->nm_wcommitsize *= 256;
+
+	if ((argp->flags & NFSMNT_NFSV4) != 0)
+		nmp->nm_minorvers = minvers;
+	else
+		nmp->nm_minorvers = 0;
+
+	nfs_decode_args(mp, nmp, argp, hst, cred, td);
+
+	/*
+	 * V2 can only handle 32 bit filesizes.  A 4GB-1 limit may be too
+	 * high, depending on whether we end up with negative offsets in
+	 * the client or server somewhere.  2GB-1 may be safer.
+	 *
+	 * For V3, ncl_fsinfo will adjust this as necessary.  Assume maximum
+	 * that we can handle until we find out otherwise.
+	 */
+	if ((argp->flags & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) == 0)
+		nmp->nm_maxfilesize = 0xffffffffLL;
+	else
+		nmp->nm_maxfilesize = OFF_MAX;
+
+	if ((argp->flags & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) == 0) {
+		nmp->nm_wsize = NFS_WSIZE;
+		nmp->nm_rsize = NFS_RSIZE;
+		nmp->nm_readdirsize = NFS_READDIRSIZE;
+	}
+	nmp->nm_numgrps = NFS_MAXGRPS;
+	nmp->nm_tprintf_delay = nfs_tprintf_delay;
+	if (nmp->nm_tprintf_delay < 0)
+		nmp->nm_tprintf_delay = 0;
+	nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
+	if (nmp->nm_tprintf_initial_delay < 0)
+		nmp->nm_tprintf_initial_delay = 0;
+	nmp->nm_fhsize = argp->fhsize;
+	if (nmp->nm_fhsize > 0)
+		bcopy((caddr_t)argp->fh, (caddr_t)nmp->nm_fh, argp->fhsize);
+	bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN);
+	nmp->nm_nam = nam;
+	/* Set up the sockets and per-host congestion */
+	nmp->nm_sotype = argp->sotype;
+	nmp->nm_soproto = argp->proto;
+	nmp->nm_sockreq.nr_prog = NFS_PROG;
+	if ((argp->flags & NFSMNT_NFSV4))
+		nmp->nm_sockreq.nr_vers = NFS_VER4;
+	else if ((argp->flags & NFSMNT_NFSV3))
+		nmp->nm_sockreq.nr_vers = NFS_VER3;
+	else
+		nmp->nm_sockreq.nr_vers = NFS_VER2;
+
+
+	if ((error = newnfs_connect(nmp, &nmp->nm_sockreq, cred, td, 0)))
+		goto bad;
+	/* For NFSv4.1, get the clientid now. */
+	if (nmp->nm_minorvers > 0) {
+		NFSCL_DEBUG(3, "at getcl\n");
+		error = nfscl_getcl(mp, cred, td, 0, &clp);
+		NFSCL_DEBUG(3, "aft getcl=%d\n", error);
+		if (error != 0)
+			goto bad;
+	}
+
+	if (nmp->nm_fhsize == 0 && (nmp->nm_flag & NFSMNT_NFSV4) &&
+	    nmp->nm_dirpathlen > 0) {
+		NFSCL_DEBUG(3, "in dirp\n");
+		/*
+		 * If the fhsize on the mount point == 0 for V4, the mount
+		 * path needs to be looked up.
+		 */
+		trycnt = 3;
+		do {
+			error = nfsrpc_getdirpath(nmp, NFSMNT_DIRPATH(nmp),
+			    cred, td);
+			NFSCL_DEBUG(3, "aft dirp=%d\n", error);
+			if (error)
+				(void) nfs_catnap(PZERO, error, "nfsgetdirp");
+		} while (error && --trycnt > 0);
+		if (error)
+			goto bad;
+	}
+
+	/*
+	 * A reference count is needed on the nfsnode representing the
+	 * remote root.  If this object is not persistent, then backward
+	 * traversals of the mount point (i.e. "..") will not work if
+	 * the nfsnode gets flushed out of the cache. Ufs does not have
+	 * this problem, because one can identify root inodes by their
+	 * number == UFS_ROOTINO (2).
+	 */
+	if (nmp->nm_fhsize > 0) {
+		/*
+		 * Set f_iosize to NFS_DIRBLKSIZ so that bo_bsize gets set
+		 * non-zero for the root vnode. f_iosize will be set correctly
+		 * by nfs_statfs() before any I/O occurs.
+		 */
+		mp->mnt_stat.f_iosize = NFS_DIRBLKSIZ;
+		error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np,
+		    LK_EXCLUSIVE);
+		if (error)
+			goto bad;
+		*vpp = NFSTOV(np);
+	
+		/*
+		 * Get file attributes and transfer parameters for the
+		 * mountpoint.  This has the side effect of filling in
+		 * (*vpp)->v_type with the correct value.
+		 */
+		ret = nfsrpc_getattrnovp(nmp, nmp->nm_fh, nmp->nm_fhsize, 1,
+		    cred, td, &nfsva, NULL, &lease);
+		if (ret) {
+			/*
+			 * Just set default values to get things going.
+			 */
+			NFSBZERO((caddr_t)&nfsva, sizeof (struct nfsvattr));
+			nfsva.na_vattr.va_type = VDIR;
+			nfsva.na_vattr.va_mode = 0777;
+			nfsva.na_vattr.va_nlink = 100;
+			nfsva.na_vattr.va_uid = (uid_t)0;
+			nfsva.na_vattr.va_gid = (gid_t)0;
+			nfsva.na_vattr.va_fileid = 2;
+			nfsva.na_vattr.va_gen = 1;
+			nfsva.na_vattr.va_blocksize = NFS_FABLKSIZE;
+			nfsva.na_vattr.va_size = 512 * 1024;
+			lease = 60;
+		}
+		(void) nfscl_loadattrcache(vpp, &nfsva, NULL, NULL, 0, 1);
+		if (nmp->nm_minorvers > 0) {
+			NFSCL_DEBUG(3, "lease=%d\n", (int)lease);
+			NFSLOCKCLSTATE();
+			clp->nfsc_renew = NFSCL_RENEW(lease);
+			clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
+			clp->nfsc_clientidrev++;
+			if (clp->nfsc_clientidrev == 0)
+				clp->nfsc_clientidrev++;
+			NFSUNLOCKCLSTATE();
+			/*
+			 * Mount will succeed, so the renew thread can be
+			 * started now.
+			 */
+			nfscl_start_renewthread(clp);
+			nfscl_clientrelease(clp);
+		}
+		if (argp->flags & NFSMNT_NFSV3)
+			ncl_fsinfo(nmp, *vpp, cred, td);
+	
+		/* Mark if the mount point supports NFSv4 ACLs. */
+		if ((argp->flags & NFSMNT_NFSV4) != 0 && nfsrv_useacl != 0 &&
+		    ret == 0 &&
+		    NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL)) {
+			MNT_ILOCK(mp);
+			mp->mnt_flag |= MNT_NFS4ACLS;
+			MNT_IUNLOCK(mp);
+		}
+	
+		/*
+		 * Lose the lock but keep the ref.
+		 */
+		NFSVOPUNLOCK(*vpp, 0);
+		return (0);
+	}
+	error = EIO;
+
+bad:
+	if (clp != NULL)
+		nfscl_clientrelease(clp);
+	newnfs_disconnect(&nmp->nm_sockreq);
+	crfree(nmp->nm_sockreq.nr_cred);
+	if (nmp->nm_sockreq.nr_auth != NULL)
+		AUTH_DESTROY(nmp->nm_sockreq.nr_auth);
+	mtx_destroy(&nmp->nm_sockreq.nr_mtx);
+	mtx_destroy(&nmp->nm_mtx);
+	if (nmp->nm_clp != NULL) {
+		NFSLOCKCLSTATE();
+		LIST_REMOVE(nmp->nm_clp, nfsc_list);
+		NFSUNLOCKCLSTATE();
+		free(nmp->nm_clp, M_NFSCLCLIENT);
+	}
+	TAILQ_FOREACH_SAFE(dsp, &nmp->nm_sess, nfsclds_list, tdsp) {
+		if (dsp != TAILQ_FIRST(&nmp->nm_sess) &&
+		    dsp->nfsclds_sockp != NULL)
+			newnfs_disconnect(dsp->nfsclds_sockp);
+		nfscl_freenfsclds(dsp);
+	}
+	free(nmp, M_NEWNFSMNT);
+	free(nam, M_SONAME);
+	return (error);
+}
+
+/*
+ * unmount system call
+ */
+static int
+nfs_unmount(struct mount *mp, int mntflags)
+{
+	struct thread *td;
+	struct nfsmount *nmp;
+	int error, flags = 0, i, trycnt = 0;
+	struct nfsclds *dsp, *tdsp;
+
+	td = curthread;
+
+	if (mntflags & MNT_FORCE)
+		flags |= FORCECLOSE;
+	nmp = VFSTONFS(mp);
+	error = 0;
+	/*
+	 * Goes something like this..
+	 * - Call vflush() to clear out vnodes for this filesystem
+	 * - Close the socket
+	 * - Free up the data structures
+	 */
+	/* In the forced case, cancel any outstanding requests. */
+	if (mntflags & MNT_FORCE) {
+		NFSDDSLOCK();
+		if (nfsv4_findmirror(nmp) != NULL)
+			error = ENXIO;
+		NFSDDSUNLOCK();
+		if (error)
+			goto out;
+		error = newnfs_nmcancelreqs(nmp);
+		if (error)
+			goto out;
+		/* For a forced close, get rid of the renew thread now */
+		nfscl_umount(nmp, td);
+	}
+	/* We hold 1 extra ref on the root vnode; see comment in mountnfs(). */
+	do {
+		error = vflush(mp, 1, flags, td);
+		if ((mntflags & MNT_FORCE) && error != 0 && ++trycnt < 30)
+			(void) nfs_catnap(PSOCK, error, "newndm");
+	} while ((mntflags & MNT_FORCE) && error != 0 && trycnt < 30);
+	if (error)
+		goto out;
+
+	/*
+	 * We are now committed to the unmount.
+	 */
+	if ((mntflags & MNT_FORCE) == 0)
+		nfscl_umount(nmp, td);
+	else {
+		mtx_lock(&nmp->nm_mtx);
+		nmp->nm_privflag |= NFSMNTP_FORCEDISM;
+		mtx_unlock(&nmp->nm_mtx);
+	}
+	/* Make sure no nfsiods are assigned to this mount. */
+	NFSLOCKIOD();
+	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
+		if (ncl_iodmount[i] == nmp) {
+			ncl_iodwant[i] = NFSIOD_AVAILABLE;
+			ncl_iodmount[i] = NULL;
+		}
+	NFSUNLOCKIOD();
+
+	/*
+	 * We can now set mnt_data to NULL and wait for
+	 * nfssvc(NFSSVC_FORCEDISM) to complete.
+	 */
+	mtx_lock(&mountlist_mtx);
+	mtx_lock(&nmp->nm_mtx);
+	mp->mnt_data = NULL;
+	mtx_unlock(&mountlist_mtx);
+	while ((nmp->nm_privflag & NFSMNTP_CANCELRPCS) != 0)
+		msleep(nmp, &nmp->nm_mtx, PVFS, "nfsfdism", 0);
+	mtx_unlock(&nmp->nm_mtx);
+
+	newnfs_disconnect(&nmp->nm_sockreq);
+	crfree(nmp->nm_sockreq.nr_cred);
+	free(nmp->nm_nam, M_SONAME);
+	if (nmp->nm_sockreq.nr_auth != NULL)
+		AUTH_DESTROY(nmp->nm_sockreq.nr_auth);
+	mtx_destroy(&nmp->nm_sockreq.nr_mtx);
+	mtx_destroy(&nmp->nm_mtx);
+	TAILQ_FOREACH_SAFE(dsp, &nmp->nm_sess, nfsclds_list, tdsp) {
+		if (dsp != TAILQ_FIRST(&nmp->nm_sess) &&
+		    dsp->nfsclds_sockp != NULL)
+			newnfs_disconnect(dsp->nfsclds_sockp);
+		nfscl_freenfsclds(dsp);
+	}
+	free(nmp, M_NEWNFSMNT);
+out:
+	return (error);
+}
+
+/*
+ * Return root of a filesystem
+ */
+static int
+nfs_root(struct mount *mp, int flags, struct vnode **vpp)
+{
+	struct vnode *vp;
+	struct nfsmount *nmp;
+	struct nfsnode *np;
+	int error;
+
+	nmp = VFSTONFS(mp);
+	error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np, flags);
+	if (error)
+		return error;
+	vp = NFSTOV(np);
+	/*
+	 * Get transfer parameters and attributes for root vnode once.
+	 */
+	mtx_lock(&nmp->nm_mtx);
+	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp)) {
+		mtx_unlock(&nmp->nm_mtx);
+		ncl_fsinfo(nmp, vp, curthread->td_ucred, curthread);
+	} else 
+		mtx_unlock(&nmp->nm_mtx);
+	if (vp->v_type == VNON)
+	    vp->v_type = VDIR;
+	vp->v_vflag |= VV_ROOT;
+	*vpp = vp;
+	return (0);
+}
+
+/*
+ * Flush out the buffer cache
+ */
+/* ARGSUSED */
+static int
+nfs_sync(struct mount *mp, int waitfor)
+{
+	struct vnode *vp, *mvp;
+	struct thread *td;
+	int error, allerror = 0;
+
+	td = curthread;
+
+	MNT_ILOCK(mp);
+	/*
+	 * If a forced dismount is in progress, return from here so that
+	 * the umount(2) syscall doesn't get stuck in VFS_SYNC() before
+	 * calling VFS_UNMOUNT().
+	 */
+	if (NFSCL_FORCEDISM(mp)) {
+		MNT_IUNLOCK(mp);
+		return (EBADF);
+	}
+	MNT_IUNLOCK(mp);
+
+	/*
+	 * Force stale buffer cache information to be flushed.
+	 */
+loop:
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+		/* XXX Racy bv_cnt check. */
+		if (NFSVOPISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
+		    waitfor == MNT_LAZY) {
+			VI_UNLOCK(vp);
+			continue;
+		}
+		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
+			goto loop;
+		}
+		error = VOP_FSYNC(vp, waitfor, td);
+		if (error)
+			allerror = error;
+		NFSVOPUNLOCK(vp, 0);
+		vrele(vp);
+	}
+	return (allerror);
+}
+
+static int
+nfs_sysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
+{
+	struct nfsmount *nmp = VFSTONFS(mp);
+	struct vfsquery vq;
+	int error;
+
+	bzero(&vq, sizeof(vq));
+	switch (op) {
+#if 0
+	case VFS_CTL_NOLOCKS:
+		val = (nmp->nm_flag & NFSMNT_NOLOCKS) ? 1 : 0;
+ 		if (req->oldptr != NULL) {
+ 			error = SYSCTL_OUT(req, &val, sizeof(val));
+ 			if (error)
+ 				return (error);
+ 		}
+ 		if (req->newptr != NULL) {
+ 			error = SYSCTL_IN(req, &val, sizeof(val));
+ 			if (error)
+ 				return (error);
+			if (val)
+				nmp->nm_flag |= NFSMNT_NOLOCKS;
+			else
+				nmp->nm_flag &= ~NFSMNT_NOLOCKS;
+ 		}
+		break;
+#endif
+	case VFS_CTL_QUERY:
+		mtx_lock(&nmp->nm_mtx);
+		if (nmp->nm_state & NFSSTA_TIMEO)
+			vq.vq_flags |= VQ_NOTRESP;
+		mtx_unlock(&nmp->nm_mtx);
+#if 0
+		if (!(nmp->nm_flag & NFSMNT_NOLOCKS) &&
+		    (nmp->nm_state & NFSSTA_LOCKTIMEO))
+			vq.vq_flags |= VQ_NOTRESPLOCK;
+#endif
+		error = SYSCTL_OUT(req, &vq, sizeof(vq));
+		break;
+ 	case VFS_CTL_TIMEO:
+ 		if (req->oldptr != NULL) {
+ 			error = SYSCTL_OUT(req, &nmp->nm_tprintf_initial_delay,
+ 			    sizeof(nmp->nm_tprintf_initial_delay));
+ 			if (error)
+ 				return (error);
+ 		}
+ 		if (req->newptr != NULL) {
+			error = vfs_suser(mp, req->td);
+			if (error)
+				return (error);
+ 			error = SYSCTL_IN(req, &nmp->nm_tprintf_initial_delay,
+ 			    sizeof(nmp->nm_tprintf_initial_delay));
+ 			if (error)
+ 				return (error);
+ 			if (nmp->nm_tprintf_initial_delay < 0)
+ 				nmp->nm_tprintf_initial_delay = 0;
+ 		}
+		break;
+	default:
+		return (ENOTSUP);
+	}
+	return (0);
+}
+
+/*
+ * Purge any RPCs in progress, so that they will all return errors.
+ * This allows dounmount() to continue as far as VFS_UNMOUNT() for a
+ * forced dismount.
+ */
+static void
+nfs_purge(struct mount *mp)
+{
+	struct nfsmount *nmp = VFSTONFS(mp);
+
+	newnfs_nmcancelreqs(nmp);
+}
+
+/*
+ * Extract the information needed by the nlm from the nfs vnode.
+ */
+static void
+nfs_getnlminfo(struct vnode *vp, uint8_t *fhp, size_t *fhlenp,
+    struct sockaddr_storage *sp, int *is_v3p, off_t *sizep,
+    struct timeval *timeop)
+{
+	struct nfsmount *nmp;
+	struct nfsnode *np = VTONFS(vp);
+
+	nmp = VFSTONFS(vp->v_mount);
+	if (fhlenp != NULL)
+		*fhlenp = (size_t)np->n_fhp->nfh_len;
+	if (fhp != NULL)
+		bcopy(np->n_fhp->nfh_fh, fhp, np->n_fhp->nfh_len);
+	if (sp != NULL)
+		bcopy(nmp->nm_nam, sp, min(nmp->nm_nam->sa_len, sizeof(*sp)));
+	if (is_v3p != NULL)
+		*is_v3p = NFS_ISV3(vp);
+	if (sizep != NULL)
+		*sizep = np->n_size;
+	if (timeop != NULL) {
+		timeop->tv_sec = nmp->nm_timeo / NFS_HZ;
+		timeop->tv_usec = (nmp->nm_timeo % NFS_HZ) * (1000000 / NFS_HZ);
+	}
+}
+
+/*
+ * This function prints out an option name, based on the conditional
+ * argument.
+ */
+static __inline void nfscl_printopt(struct nfsmount *nmp, int testval,
+    char *opt, char **buf, size_t *blen)
+{
+	int len;
+
+	if (testval != 0 && *blen > strlen(opt)) {
+		len = snprintf(*buf, *blen, "%s", opt);
+		if (len != strlen(opt))
+			printf("EEK!!\n");
+		*buf += len;
+		*blen -= len;
+	}
+}
+
+/*
+ * This function prints out an option's integer value.
+ */
+static __inline void nfscl_printoptval(struct nfsmount *nmp, int optval,
+    char *opt, char **buf, size_t *blen)
+{
+	int len;
+
+	if (*blen > strlen(opt) + 1) {
+		/* Could result in truncated output string. */
+		len = snprintf(*buf, *blen, "%s=%d", opt, optval);
+		if (len < *blen) {
+			*buf += len;
+			*blen -= len;
+		}
+	}
+}
+
+/*
+ * Load the option flags and values into the buffer.
+ */
+void nfscl_retopts(struct nfsmount *nmp, char *buffer, size_t buflen)
+{
+	char *buf;
+	size_t blen;
+
+	buf = buffer;
+	blen = buflen;
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_NFSV4) != 0, "nfsv4", &buf,
+	    &blen);
+	if ((nmp->nm_flag & NFSMNT_NFSV4) != 0) {
+		nfscl_printoptval(nmp, nmp->nm_minorvers, ",minorversion", &buf,
+		    &blen);
+		nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_PNFS) != 0, ",pnfs",
+		    &buf, &blen);
+		nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_ONEOPENOWN) != 0 &&
+		    nmp->nm_minorvers > 0, ",oneopenown", &buf, &blen);
+	}
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_NFSV3) != 0, "nfsv3", &buf,
+	    &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) == 0,
+	    "nfsv2", &buf, &blen);
+	nfscl_printopt(nmp, nmp->nm_sotype == SOCK_STREAM, ",tcp", &buf, &blen);
+	nfscl_printopt(nmp, nmp->nm_sotype != SOCK_STREAM, ",udp", &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_RESVPORT) != 0, ",resvport",
+	    &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_NOCONN) != 0, ",noconn",
+	    &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_SOFT) == 0, ",hard", &buf,
+	    &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_SOFT) != 0, ",soft", &buf,
+	    &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_INT) != 0, ",intr", &buf,
+	    &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_NOCTO) == 0, ",cto", &buf,
+	    &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_NOCTO) != 0, ",nocto", &buf,
+	    &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0,
+	    ",noncontigwr", &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & (NFSMNT_NOLOCKD | NFSMNT_NFSV4)) ==
+	    0, ",lockd", &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & (NFSMNT_NOLOCKD | NFSMNT_NFSV4)) ==
+	    NFSMNT_NOLOCKD, ",nolockd", &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_RDIRPLUS) != 0, ",rdirplus",
+	    &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & NFSMNT_KERB) == 0, ",sec=sys",
+	    &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & (NFSMNT_KERB | NFSMNT_INTEGRITY |
+	    NFSMNT_PRIVACY)) == NFSMNT_KERB, ",sec=krb5", &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & (NFSMNT_KERB | NFSMNT_INTEGRITY |
+	    NFSMNT_PRIVACY)) == (NFSMNT_KERB | NFSMNT_INTEGRITY), ",sec=krb5i",
+	    &buf, &blen);
+	nfscl_printopt(nmp, (nmp->nm_flag & (NFSMNT_KERB | NFSMNT_INTEGRITY |
+	    NFSMNT_PRIVACY)) == (NFSMNT_KERB | NFSMNT_PRIVACY), ",sec=krb5p",
+	    &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_acdirmin, ",acdirmin", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_acdirmax, ",acdirmax", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_acregmin, ",acregmin", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_acregmax, ",acregmax", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_nametimeo, ",nametimeo", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_negnametimeo, ",negnametimeo", &buf,
+	    &blen);
+	nfscl_printoptval(nmp, nmp->nm_rsize, ",rsize", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_wsize, ",wsize", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_readdirsize, ",readdirsize", &buf,
+	    &blen);
+	nfscl_printoptval(nmp, nmp->nm_readahead, ",readahead", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_wcommitsize, ",wcommitsize", &buf,
+	    &blen);
+	nfscl_printoptval(nmp, nmp->nm_timeo, ",timeout", &buf, &blen);
+	nfscl_printoptval(nmp, nmp->nm_retry, ",retrans", &buf, &blen);
+}
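nfscl_retopts() fills a caller-supplied buffer with a comma-separated summary of the mount flags and tunables, using the two helpers above. A minimal usage sketch, assuming nmp refers to an established mount; the string in the comment is only illustrative, since the exact contents depend on nm_flag and the negotiated transfer sizes:

	char opts[256];

	nfscl_retopts(nmp, opts, sizeof(opts));
	/* e.g. "nfsv3,tcp,hard,cto,lockd,sec=sys,...,rsize=65536,wsize=65536" */
	printf("%s\n", opts);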
+
diff --git a/freebsd/sys/fs/nfsclient/nfs_clvnops.c b/freebsd/sys/fs/nfsclient/nfs_clvnops.c
new file mode 100644
index 0000000..478ee05
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_clvnops.c
@@ -0,0 +1,3604 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from nfs_vnops.c	8.16 (Berkeley) 5/27/95
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * vnode op calls for Sun NFS version 2, 3 and 4
+ */
+
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/resourcevar.h>
+#include <sys/proc.h>
+#include <sys/mount.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/jail.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/namei.h>
+#include <sys/socket.h>
+#include <sys/vnode.h>
+#include <sys/dirent.h>
+#include <sys/fcntl.h>
+#include <sys/lockf.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/signalvar.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+
+#include <fs/nfs/nfsport.h>
+#include <fs/nfsclient/nfsnode.h>
+#include <fs/nfsclient/nfsmount.h>
+#include <fs/nfsclient/nfs.h>
+#include <fs/nfsclient/nfs_kdtrace.h>
+
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+
+#include <nfs/nfs_lock.h>
+
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+
+dtrace_nfsclient_accesscache_flush_probe_func_t
+		dtrace_nfscl_accesscache_flush_done_probe;
+uint32_t	nfscl_accesscache_flush_done_id;
+
+dtrace_nfsclient_accesscache_get_probe_func_t
+		dtrace_nfscl_accesscache_get_hit_probe,
+		dtrace_nfscl_accesscache_get_miss_probe;
+uint32_t	nfscl_accesscache_get_hit_id;
+uint32_t	nfscl_accesscache_get_miss_id;
+
+dtrace_nfsclient_accesscache_load_probe_func_t
+		dtrace_nfscl_accesscache_load_done_probe;
+uint32_t	nfscl_accesscache_load_done_id;
+#endif /* KDTRACE_HOOKS */
+
+/* Defs */
+#define	TRUE	1
+#define	FALSE	0
+
+extern struct nfsstatsv1 nfsstatsv1;
+extern int nfsrv_useacl;
+extern int nfscl_debuglevel;
+MALLOC_DECLARE(M_NEWNFSREQ);
+
+static vop_read_t	nfsfifo_read;
+static vop_write_t	nfsfifo_write;
+static vop_close_t	nfsfifo_close;
+static int	nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
+		    struct thread *);
+static vop_lookup_t	nfs_lookup;
+static vop_create_t	nfs_create;
+static vop_mknod_t	nfs_mknod;
+static vop_open_t	nfs_open;
+static vop_pathconf_t	nfs_pathconf;
+static vop_close_t	nfs_close;
+static vop_access_t	nfs_access;
+static vop_getattr_t	nfs_getattr;
+static vop_setattr_t	nfs_setattr;
+static vop_read_t	nfs_read;
+static vop_fsync_t	nfs_fsync;
+static vop_remove_t	nfs_remove;
+static vop_link_t	nfs_link;
+static vop_rename_t	nfs_rename;
+static vop_mkdir_t	nfs_mkdir;
+static vop_rmdir_t	nfs_rmdir;
+static vop_symlink_t	nfs_symlink;
+static vop_readdir_t	nfs_readdir;
+static vop_strategy_t	nfs_strategy;
+static	int	nfs_lookitup(struct vnode *, char *, int,
+		    struct ucred *, struct thread *, struct nfsnode **);
+static	int	nfs_sillyrename(struct vnode *, struct vnode *,
+		    struct componentname *);
+static vop_access_t	nfsspec_access;
+static vop_readlink_t	nfs_readlink;
+static vop_print_t	nfs_print;
+static vop_advlock_t	nfs_advlock;
+static vop_advlockasync_t nfs_advlockasync;
+static vop_getacl_t nfs_getacl;
+static vop_setacl_t nfs_setacl;
+static vop_lock1_t	nfs_lock;
+
+/*
+ * Global vfs data structures for nfs
+ */
+struct vop_vector newnfs_vnodeops = {
+	.vop_default =		&default_vnodeops,
+	.vop_access =		nfs_access,
+	.vop_advlock =		nfs_advlock,
+	.vop_advlockasync =	nfs_advlockasync,
+	.vop_close =		nfs_close,
+	.vop_create =		nfs_create,
+	.vop_fsync =		nfs_fsync,
+	.vop_getattr =		nfs_getattr,
+	.vop_getpages =		ncl_getpages,
+	.vop_putpages =		ncl_putpages,
+	.vop_inactive =		ncl_inactive,
+	.vop_link =		nfs_link,
+	.vop_lock1 =		nfs_lock,
+	.vop_lookup =		nfs_lookup,
+	.vop_mkdir =		nfs_mkdir,
+	.vop_mknod =		nfs_mknod,
+	.vop_open =		nfs_open,
+	.vop_pathconf =		nfs_pathconf,
+	.vop_print =		nfs_print,
+	.vop_read =		nfs_read,
+	.vop_readdir =		nfs_readdir,
+	.vop_readlink =		nfs_readlink,
+	.vop_reclaim =		ncl_reclaim,
+	.vop_remove =		nfs_remove,
+	.vop_rename =		nfs_rename,
+	.vop_rmdir =		nfs_rmdir,
+	.vop_setattr =		nfs_setattr,
+	.vop_strategy =		nfs_strategy,
+	.vop_symlink =		nfs_symlink,
+	.vop_write =		ncl_write,
+	.vop_getacl =		nfs_getacl,
+	.vop_setacl =		nfs_setacl,
+};
+
+struct vop_vector newnfs_fifoops = {
+	.vop_default =		&fifo_specops,
+	.vop_access =		nfsspec_access,
+	.vop_close =		nfsfifo_close,
+	.vop_fsync =		nfs_fsync,
+	.vop_getattr =		nfs_getattr,
+	.vop_inactive =		ncl_inactive,
+	.vop_pathconf =		nfs_pathconf,
+	.vop_print =		nfs_print,
+	.vop_read =		nfsfifo_read,
+	.vop_reclaim =		ncl_reclaim,
+	.vop_setattr =		nfs_setattr,
+	.vop_write =		nfsfifo_write,
+};
+
+static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
+    struct componentname *cnp, struct vattr *vap);
+static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
+    int namelen, struct ucred *cred, struct thread *td);
+static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
+    char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
+    char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
+static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
+    struct componentname *scnp, struct sillyrename *sp);
+
+/*
+ * Global variables
+ */
+SYSCTL_DECL(_vfs_nfs);
+
+static int	nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
+	   &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
+
+static int	nfs_prime_access_cache = 0;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
+	   &nfs_prime_access_cache, 0,
+	   "Prime NFS ACCESS cache when fetching attributes");
+
+static int	newnfs_commit_on_close = 0;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
+    &newnfs_commit_on_close, 0, "write+commit on close, else only write");
+
+static int	nfs_clean_pages_on_close = 1;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
+	   &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
+
+int newnfs_directio_enable = 0;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
+	   &newnfs_directio_enable, 0, "Enable NFS directio");
+
+int nfs_keep_dirty_on_error;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW,
+    &nfs_keep_dirty_on_error, 0, "Retry pageout if error returned");
+
+/*
+ * This sysctl allows other processes to mmap a file that has been opened
+ * O_DIRECT by a process.  In general, having processes mmap the file while
+ * Direct IO is in progress can lead to Data Inconsistencies.  But, we allow
+ * this by default to prevent DoS attacks - to prevent a malicious user from
+ * opening up files O_DIRECT preventing other users from mmap'ing these
+ * files.  "Protected" environments where stricter consistency guarantees are
+ * required can disable this knob.  The process that opened the file O_DIRECT
+ * cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not
+ * meaningful.
+ */
+int newnfs_directio_allow_mmap = 1;
+SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
+	   &newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
+
+#define	NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY		\
+			 | NFSACCESS_EXTEND | NFSACCESS_EXECUTE	\
+			 | NFSACCESS_DELETE | NFSACCESS_LOOKUP)
+
+/*
+ * SMP Locking Note :
+ * The list of locks after the description of the lock is the ordering
+ * of other locks acquired with the lock held.
+ * np->n_mtx : Protects the fields in the nfsnode.
+       VM Object Lock
+       VI_MTX (acquired indirectly)
+ * nmp->nm_mtx : Protects the fields in the nfsmount.
+       rep->r_mtx
+ * ncl_iod_mutex : Global lock, protects shared nfsiod state.
+ * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
+       nmp->nm_mtx
+       rep->r_mtx
+ * rep->r_mtx : Protects the fields in an nfsreq.
+ */
+
+static int
+nfs_lock(struct vop_lock1_args *ap)
+{
+	struct vnode *vp;
+	struct nfsnode *np;
+	u_quad_t nsize;
+	int error, lktype;
+	bool onfault;
+
+	vp = ap->a_vp;
+	lktype = ap->a_flags & LK_TYPE_MASK;
+	error = VOP_LOCK1_APV(&default_vnodeops, ap);
+	if (error != 0 || vp->v_op != &newnfs_vnodeops)
+		return (error);
+	np = VTONFS(vp);
+	if (np == NULL)
+		return (0);
+	NFSLOCKNODE(np);
+	if ((np->n_flag & NVNSETSZSKIP) == 0 || (lktype != LK_SHARED &&
+	    lktype != LK_EXCLUSIVE && lktype != LK_UPGRADE &&
+	    lktype != LK_TRYUPGRADE)) {
+		NFSUNLOCKNODE(np);
+		return (0);
+	}
+	onfault = (ap->a_flags & LK_EATTR_MASK) == LK_NOWAIT &&
+	    (ap->a_flags & LK_INIT_MASK) == LK_CANRECURSE &&
+	    (lktype == LK_SHARED || lktype == LK_EXCLUSIVE);
+	if (onfault && vp->v_vnlock->lk_recurse == 0) {
+		/*
+		 * Force retry in vm_fault(), to make the lock request
+		 * sleepable, which allows us to piggy-back the
+		 * sleepable call to vnode_pager_setsize().
+		 */
+		NFSUNLOCKNODE(np);
+		VOP_UNLOCK(vp, 0);
+		return (EBUSY);
+	}
+	if ((ap->a_flags & LK_NOWAIT) != 0 ||
+	    (lktype == LK_SHARED && vp->v_vnlock->lk_recurse > 0)) {
+		NFSUNLOCKNODE(np);
+		return (0);
+	}
+	if (lktype == LK_SHARED) {
+		NFSUNLOCKNODE(np);
+		VOP_UNLOCK(vp, 0);
+		ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
+		ap->a_flags |= LK_EXCLUSIVE;
+		error = VOP_LOCK1_APV(&default_vnodeops, ap);
+		if (error != 0 || vp->v_op != &newnfs_vnodeops)
+			return (error);
+		if (vp->v_data == NULL)
+			goto downgrade;
+		MPASS(vp->v_data == np);
+		NFSLOCKNODE(np);
+		if ((np->n_flag & NVNSETSZSKIP) == 0) {
+			NFSUNLOCKNODE(np);
+			goto downgrade;
+		}
+	}
+	np->n_flag &= ~NVNSETSZSKIP;
+	nsize = np->n_size;
+	NFSUNLOCKNODE(np);
+	vnode_pager_setsize(vp, nsize);
+downgrade:
+	if (lktype == LK_SHARED) {
+		ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
+		ap->a_flags |= LK_DOWNGRADE;
+		(void)VOP_LOCK1_APV(&default_vnodeops, ap);
+	}
+	return (0);
+}
+
+static int
+nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
+    struct ucred *cred, u_int32_t *retmode)
+{
+	int error = 0, attrflag, i, lrupos;
+	u_int32_t rmode;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsvattr nfsva;
+
+	error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
+	    &rmode, NULL);
+	if (attrflag)
+		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+	if (!error) {
+		lrupos = 0;
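+		/*
+		 * Record the rights the server returned: reuse this uid's
+		 * cache slot if one already exists, otherwise overwrite the
+		 * entry with the oldest timestamp (lrupos).
+		 */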
+		NFSLOCKNODE(np);
+		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
+			if (np->n_accesscache[i].uid == cred->cr_uid) {
+				np->n_accesscache[i].mode = rmode;
+				np->n_accesscache[i].stamp = time_second;
+				break;
+			}
+			if (i > 0 && np->n_accesscache[i].stamp <
+			    np->n_accesscache[lrupos].stamp)
+				lrupos = i;
+		}
+		if (i == NFS_ACCESSCACHESIZE) {
+			np->n_accesscache[lrupos].uid = cred->cr_uid;
+			np->n_accesscache[lrupos].mode = rmode;
+			np->n_accesscache[lrupos].stamp = time_second;
+		}
+		NFSUNLOCKNODE(np);
+		if (retmode != NULL)
+			*retmode = rmode;
+		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
+	} else if (NFS_ISV4(vp)) {
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	}
+#ifdef KDTRACE_HOOKS
+	if (error != 0)
+		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
+		    error);
+#endif
+	return (error);
+}
+
+/*
+ * nfs access vnode op.
+ * For nfs version 2, just return ok. File accesses may fail later.
+ * For nfs version 3, use the access rpc to check accessibility. If file modes
+ * are changed on the server, accesses might still fail later.
+ */
+static int
+nfs_access(struct vop_access_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	int error = 0, i, gotahit;
+	u_int32_t mode, wmode, rmode;
+	int v34 = NFS_ISV34(vp);
+	struct nfsnode *np = VTONFS(vp);
+
+	/*
+	 * Disallow write attempts on filesystems mounted read-only;
+	 * unless the file is a socket, fifo, or a block or character
+	 * device resident on the filesystem.
+	 */
+	if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
+	    VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
+	    VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
+		switch (vp->v_type) {
+		case VREG:
+		case VDIR:
+		case VLNK:
+			return (EROFS);
+		default:
+			break;
+		}
+	}
+	/*
+	 * For nfs v3 or v4, check to see if we have done this recently, and if
+	 * so return our cached result instead of making an ACCESS call.
+	 * If not, do an access rpc, otherwise you are stuck emulating
+	 * ufs_access() locally using the vattr. This may not be correct,
+	 * since the server may apply other access criteria such as
+	 * client uid-->server uid mapping that we do not know about.
+	 */
+	if (v34) {
+		if (ap->a_accmode & VREAD)
+			mode = NFSACCESS_READ;
+		else
+			mode = 0;
+		if (vp->v_type != VDIR) {
+			if (ap->a_accmode & VWRITE)
+				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
+			if (ap->a_accmode & VAPPEND)
+				mode |= NFSACCESS_EXTEND;
+			if (ap->a_accmode & VEXEC)
+				mode |= NFSACCESS_EXECUTE;
+			if (ap->a_accmode & VDELETE)
+				mode |= NFSACCESS_DELETE;
+		} else {
+			if (ap->a_accmode & VWRITE)
+				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
+			if (ap->a_accmode & VAPPEND)
+				mode |= NFSACCESS_EXTEND;
+			if (ap->a_accmode & VEXEC)
+				mode |= NFSACCESS_LOOKUP;
+			if (ap->a_accmode & VDELETE)
+				mode |= NFSACCESS_DELETE;
+			if (ap->a_accmode & VDELETE_CHILD)
+				mode |= NFSACCESS_MODIFY;
+		}
+		/* XXX safety belt, only make blanket request if caching */
+		if (nfsaccess_cache_timeout > 0) {
+			wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
+				NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
+				NFSACCESS_DELETE | NFSACCESS_LOOKUP;
+		} else {
+			wmode = mode;
+		}
+
+		/*
+		 * Does our cached result allow us to give a definite yes to
+		 * this request?
+		 */
+		gotahit = 0;
+		NFSLOCKNODE(np);
+		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
+			if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
+			    if (time_second < (np->n_accesscache[i].stamp
+				+ nfsaccess_cache_timeout) &&
+				(np->n_accesscache[i].mode & mode) == mode) {
+				NFSINCRGLOBAL(nfsstatsv1.accesscache_hits);
+				gotahit = 1;
+			    }
+			    break;
+			}
+		}
+		NFSUNLOCKNODE(np);
+#ifdef KDTRACE_HOOKS
+		if (gotahit != 0)
+			KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
+			    ap->a_cred->cr_uid, mode);
+		else
+			KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
+			    ap->a_cred->cr_uid, mode);
+#endif
+		if (gotahit == 0) {
+			/*
+			 * Either a no, or a don't know.  Go to the wire.
+			 */
+			NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
+		        error = nfs34_access_otw(vp, wmode, ap->a_td,
+			    ap->a_cred, &rmode);
+			if (!error &&
+			    (rmode & mode) != mode)
+				error = EACCES;
+		}
+		return (error);
+	} else {
+		if ((error = nfsspec_access(ap)) != 0) {
+			return (error);
+		}
+		/*
+		 * Attempt to prevent a mapped root from accessing a file
+		 * which it shouldn't.  We try to read a byte from the file
+		 * if the user is root and the file is not zero length.
+		 * After calling nfsspec_access, we should have the correct
+		 * file size cached.
+		 */
+		NFSLOCKNODE(np);
+		if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
+		    && VTONFS(vp)->n_size > 0) {
+			struct iovec aiov;
+			struct uio auio;
+			char buf[1];
+
+			NFSUNLOCKNODE(np);
+			aiov.iov_base = buf;
+			aiov.iov_len = 1;
+			auio.uio_iov = &aiov;
+			auio.uio_iovcnt = 1;
+			auio.uio_offset = 0;
+			auio.uio_resid = 1;
+			auio.uio_segflg = UIO_SYSSPACE;
+			auio.uio_rw = UIO_READ;
+			auio.uio_td = ap->a_td;
+
+			if (vp->v_type == VREG)
+				error = ncl_readrpc(vp, &auio, ap->a_cred);
+			else if (vp->v_type == VDIR) {
+				char* bp;
+				bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
+				aiov.iov_base = bp;
+				aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
+				error = ncl_readdirrpc(vp, &auio, ap->a_cred,
+				    ap->a_td);
+				free(bp, M_TEMP);
+			} else if (vp->v_type == VLNK)
+				error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
+			else
+				error = EACCES;
+		} else
+			NFSUNLOCKNODE(np);
+		return (error);
+	}
+}
+
+
+/*
+ * nfs open vnode op
+ * Check to see if the type is ok
+ * and that deletion is not in progress.
+ * For paged in text files, you will need to flush the page cache
+ * if consistency is lost.
+ */
+/* ARGSUSED */
+static int
+nfs_open(struct vop_open_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+	struct vattr vattr;
+	int error;
+	int fmode = ap->a_mode;
+	struct ucred *cred;
+	vm_object_t obj;
+
+	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
+		return (EOPNOTSUPP);
+
+	/*
+	 * For NFSv4, we need to do the Open Op before cache validation,
+	 * so that we conform to RFC3530 Sec. 9.3.1.
+	 */
+	if (NFS_ISV4(vp)) {
+		error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
+		if (error) {
+			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
+			    (gid_t)0);
+			return (error);
+		}
+	}
+
+	/*
+	 * Now, if this Open will be doing reading, re-validate/flush the
+	 * cache, so that Close/Open coherency is maintained.
+	 */
+	NFSLOCKNODE(np);
+	if (np->n_flag & NMODIFIED) {
+		NFSUNLOCKNODE(np);
+		error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
+		if (error == EINTR || error == EIO) {
+			if (NFS_ISV4(vp))
+				(void) nfsrpc_close(vp, 0, ap->a_td);
+			return (error);
+		}
+		NFSLOCKNODE(np);
+		np->n_attrstamp = 0;
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+		if (vp->v_type == VDIR)
+			np->n_direofoffset = 0;
+		NFSUNLOCKNODE(np);
+		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
+		if (error) {
+			if (NFS_ISV4(vp))
+				(void) nfsrpc_close(vp, 0, ap->a_td);
+			return (error);
+		}
+		NFSLOCKNODE(np);
+		np->n_mtime = vattr.va_mtime;
+		if (NFS_ISV4(vp))
+			np->n_change = vattr.va_filerev;
+	} else {
+		NFSUNLOCKNODE(np);
+		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
+		if (error) {
+			if (NFS_ISV4(vp))
+				(void) nfsrpc_close(vp, 0, ap->a_td);
+			return (error);
+		}
+		NFSLOCKNODE(np);
+		if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
+		    NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
+			if (vp->v_type == VDIR)
+				np->n_direofoffset = 0;
+			NFSUNLOCKNODE(np);
+			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
+			if (error == EINTR || error == EIO) {
+				if (NFS_ISV4(vp))
+					(void) nfsrpc_close(vp, 0, ap->a_td);
+				return (error);
+			}
+			NFSLOCKNODE(np);
+			np->n_mtime = vattr.va_mtime;
+			if (NFS_ISV4(vp))
+				np->n_change = vattr.va_filerev;
+		}
+	}
+
+	/*
+	 * If the object has >= 1 O_DIRECT active opens, we disable caching.
+	 */
+	if (newnfs_directio_enable && (fmode & O_DIRECT) &&
+	    (vp->v_type == VREG)) {
+		if (np->n_directio_opens == 0) {
+			NFSUNLOCKNODE(np);
+			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
+			if (error) {
+				if (NFS_ISV4(vp))
+					(void) nfsrpc_close(vp, 0, ap->a_td);
+				return (error);
+			}
+			NFSLOCKNODE(np);
+			np->n_flag |= NNONCACHE;
+		}
+		np->n_directio_opens++;
+	}
+
+	/* If opened for writing via NFSv4.1 or later, mark that for pNFS. */
+	if (NFSHASPNFS(VFSTONFS(vp->v_mount)) && (fmode & FWRITE) != 0)
+		np->n_flag |= NWRITEOPENED;
+
+	/*
+	 * If this is an open for writing, capture a reference to the
+	 * credentials, so they can be used by ncl_putpages(). Using
+	 * these write credentials is preferable to the credentials of
+	 * whatever thread happens to be doing the VOP_PUTPAGES() since
+	 * the write RPCs are less likely to fail with EACCES.
+	 */
+	if ((fmode & FWRITE) != 0) {
+		cred = np->n_writecred;
+		np->n_writecred = crhold(ap->a_cred);
+	} else
+		cred = NULL;
+	NFSUNLOCKNODE(np);
+
+	if (cred != NULL)
+		crfree(cred);
+	vnode_create_vobject(vp, vattr.va_size, ap->a_td);
+
+	/*
+	 * If the text file has been mmap'd, flush any dirty pages to the
+	 * buffer cache and then...
+	 * Make sure all writes are pushed to the NFS server.  If this is not
+	 * done, the modify time of the file can change while the text
+	 * file is being executed.  This will cause the process that is
+	 * executing the text file to be terminated.
+	 */
+	if (vp->v_writecount <= -1) {
+		if ((obj = vp->v_object) != NULL &&
+		    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
+			VM_OBJECT_WLOCK(obj);
+			vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
+			VM_OBJECT_WUNLOCK(obj);
+		}
+
+		/* Now, flush the buffer cache. */
+		ncl_flush(vp, MNT_WAIT, curthread, 0, 0);
+
+		/* And, finally, make sure that n_mtime is up to date. */
+		np = VTONFS(vp);
+		NFSLOCKNODE(np);
+		np->n_mtime = np->n_vattr.na_mtime;
+		NFSUNLOCKNODE(np);
+	}
+	return (0);
+}
+
+/*
+ * nfs close vnode op
+ * What an NFS client should do upon close after writing is a debatable issue.
+ * Most NFS clients push delayed writes to the server upon close, basically for
+ * two reasons:
+ * 1 - So that any write errors may be reported back to the client process
+ *     doing the close system call. By far the two most likely errors are
+ *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
+ * 2 - To put a worst case upper bound on cache inconsistency between
+ *     multiple clients for the file.
+ * There is also a consistency problem for Version 2 of the protocol w.r.t.
+ * not being able to tell if other clients are writing a file concurrently,
+ * since there is no way of knowing if the changed modify time in the reply
+ * is only due to the write for this client.
+ * (NFS Version 3 provides weak cache consistency data in the reply that
+ *  should be sufficient to detect and handle this case.)
+ *
+ * The current code does the following:
+ * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
+ * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
+ *                     or commit them (this satisfies 1 and 2 except for the
+ *                     case where the server crashes after this close but
+ *                     before the commit RPC, which is felt to be "good
+ *                     enough". Changing the last argument to ncl_flush() to
+ *                     a 1 would force a commit operation, if it is felt a
+ *                     commit is necessary now.
+ * for NFS Version 4 - flush the dirty buffers and commit them, if
+ *		       nfscl_mustflush() says this is necessary.
+ *                     It is necessary if there is no write delegation held,
+ *                     in order to satisfy open/close coherency.
+ *                     If the file isn't cached on local stable storage,
+ *                     it may be necessary in order to detect "out of space"
+ *                     errors from the server, if the write delegation
+ *                     issued by the server doesn't allow the file to grow.
+ */
+/* ARGSUSED */
+static int
+nfs_close(struct vop_close_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+	struct nfsvattr nfsva;
+	struct ucred *cred;
+	int error = 0, ret, localcred = 0;
+	int fmode = ap->a_fflag;
+
+	if (NFSCL_FORCEDISM(vp->v_mount))
+		return (0);
+	/*
+	 * During shutdown, a_cred isn't valid, so just use root.
+	 */
+	if (ap->a_cred == NOCRED) {
+		cred = newnfs_getcred();
+		localcred = 1;
+	} else {
+		cred = ap->a_cred;
+	}
+	if (vp->v_type == VREG) {
+	    /*
+	     * Examine and clean dirty pages, regardless of NMODIFIED.
+	     * This closes a major hole in close-to-open consistency.
+	     * We want to push out all dirty pages (and buffers) on
+	     * close, regardless of whether they were dirtied by
+	     * mmap'ed writes or via write().
+	     */
+	    if (nfs_clean_pages_on_close && vp->v_object) {
+		VM_OBJECT_WLOCK(vp->v_object);
+		vm_object_page_clean(vp->v_object, 0, 0, 0);
+		VM_OBJECT_WUNLOCK(vp->v_object);
+	    }
+	    NFSLOCKNODE(np);
+	    if (np->n_flag & NMODIFIED) {
+		NFSUNLOCKNODE(np);
+		if (NFS_ISV3(vp)) {
+		    /*
+		     * Under NFSv3 we have dirty buffers to dispose of.  We
+		     * must flush them to the NFS server.  We have the option
+		     * of waiting all the way through the commit rpc or just
+		     * waiting for the initial write.  The default is to only
+		     * wait through the initial write so the data is in the
+		     * server's cache, which is roughly similar to the state
+		     * a standard disk subsystem leaves the file in on close().
+		     *
+		     * We cannot clear the NMODIFIED bit in np->n_flag due to
+		     * potential races with other processes, and certainly
+		     * cannot clear it if we don't commit.
+		     * These races occur when there is no longer the old
+		     * traditional vnode locking implemented for Vnode Ops.
+		     */
+		    int cm = newnfs_commit_on_close ? 1 : 0;
+		    error = ncl_flush(vp, MNT_WAIT, ap->a_td, cm, 0);
+		    /* np->n_flag &= ~NMODIFIED; */
+		} else if (NFS_ISV4(vp)) { 
+			if (nfscl_mustflush(vp) != 0) {
+				int cm = newnfs_commit_on_close ? 1 : 0;
+				error = ncl_flush(vp, MNT_WAIT, ap->a_td,
+				    cm, 0);
+				/*
+				 * as above w.r.t races when clearing
+				 * NMODIFIED.
+				 * np->n_flag &= ~NMODIFIED;
+				 */
+			}
+		} else {
+			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
+		}
+		NFSLOCKNODE(np);
+	    }
+ 	    /* 
+ 	     * Invalidate the attribute cache in all cases.
+ 	     * An open is going to fetch fresh attrs anyway, so other procs
+ 	     * on this node that have the file open will be forced to do an
+ 	     * otw attr fetch, but this is safe.
+	     * --> A user found that their RPC count dropped by 20% when
+	     *     this was commented out and I can't see any requirement
+	     *     for it, so I've disabled it when negative lookups are
+	     *     enabled. (What does this have to do with negative lookup
+	     *     caching? Well nothing, except it was reported by the
+	     *     same user that needed negative lookup caching and I wanted
+	     *     there to be a way to disable it to see if it
+	     *     is the cause of some caching/coherency issue that might
+	     *     crop up.)
+ 	     */
+	    if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
+		    np->n_attrstamp = 0;
+		    KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+	    }
+	    if (np->n_flag & NWRITEERR) {
+		np->n_flag &= ~NWRITEERR;
+		error = np->n_error;
+	    }
+	    NFSUNLOCKNODE(np);
+	}
+
+	if (NFS_ISV4(vp)) {
+		/*
+		 * Get attributes so "change" is up to date.
+		 */
+		if (error == 0 && nfscl_mustflush(vp) != 0 &&
+		    vp->v_type == VREG &&
+		    (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOCTO) == 0) {
+			ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
+			    NULL);
+			if (!ret) {
+				np->n_change = nfsva.na_filerev;
+				(void) nfscl_loadattrcache(&vp, &nfsva, NULL,
+				    NULL, 0, 0);
+			}
+		}
+
+		/*
+		 * and do the close.
+		 */
+		ret = nfsrpc_close(vp, 0, ap->a_td);
+		if (!error && ret)
+			error = ret;
+		if (error)
+			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
+			    (gid_t)0);
+	}
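+	/*
+	 * With direct I/O enabled there should be no outstanding
+	 * asynchronous direct writes left at close time.  Also drop the
+	 * O_DIRECT open count and, once the last O_DIRECT open goes away,
+	 * clear NNONCACHE so normal caching resumes.
+	 */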
+	if (newnfs_directio_enable)
+		KASSERT((np->n_directio_asyncwr == 0),
+			("nfs_close: dirty unflushed (%d) directio buffers\n",
+			 np->n_directio_asyncwr));
+	if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
+		NFSLOCKNODE(np);
+		KASSERT((np->n_directio_opens > 0),
+			("nfs_close: unexpected value (0) of n_directio_opens\n"));
+		np->n_directio_opens--;
+		if (np->n_directio_opens == 0)
+			np->n_flag &= ~NNONCACHE;
+		NFSUNLOCKNODE(np);
+	}
+	if (localcred)
+		NFSFREECRED(cred);
+	return (error);
+}
+
+/*
+ * nfs getattr call from vfs.
+ */
+static int
+nfs_getattr(struct vop_getattr_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct thread *td = curthread;	/* XXX */
+	struct nfsnode *np = VTONFS(vp);
+	int error = 0;
+	struct nfsvattr nfsva;
+	struct vattr *vap = ap->a_vap;
+	struct vattr vattr;
+
+	/*
+	 * Update local times for special files.
+	 */
+	NFSLOCKNODE(np);
+	if (np->n_flag & (NACC | NUPD))
+		np->n_flag |= NCHG;
+	NFSUNLOCKNODE(np);
+	/*
+	 * First look in the cache.
+	 */
+	if (ncl_getattrcache(vp, &vattr) == 0) {
+		vap->va_type = vattr.va_type;
+		vap->va_mode = vattr.va_mode;
+		vap->va_nlink = vattr.va_nlink;
+		vap->va_uid = vattr.va_uid;
+		vap->va_gid = vattr.va_gid;
+		vap->va_fsid = vattr.va_fsid;
+		vap->va_fileid = vattr.va_fileid;
+		vap->va_size = vattr.va_size;
+		vap->va_blocksize = vattr.va_blocksize;
+		vap->va_atime = vattr.va_atime;
+		vap->va_mtime = vattr.va_mtime;
+		vap->va_ctime = vattr.va_ctime;
+		vap->va_gen = vattr.va_gen;
+		vap->va_flags = vattr.va_flags;
+		vap->va_rdev = vattr.va_rdev;
+		vap->va_bytes = vattr.va_bytes;
+		vap->va_filerev = vattr.va_filerev;
+		/*
+		 * Get the local modify time for the case of a write
+		 * delegation.
+		 */
+		nfscl_deleggetmodtime(vp, &vap->va_mtime);
+		return (0);
+	}
+
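+	/*
+	 * When access cache priming is enabled, do an ACCESS RPC first;
+	 * the post-op attributes it returns may refill the attribute
+	 * cache and make a separate GETATTR RPC unnecessary.
+	 */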
+	if (NFS_ISV34(vp) && nfs_prime_access_cache &&
+	    nfsaccess_cache_timeout > 0) {
+		NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
+		nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
+		if (ncl_getattrcache(vp, ap->a_vap) == 0) {
+			nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
+			return (0);
+		}
+	}
+	error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
+	if (!error)
+		error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
+	if (!error) {
+		/*
+		 * Get the local modify time for the case of a write
+		 * delegation.
+		 */
+		nfscl_deleggetmodtime(vp, &vap->va_mtime);
+	} else if (NFS_ISV4(vp)) {
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	}
+	return (error);
+}
+
+/*
+ * nfs setattr call.
+ */
+static int
+nfs_setattr(struct vop_setattr_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+	struct thread *td = curthread;	/* XXX */
+	struct vattr *vap = ap->a_vap;
+	int error = 0;
+	u_quad_t tsize;
+
+#ifndef nolint
+	tsize = (u_quad_t)0;
+#endif
+
+	/*
+	 * Setting of flags and marking of atimes are not supported.
+	 */
+	if (vap->va_flags != VNOVAL)
+		return (EOPNOTSUPP);
+
+	/*
+	 * Disallow write attempts if the filesystem is mounted read-only.
+	 */
+  	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
+	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
+	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
+	    (vp->v_mount->mnt_flag & MNT_RDONLY))
+		return (EROFS);
+	if (vap->va_size != VNOVAL) {
+ 		switch (vp->v_type) {
+ 		case VDIR:
+ 			return (EISDIR);
+ 		case VCHR:
+ 		case VBLK:
+ 		case VSOCK:
+ 		case VFIFO:
+			if (vap->va_mtime.tv_sec == VNOVAL &&
+			    vap->va_atime.tv_sec == VNOVAL &&
+			    vap->va_mode == (mode_t)VNOVAL &&
+			    vap->va_uid == (uid_t)VNOVAL &&
+			    vap->va_gid == (gid_t)VNOVAL)
+				return (0);		
+ 			vap->va_size = VNOVAL;
+ 			break;
+ 		default:
+			/*
+			 * Disallow write attempts if the filesystem is
+			 * mounted read-only.
+			 */
+			if (vp->v_mount->mnt_flag & MNT_RDONLY)
+				return (EROFS);
+			/*
+			 * We run vnode_pager_setsize() early (why?); we
+			 * must set np->n_size now to avoid vinvalbuf
+			 * V_SAVE races that might setsize a lower
+			 * value.
+			 */
+			NFSLOCKNODE(np);
+			tsize = np->n_size;
+			NFSUNLOCKNODE(np);
+			error = ncl_meta_setsize(vp, td, vap->va_size);
+			NFSLOCKNODE(np);
+ 			if (np->n_flag & NMODIFIED) {
+			    tsize = np->n_size;
+			    NFSUNLOCKNODE(np);
+			    error = ncl_vinvalbuf(vp, vap->va_size == 0 ?
+			        0 : V_SAVE, td, 1);
+			    if (error != 0) {
+				    vnode_pager_setsize(vp, tsize);
+				    return (error);
+			    }
+			    /*
+			     * Call nfscl_delegmodtime() to set the modify time
+			     * locally, as required.
+			     */
+			    nfscl_delegmodtime(vp);
+ 			} else
+			    NFSUNLOCKNODE(np);
+			/*
+			 * np->n_size has already been set to vap->va_size
+			 * in ncl_meta_setsize(). We must set it again since
+			 * nfs_loadattrcache() could be called through
+			 * ncl_meta_setsize() and could modify np->n_size.
+			 */
+			NFSLOCKNODE(np);
+ 			np->n_vattr.na_size = np->n_size = vap->va_size;
+			NFSUNLOCKNODE(np);
+  		}
+  	} else {
+		NFSLOCKNODE(np);
+		if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && 
+		    (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
+			NFSUNLOCKNODE(np);
+			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
+			if (error == EINTR || error == EIO)
+				return (error);
+		} else
+			NFSUNLOCKNODE(np);
+	}
+	error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
+	if (error && vap->va_size != VNOVAL) {
+		NFSLOCKNODE(np);
+		np->n_size = np->n_vattr.na_size = tsize;
+		vnode_pager_setsize(vp, tsize);
+		NFSUNLOCKNODE(np);
+	}
+	return (error);
+}
+
+/*
+ * Do an nfs setattr rpc.
+ */
+static int
+nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
+    struct thread *td)
+{
+	struct nfsnode *np = VTONFS(vp);
+	int error, ret, attrflag, i;
+	struct nfsvattr nfsva;
+
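+	/*
+	 * Changing attributes may invalidate cached ACCESS results, so
+	 * flush the access cache (and mark the node modified for
+	 * delegation handling) before doing the RPC.
+	 */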
+	if (NFS_ISV34(vp)) {
+		NFSLOCKNODE(np);
+		for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
+			np->n_accesscache[i].stamp = 0;
+		np->n_flag |= NDELEGMOD;
+		NFSUNLOCKNODE(np);
+		KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
+	}
+	error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
+	    NULL);
+	if (attrflag) {
+		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+		if (ret && !error)
+			error = ret;
+	}
+	if (error && NFS_ISV4(vp))
+		error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
+	return (error);
+}
+
+/*
+ * nfs lookup call, one step at a time...
+ * First look in cache
+ * If not found, unlock the directory nfsnode and do the rpc
+ */
+static int
+nfs_lookup(struct vop_lookup_args *ap)
+{
+	struct componentname *cnp = ap->a_cnp;
+	struct vnode *dvp = ap->a_dvp;
+	struct vnode **vpp = ap->a_vpp;
+	struct mount *mp = dvp->v_mount;
+	int flags = cnp->cn_flags;
+	struct vnode *newvp;
+	struct nfsmount *nmp;
+	struct nfsnode *np, *newnp;
+	int error = 0, attrflag, dattrflag, ltype, ncticks;
+	struct thread *td = cnp->cn_thread;
+	struct nfsfh *nfhp;
+	struct nfsvattr dnfsva, nfsva;
+	struct vattr vattr;
+	struct timespec nctime;
+	
+	*vpp = NULLVP;
+	if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
+	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
+		return (EROFS);
+	if (dvp->v_type != VDIR)
+		return (ENOTDIR);
+	nmp = VFSTONFS(mp);
+	np = VTONFS(dvp);
+
+	/* For NFSv4, wait until any remove is done. */
+	NFSLOCKNODE(np);
+	while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
+		np->n_flag |= NREMOVEWANT;
+		(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
+	}
+	NFSUNLOCKNODE(np);
+
+	error = vn_dir_check_exec(dvp, cnp);
+	if (error != 0)
+		return (error);
+	error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
+	if (error > 0 && error != ENOENT)
+		return (error);
+	if (error == -1) {
+		/*
+		 * Lookups of "." are special and always return the
+		 * current directory.  cache_lookup() already handles
+		 * associated locking bookkeeping, etc.
+		 */
+		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
+			/* XXX: Is this really correct? */
+			if (cnp->cn_nameiop != LOOKUP &&
+			    (flags & ISLASTCN))
+				cnp->cn_flags |= SAVENAME;
+			return (0);
+		}
+
+		/*
+		 * We only accept a positive hit in the cache if the
+		 * change time of the file matches our cached copy.
+		 * Otherwise, we discard the cache entry and fallback
+		 * to doing a lookup RPC.  We also only trust cache
+		 * entries for less than nm_nametimeo seconds.
+		 *
+		 * To better handle stale file handles and attributes,
+		 * clear the attribute cache of this node if it is a
+		 * leaf component, part of an open() call, and not
+		 * locally modified before fetching the attributes.
+		 * This should allow stale file handles to be detected
+		 * here where we can fall back to a LOOKUP RPC to
+		 * recover rather than having nfs_open() detect the
+		 * stale file handle and failing open(2) with ESTALE.
+		 */
+		newvp = *vpp;
+		newnp = VTONFS(newvp);
+		if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
+		    (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
+		    !(newnp->n_flag & NMODIFIED)) {
+			NFSLOCKNODE(newnp);
+			newnp->n_attrstamp = 0;
+			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
+			NFSUNLOCKNODE(newnp);
+		}
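+		/*
+		 * A held delegation (nfscl_nodeleg() == 0) lets us trust
+		 * the cached vnode without re-checking the change time
+		 * against the server.
+		 */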
+		if (nfscl_nodeleg(newvp, 0) == 0 ||
+		    ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
+		    VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
+		    timespeccmp(&vattr.va_ctime, &nctime, ==))) {
+			NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
+			if (cnp->cn_nameiop != LOOKUP &&
+			    (flags & ISLASTCN))
+				cnp->cn_flags |= SAVENAME;
+			return (0);
+		}
+		cache_purge(newvp);
+		if (dvp != newvp)
+			vput(newvp);
+		else 
+			vrele(newvp);
+		*vpp = NULLVP;
+	} else if (error == ENOENT) {
+		if (dvp->v_iflag & VI_DOOMED)
+			return (ENOENT);
+		/*
+		 * We only accept a negative hit in the cache if the
+		 * modification time of the parent directory matches
+		 * the cached copy in the name cache entry.
+		 * Otherwise, we discard all of the negative cache
+		 * entries for this directory.  We also only trust
+		 * negative cache entries for up to nm_negnametimeo
+		 * seconds.
+		 */
+		if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
+		    VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
+		    timespeccmp(&vattr.va_mtime, &nctime, ==)) {
+			NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
+			return (ENOENT);
+		}
+		cache_purge_negative(dvp);
+	}
+
+	error = 0;
+	newvp = NULLVP;
+	NFSINCRGLOBAL(nfsstatsv1.lookupcache_misses);
+	error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
+	    cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
+	    NULL);
+	if (dattrflag)
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	if (error) {
+		if (newvp != NULLVP) {
+			vput(newvp);
+			*vpp = NULLVP;
+		}
+
+		if (error != ENOENT) {
+			if (NFS_ISV4(dvp))
+				error = nfscl_maperr(td, error, (uid_t)0,
+				    (gid_t)0);
+			return (error);
+		}
+
+		/* The requested file was not found. */
+		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
+		    (flags & ISLASTCN)) {
+			/*
+			 * XXX: UFS does a full VOP_ACCESS(dvp,
+			 * VWRITE) here instead of just checking
+			 * MNT_RDONLY.
+			 */
+			if (mp->mnt_flag & MNT_RDONLY)
+				return (EROFS);
+			cnp->cn_flags |= SAVENAME;
+			return (EJUSTRETURN);
+		}
+
+		if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag) {
+			/*
+			 * Cache the modification time of the parent
+			 * directory from the post-op attributes in
+			 * the name cache entry.  The negative cache
+			 * entry will be ignored once the directory
+			 * has changed.  Don't bother adding the entry
+			 * if the directory has already changed.
+			 */
+			NFSLOCKNODE(np);
+			if (timespeccmp(&np->n_vattr.na_mtime,
+			    &dnfsva.na_mtime, ==)) {
+				NFSUNLOCKNODE(np);
+				cache_enter_time(dvp, NULL, cnp,
+				    &dnfsva.na_mtime, NULL);
+			} else
+				NFSUNLOCKNODE(np);
+		}
+		return (ENOENT);
+	}
+
+	/*
+	 * Handle RENAME case...
+	 */
+	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
+		if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
+			free(nfhp, M_NFSFH);
+			return (EISDIR);
+		}
+		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
+		    LK_EXCLUSIVE);
+		if (error)
+			return (error);
+		newvp = NFSTOV(np);
+		if (attrflag)
+			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+		*vpp = newvp;
+		cnp->cn_flags |= SAVENAME;
+		return (0);
+	}
+
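+	/*
+	 * For "..", do not hold the child directory locked while
+	 * acquiring the parent, to preserve the vnode lock order:
+	 * busy the mount, unlock dvp, get the parent vnode, then
+	 * relock dvp and fail if it was doomed in the meantime.
+	 */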
+	if (flags & ISDOTDOT) {
+		ltype = NFSVOPISLOCKED(dvp);
+		error = vfs_busy(mp, MBF_NOWAIT);
+		if (error != 0) {
+			vfs_ref(mp);
+			NFSVOPUNLOCK(dvp, 0);
+			error = vfs_busy(mp, 0);
+			NFSVOPLOCK(dvp, ltype | LK_RETRY);
+			vfs_rel(mp);
+			if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
+				vfs_unbusy(mp);
+				error = ENOENT;
+			}
+			if (error != 0)
+				return (error);
+		}
+		NFSVOPUNLOCK(dvp, 0);
+		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
+		    cnp->cn_lkflags);
+		if (error == 0)
+			newvp = NFSTOV(np);
+		vfs_unbusy(mp);
+		if (newvp != dvp)
+			NFSVOPLOCK(dvp, ltype | LK_RETRY);
+		if (dvp->v_iflag & VI_DOOMED) {
+			if (error == 0) {
+				if (newvp == dvp)
+					vrele(newvp);
+				else
+					vput(newvp);
+			}
+			error = ENOENT;
+		}
+		if (error != 0)
+			return (error);
+		if (attrflag)
+			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+	} else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
+		free(nfhp, M_NFSFH);
+		VREF(dvp);
+		newvp = dvp;
+		if (attrflag)
+			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+	} else {
+		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
+		    cnp->cn_lkflags);
+		if (error)
+			return (error);
+		newvp = NFSTOV(np);
+		if (attrflag)
+			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+		else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
+		    !(np->n_flag & NMODIFIED)) {			
+			/*
+			 * Flush the attribute cache when opening a
+			 * leaf node to ensure that fresh attributes
+			 * are fetched in nfs_open() since we did not
+			 * fetch attributes from the LOOKUP reply.
+			 */
+			NFSLOCKNODE(np);
+			np->n_attrstamp = 0;
+			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
+			NFSUNLOCKNODE(np);
+		}
+	}
+	if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
+		cnp->cn_flags |= SAVENAME;
+	if ((cnp->cn_flags & MAKEENTRY) &&
+	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) &&
+	    attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0))
+		cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
+		    newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime);
+	*vpp = newvp;
+	return (0);
+}
+
+/*
+ * nfs read call.
+ * Just call ncl_bioread() to do the work.
+ */
+static int
+nfs_read(struct vop_read_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+
+	switch (vp->v_type) {
+	case VREG:
+		return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
+	case VDIR:
+		return (EISDIR);
+	default:
+		return (EOPNOTSUPP);
+	}
+}
+
+/*
+ * nfs readlink call
+ */
+static int
+nfs_readlink(struct vop_readlink_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+
+	if (vp->v_type != VLNK)
+		return (EINVAL);
+	return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
+}
+
+/*
+ * Do a readlink rpc.
+ * Called by ncl_doio() from below the buffer cache.
+ */
+int
+ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
+{
+	int error, ret, attrflag;
+	struct nfsvattr nfsva;
+
+	error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva,
+	    &attrflag, NULL);
+	if (attrflag) {
+		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+		if (ret && !error)
+			error = ret;
+	}
+	if (error && NFS_ISV4(vp))
+		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
+	return (error);
+}
+
+/*
+ * nfs read rpc call
+ * Ditto above
+ */
+int
+ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
+{
+	int error, ret, attrflag;
+	struct nfsvattr nfsva;
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	error = EIO;
+	attrflag = 0;
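+	/*
+	 * For pNFS mounts, try reading directly from the data server(s)
+	 * first; fall back to a regular READ RPC if that fails.
+	 */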
+	if (NFSHASPNFS(nmp))
+		error = nfscl_doiods(vp, uiop, NULL, NULL,
+		    NFSV4OPEN_ACCESSREAD, 0, cred, uiop->uio_td);
+	NFSCL_DEBUG(4, "readrpc: aft doiods=%d\n", error);
+	if (error != 0)
+		error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva,
+		    &attrflag, NULL);
+	if (attrflag) {
+		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+		if (ret && !error)
+			error = ret;
+	}
+	if (error && NFS_ISV4(vp))
+		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
+	return (error);
+}
+
+/*
+ * nfs write call
+ */
+int
+ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
+    int *iomode, int *must_commit, int called_from_strategy)
+{
+	struct nfsvattr nfsva;
+	int error, attrflag, ret;
+	struct nfsmount *nmp;
+
+	nmp = VFSTONFS(vnode_mount(vp));
+	error = EIO;
+	attrflag = 0;
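+	/*
+	 * For pNFS mounts, try writing directly to the data server(s)
+	 * first; fall back to a regular WRITE RPC if that fails.
+	 */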
+	if (NFSHASPNFS(nmp))
+		error = nfscl_doiods(vp, uiop, iomode, must_commit,
+		    NFSV4OPEN_ACCESSWRITE, 0, cred, uiop->uio_td);
+	NFSCL_DEBUG(4, "writerpc: aft doiods=%d\n", error);
+	if (error != 0)
+		error = nfsrpc_write(vp, uiop, iomode, must_commit, cred,
+		    uiop->uio_td, &nfsva, &attrflag, NULL,
+		    called_from_strategy);
+	if (attrflag) {
+		if (VTONFS(vp)->n_flag & ND_NFSV4)
+			ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1,
+			    1);
+		else
+			ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
+			    1);
+		if (ret && !error)
+			error = ret;
+	}
+	if (DOINGASYNC(vp))
+		*iomode = NFSWRITE_FILESYNC;
+	if (error && NFS_ISV4(vp))
+		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
+	return (error);
+}
+
+/*
+ * nfs mknod rpc
+ * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
+ * mode set to specify the file type and the size field for rdev.
+ */
+static int
+nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
+    struct vattr *vap)
+{
+	struct nfsvattr nfsva, dnfsva;
+	struct vnode *newvp = NULL;
+	struct nfsnode *np = NULL, *dnp;
+	struct nfsfh *nfhp;
+	struct vattr vattr;
+	int error = 0, attrflag, dattrflag;
+	u_int32_t rdev;
+
+	if (vap->va_type == VCHR || vap->va_type == VBLK)
+		rdev = vap->va_rdev;
+	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
+		rdev = 0xffffffff;
+	else
+		return (EOPNOTSUPP);
+	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
+		return (error);
+	error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
+	    rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva,
+	    &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
+	if (!error) {
+		if (!nfhp)
+			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
+			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
+			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
+			    NULL);
+		if (nfhp)
+			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
+			    cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
+	}
+	if (dattrflag)
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	if (!error) {
+		newvp = NFSTOV(np);
+		if (attrflag != 0) {
+			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+			if (error != 0)
+				vput(newvp);
+		}
+	}
+	if (!error) {
+		*vpp = newvp;
+	} else if (NFS_ISV4(dvp)) {
+		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
+		    vap->va_gid);
+	}
+	dnp = VTONFS(dvp);
+	NFSLOCKNODE(dnp);
+	dnp->n_flag |= NMODIFIED;
+	if (!dattrflag) {
+		dnp->n_attrstamp = 0;
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
+	}
+	NFSUNLOCKNODE(dnp);
+	return (error);
+}
+
+/*
+ * nfs mknod vop
+ * just call nfs_mknodrpc() to do the work.
+ */
+/* ARGSUSED */
+static int
+nfs_mknod(struct vop_mknod_args *ap)
+{
+	return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
+}
+
+static struct mtx nfs_cverf_mtx;
+MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
+    MTX_DEF);
+
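+/*
+ * Return the verifier used for NFSv3/v4 exclusive create: seeded with
+ * random data on first use and incremented on every call, so each
+ * create gets a distinct value.
+ */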
+static nfsquad_t
+nfs_get_cverf(void)
+{
+	static nfsquad_t cverf;
+	nfsquad_t ret;
+	static int cverf_initialized = 0;
+
+	mtx_lock(&nfs_cverf_mtx);
+	if (cverf_initialized == 0) {
+		cverf.lval[0] = arc4random();
+		cverf.lval[1] = arc4random();
+		cverf_initialized = 1;
+	} else
+		cverf.qval++;
+	ret = cverf;
+	mtx_unlock(&nfs_cverf_mtx);
+
+	return (ret);
+}
+
+/*
+ * nfs file create call
+ */
+static int
+nfs_create(struct vop_create_args *ap)
+{
+	struct vnode *dvp = ap->a_dvp;
+	struct vattr *vap = ap->a_vap;
+	struct componentname *cnp = ap->a_cnp;
+	struct nfsnode *np = NULL, *dnp;
+	struct vnode *newvp = NULL;
+	struct nfsmount *nmp;
+	struct nfsvattr dnfsva, nfsva;
+	struct nfsfh *nfhp;
+	nfsquad_t cverf;
+	int error = 0, attrflag, dattrflag, fmode = 0;
+	struct vattr vattr;
+
+	/*
+	 * Oops, not for me..
+	 */
+	if (vap->va_type == VSOCK)
+		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
+
+	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
+		return (error);
+	if (vap->va_vaflags & VA_EXCLUSIVE)
+		fmode |= O_EXCL;
+	dnp = VTONFS(dvp);
+	nmp = VFSTONFS(vnode_mount(dvp));
+again:
+	/* For NFSv4, wait until any remove is done. */
+	NFSLOCKNODE(dnp);
+	while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
+		dnp->n_flag |= NREMOVEWANT;
+		(void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
+	}
+	NFSUNLOCKNODE(dnp);
+
+	cverf = nfs_get_cverf();
+	error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
+	    vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva,
+	    &nfhp, &attrflag, &dattrflag, NULL);
+	if (!error) {
+		if (nfhp == NULL)
+			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
+			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
+			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
+			    NULL);
+		if (nfhp != NULL)
+			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
+			    cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
+	}
+	if (dattrflag)
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	if (!error) {
+		newvp = NFSTOV(np);
+		if (attrflag == 0)
+			error = nfsrpc_getattr(newvp, cnp->cn_cred,
+			    cnp->cn_thread, &nfsva, NULL);
+		if (error == 0)
+			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+	}
+	if (error) {
+		if (newvp != NULL) {
+			vput(newvp);
+			newvp = NULL;
+		}
+		if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
+		    error == NFSERR_NOTSUPP) {
+			fmode &= ~O_EXCL;
+			goto again;
+		}
+	} else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
+		if (nfscl_checksattr(vap, &nfsva)) {
+			error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
+			    cnp->cn_thread, &nfsva, &attrflag, NULL);
+			if (error && (vap->va_uid != (uid_t)VNOVAL ||
+			    vap->va_gid != (gid_t)VNOVAL)) {
+				/* try again without setting uid/gid */
+				vap->va_uid = (uid_t)VNOVAL;
+				vap->va_gid = (gid_t)VNOVAL;
+				error = nfsrpc_setattr(newvp, vap, NULL, 
+				    cnp->cn_cred, cnp->cn_thread, &nfsva,
+				    &attrflag, NULL);
+			}
+			if (attrflag)
+				(void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
+				    NULL, 0, 1);
+			if (error != 0)
+				vput(newvp);
+		}
+	}
+	if (!error) {
+		if ((cnp->cn_flags & MAKEENTRY) && attrflag)
+			cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
+			    NULL);
+		*ap->a_vpp = newvp;
+	} else if (NFS_ISV4(dvp)) {
+		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
+		    vap->va_gid);
+	}
+	NFSLOCKNODE(dnp);
+	dnp->n_flag |= NMODIFIED;
+	if (!dattrflag) {
+		dnp->n_attrstamp = 0;
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
+	}
+	NFSUNLOCKNODE(dnp);
+	return (error);
+}
+
+/*
+ * nfs file remove call
+ * To try and make nfs semantics closer to ufs semantics, a file that has
+ * other processes using the vnode is renamed instead of removed and then
+ * removed later on the last close.
+ * - If v_usecount > 1
+ *	If a rename is not already in the works
+ *	   call nfs_sillyrename() to set it up
+ * - else
+ *	do the remove rpc
+ */
+static int
+nfs_remove(struct vop_remove_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct vnode *dvp = ap->a_dvp;
+	struct componentname *cnp = ap->a_cnp;
+	struct nfsnode *np = VTONFS(vp);
+	int error = 0;
+	struct vattr vattr;
+
+	KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
+	KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
+	if (vp->v_type == VDIR)
+		error = EPERM;
+	else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
+	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
+	    vattr.va_nlink > 1)) {
+		/*
+		 * Purge the name cache so that the chance of a lookup for
+		 * the name succeeding while the remove is in progress is
+		 * minimized. Without node locking it can still happen, such
+		 * that an I/O op returns ESTALE, but since you get this if
+		 * another host removes the file..
+		 */
+		cache_purge(vp);
+		/*
+		 * throw away biocache buffers, mainly to avoid
+		 * unnecessary delayed writes later.
+		 */
+		error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1);
+		if (error != EINTR && error != EIO)
+			/* Do the rpc */
+			error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
+			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
+		/*
+		 * Kludge City: If the first reply to the remove rpc is lost..
+		 *   the reply to the retransmitted request will be ENOENT
+		 *   since the file was in fact removed.
+		 *   Therefore, we cheat and return success.
+		 */
+		if (error == ENOENT)
+			error = 0;
+	} else if (!np->n_sillyrename)
+		error = nfs_sillyrename(dvp, vp, cnp);
+	NFSLOCKNODE(np);
+	np->n_attrstamp = 0;
+	NFSUNLOCKNODE(np);
+	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+	return (error);
+}
+
+/*
+ * nfs file remove rpc called from nfs_inactive
+ */
+int
+ncl_removeit(struct sillyrename *sp, struct vnode *vp)
+{
+	/*
+	 * Make sure that the directory vnode is still valid.
+	 * XXX we should lock sp->s_dvp here.
+	 */
+	if (sp->s_dvp->v_type == VBAD)
+		return (0);
+	return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
+	    sp->s_cred, NULL));
+}
+
+/*
+ * Nfs remove rpc, called from nfs_remove() and ncl_removeit().
+ */
+static int
+nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
+    int namelen, struct ucred *cred, struct thread *td)
+{
+	struct nfsvattr dnfsva;
+	struct nfsnode *dnp = VTONFS(dvp);
+	int error = 0, dattrflag;
+
+	NFSLOCKNODE(dnp);
+	dnp->n_flag |= NREMOVEINPROG;
+	NFSUNLOCKNODE(dnp);
+	error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
+	    &dattrflag, NULL);
+	NFSLOCKNODE(dnp);
+	if ((dnp->n_flag & NREMOVEWANT)) {
+		dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
+		NFSUNLOCKNODE(dnp);
+		wakeup((caddr_t)dnp);
+	} else {
+		dnp->n_flag &= ~NREMOVEINPROG;
+		NFSUNLOCKNODE(dnp);
+	}
+	if (dattrflag)
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	NFSLOCKNODE(dnp);
+	dnp->n_flag |= NMODIFIED;
+	if (!dattrflag) {
+		dnp->n_attrstamp = 0;
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
+	}
+	NFSUNLOCKNODE(dnp);
+	if (error && NFS_ISV4(dvp))
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	return (error);
+}
+
+/*
+ * nfs file rename call
+ */
+static int
+nfs_rename(struct vop_rename_args *ap)
+{
+	struct vnode *fvp = ap->a_fvp;
+	struct vnode *tvp = ap->a_tvp;
+	struct vnode *fdvp = ap->a_fdvp;
+	struct vnode *tdvp = ap->a_tdvp;
+	struct componentname *tcnp = ap->a_tcnp;
+	struct componentname *fcnp = ap->a_fcnp;
+	struct nfsnode *fnp = VTONFS(ap->a_fvp);
+	struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
+	struct nfsv4node *newv4 = NULL;
+	int error;
+
+	KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
+	    (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
+	/* Check for cross-device rename */
+	if ((fvp->v_mount != tdvp->v_mount) ||
+	    (tvp && (fvp->v_mount != tvp->v_mount))) {
+		error = EXDEV;
+		goto out;
+	}
+
+	if (fvp == tvp) {
+		printf("nfs_rename: fvp == tvp (can't happen)\n");
+		error = 0;
+		goto out;
+	}
+	if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0)
+		goto out;
+
+	/*
+	 * We have to flush B_DELWRI data prior to renaming
+	 * the file.  If we don't, the delayed-write buffers
+	 * can be flushed out later after the file has gone stale
+	 * under NFSV3.  NFSV2 does not have this problem because
+	 * ( as far as I can tell ) it flushes dirty buffers more
+	 * often.
+	 * 
+	 * Skip the rename operation if the fsync fails, this can happen
+	 * due to the server's volume being full, when we pushed out data
+	 * that was written back to our cache earlier. Not checking for
+	 * this condition can result in potential (silent) data loss.
+	 */
+	error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
+	NFSVOPUNLOCK(fvp, 0);
+	if (!error && tvp)
+		error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
+	if (error)
+		goto out;
+
+	/*
+	 * If the tvp exists and is in use, sillyrename it before doing the
+	 * rename of the new file over it.
+	 * XXX Can't sillyrename a directory.
+	 */
+	if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
+		tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
+		vput(tvp);
+		tvp = NULL;
+	}
+
+	error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
+	    tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
+	    tcnp->cn_thread);
+
+	if (error == 0 && NFS_ISV4(tdvp)) {
+		/*
+		 * For NFSv4, check to see if it is the same name and
+		 * replace the name, if it is different.
+		 */
+		newv4 = malloc(
+		    sizeof (struct nfsv4node) +
+		    tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
+		    M_NFSV4NODE, M_WAITOK);
+		NFSLOCKNODE(tdnp);
+		NFSLOCKNODE(fnp);
+		if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
+		    (fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
+		      NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
+		      tcnp->cn_namelen) ||
+		      tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
+		      NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
+			tdnp->n_fhp->nfh_len))) {
+#ifdef notdef
+{ char nnn[100]; int nnnl;
+nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
+bcopy(tcnp->cn_nameptr, nnn, nnnl);
+nnn[nnnl] = '\0';
+printf("ren replace=%s\n",nnn);
+}
+#endif
+			free(fnp->n_v4, M_NFSV4NODE);
+			fnp->n_v4 = newv4;
+			newv4 = NULL;
+			fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
+			fnp->n_v4->n4_namelen = tcnp->cn_namelen;
+			NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
+			    tdnp->n_fhp->nfh_len);
+			NFSBCOPY(tcnp->cn_nameptr,
+			    NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
+		}
+		NFSUNLOCKNODE(tdnp);
+		NFSUNLOCKNODE(fnp);
+		if (newv4 != NULL)
+			free(newv4, M_NFSV4NODE);
+	}
+
+	if (fvp->v_type == VDIR) {
+		if (tvp != NULL && tvp->v_type == VDIR)
+			cache_purge(tdvp);
+		cache_purge(fdvp);
+	}
+
+out:
+	if (tdvp == tvp)
+		vrele(tdvp);
+	else
+		vput(tdvp);
+	if (tvp)
+		vput(tvp);
+	vrele(fdvp);
+	vrele(fvp);
+	/*
+	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
+	 */
+	if (error == ENOENT)
+		error = 0;
+	return (error);
+}
+
+/*
+ * nfs file rename rpc called from nfs_sillyrename() below
+ */
+static int
+nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
+    struct sillyrename *sp)
+{
+
+	return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
+	    sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
+	    scnp->cn_thread));
+}
+
+/*
+ * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
+ */
+static int
+nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
+    int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
+    int tnamelen, struct ucred *cred, struct thread *td)
+{
+	struct nfsvattr fnfsva, tnfsva;
+	struct nfsnode *fdnp = VTONFS(fdvp);
+	struct nfsnode *tdnp = VTONFS(tdvp);
+	int error = 0, fattrflag, tattrflag;
+
+	error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
+	    tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
+	    &tattrflag, NULL, NULL);
+	NFSLOCKNODE(fdnp);
+	fdnp->n_flag |= NMODIFIED;
+	if (fattrflag != 0) {
+		NFSUNLOCKNODE(fdnp);
+		(void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1);
+	} else {
+		fdnp->n_attrstamp = 0;
+		NFSUNLOCKNODE(fdnp);
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
+	}
+	NFSLOCKNODE(tdnp);
+	tdnp->n_flag |= NMODIFIED;
+	if (tattrflag != 0) {
+		NFSUNLOCKNODE(tdnp);
+		(void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1);
+	} else {
+		tdnp->n_attrstamp = 0;
+		NFSUNLOCKNODE(tdnp);
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
+	}
+	if (error && NFS_ISV4(fdvp))
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	return (error);
+}
+
+/*
+ * nfs hard link create call
+ */
+static int
+nfs_link(struct vop_link_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct vnode *tdvp = ap->a_tdvp;
+	struct componentname *cnp = ap->a_cnp;
+	struct nfsnode *np, *tdnp;
+	struct nfsvattr nfsva, dnfsva;
+	int error = 0, attrflag, dattrflag;
+
+	/*
+	 * Push all writes to the server, so that the attribute cache
+	 * doesn't get "out of sync" with the server.
+	 * XXX There should be a better way!
+	 */
+	VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
+
+	error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
+	    cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag,
+	    &dattrflag, NULL);
+	tdnp = VTONFS(tdvp);
+	NFSLOCKNODE(tdnp);
+	tdnp->n_flag |= NMODIFIED;
+	if (dattrflag != 0) {
+		NFSUNLOCKNODE(tdnp);
+		(void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1);
+	} else {
+		tdnp->n_attrstamp = 0;
+		NFSUNLOCKNODE(tdnp);
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
+	}
+	if (attrflag)
+		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+	else {
+		np = VTONFS(vp);
+		NFSLOCKNODE(np);
+		np->n_attrstamp = 0;
+		NFSUNLOCKNODE(np);
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+	}
+	/*
+	 * If negative lookup caching is enabled, I might as well
+	 * add an entry for this node. Not necessary for correctness,
+	 * but if negative caching is enabled, then the system
+	 * must care about lookup caching hit rate, so...
+	 */
+	if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 &&
+	    (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
+		cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL);
+	}
+	if (error && NFS_ISV4(vp))
+		error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
+		    (gid_t)0);
+	return (error);
+}
+
+/*
+ * nfs symbolic link create call
+ */
+static int
+nfs_symlink(struct vop_symlink_args *ap)
+{
+	struct vnode *dvp = ap->a_dvp;
+	struct vattr *vap = ap->a_vap;
+	struct componentname *cnp = ap->a_cnp;
+	struct nfsvattr nfsva, dnfsva;
+	struct nfsfh *nfhp;
+	struct nfsnode *np = NULL, *dnp;
+	struct vnode *newvp = NULL;
+	int error = 0, attrflag, dattrflag, ret;
+
+	vap->va_type = VLNK;
+	error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen,
+	    ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva,
+	    &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
+	if (nfhp) {
+		ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
+		    &np, NULL, LK_EXCLUSIVE);
+		if (!ret)
+			newvp = NFSTOV(np);
+		else if (!error)
+			error = ret;
+	}
+	if (newvp != NULL) {
+		if (attrflag)
+			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+	} else if (!error) {
+		/*
+		 * If we do not have an error and we could not extract the
+		 * newvp from the response due to the request being NFSv2, we
+		 * have to do a lookup in order to obtain a newvp to return.
+		 */
+		error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
+		    cnp->cn_cred, cnp->cn_thread, &np);
+		if (!error)
+			newvp = NFSTOV(np);
+	}
+	if (error) {
+		if (newvp)
+			vput(newvp);
+		if (NFS_ISV4(dvp))
+			error = nfscl_maperr(cnp->cn_thread, error,
+			    vap->va_uid, vap->va_gid);
+	} else {
+		*ap->a_vpp = newvp;
+	}
+
+	dnp = VTONFS(dvp);
+	NFSLOCKNODE(dnp);
+	dnp->n_flag |= NMODIFIED;
+	if (dattrflag != 0) {
+		NFSUNLOCKNODE(dnp);
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	} else {
+		dnp->n_attrstamp = 0;
+		NFSUNLOCKNODE(dnp);
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
+	}
+	/*
+	 * If negative lookup caching is enabled, I might as well
+	 * add an entry for this node. Not necessary for correctness,
+	 * but if negative caching is enabled, then the system
+	 * must care about lookup caching hit rate, so...
+	 */
+	if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
+	    (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
+		cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL);
+	}
+	return (error);
+}
+
+/*
+ * nfs make dir call
+ */
+static int
+nfs_mkdir(struct vop_mkdir_args *ap)
+{
+	struct vnode *dvp = ap->a_dvp;
+	struct vattr *vap = ap->a_vap;
+	struct componentname *cnp = ap->a_cnp;
+	struct nfsnode *np = NULL, *dnp;
+	struct vnode *newvp = NULL;
+	struct vattr vattr;
+	struct nfsfh *nfhp;
+	struct nfsvattr nfsva, dnfsva;
+	int error = 0, attrflag, dattrflag, ret;
+
+	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
+		return (error);
+	vap->va_type = VDIR;
+	error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
+	    vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp,
+	    &attrflag, &dattrflag, NULL);
+	dnp = VTONFS(dvp);
+	NFSLOCKNODE(dnp);
+	dnp->n_flag |= NMODIFIED;
+	if (dattrflag != 0) {
+		NFSUNLOCKNODE(dnp);
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	} else {
+		dnp->n_attrstamp = 0;
+		NFSUNLOCKNODE(dnp);
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
+	}
+	if (nfhp) {
+		ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
+		    &np, NULL, LK_EXCLUSIVE);
+		if (!ret) {
+			newvp = NFSTOV(np);
+			if (attrflag)
+			   (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
+				NULL, 0, 1);
+		} else if (!error)
+			error = ret;
+	}
+	if (!error && newvp == NULL) {
+		error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
+		    cnp->cn_cred, cnp->cn_thread, &np);
+		if (!error) {
+			newvp = NFSTOV(np);
+			if (newvp->v_type != VDIR)
+				error = EEXIST;
+		}
+	}
+	if (error) {
+		if (newvp)
+			vput(newvp);
+		if (NFS_ISV4(dvp))
+			error = nfscl_maperr(cnp->cn_thread, error,
+			    vap->va_uid, vap->va_gid);
+	} else {
+		/*
+		 * If negative lookup caching is enabled, I might as well
+		 * add an entry for this node. Not necessary for correctness,
+		 * but if negative caching is enabled, then the system
+		 * must care about lookup caching hit rate, so...
+		 */
+		if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
+		    (cnp->cn_flags & MAKEENTRY) &&
+		    attrflag != 0 && dattrflag != 0)
+			cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
+			    &dnfsva.na_ctime);
+		*ap->a_vpp = newvp;
+	}
+	return (error);
+}
+
+/*
+ * nfs remove directory call
+ */
+static int
+nfs_rmdir(struct vop_rmdir_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct vnode *dvp = ap->a_dvp;
+	struct componentname *cnp = ap->a_cnp;
+	struct nfsnode *dnp;
+	struct nfsvattr dnfsva;
+	int error, dattrflag;
+
+	if (dvp == vp)
+		return (EINVAL);
+	error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
+	    cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
+	dnp = VTONFS(dvp);
+	NFSLOCKNODE(dnp);
+	dnp->n_flag |= NMODIFIED;
+	if (dattrflag != 0) {
+		NFSUNLOCKNODE(dnp);
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	} else {
+		dnp->n_attrstamp = 0;
+		NFSUNLOCKNODE(dnp);
+		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
+	}
+
+	cache_purge(dvp);
+	cache_purge(vp);
+	if (error && NFS_ISV4(dvp))
+		error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
+		    (gid_t)0);
+	/*
+	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
+	 */
+	if (error == ENOENT)
+		error = 0;
+	return (error);
+}
+
+/*
+ * nfs readdir call
+ */
+static int
+nfs_readdir(struct vop_readdir_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+	struct uio *uio = ap->a_uio;
+	ssize_t tresid, left;
+	int error = 0;
+	struct vattr vattr;
+	
+	if (ap->a_eofflag != NULL)
+		*ap->a_eofflag = 0;
+	if (vp->v_type != VDIR) 
+		return(EPERM);
+
+	/*
+	 * First, check for hit on the EOF offset cache
+	 */
+	if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
+	    (np->n_flag & NMODIFIED) == 0) {
+		if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
+			NFSLOCKNODE(np);
+			if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
+			    !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
+				NFSUNLOCKNODE(np);
+				NFSINCRGLOBAL(nfsstatsv1.direofcache_hits);
+				if (ap->a_eofflag != NULL)
+					*ap->a_eofflag = 1;
+				return (0);
+			} else
+				NFSUNLOCKNODE(np);
+		}
+	}
+
+	/*
+	 * NFS always guarantees that directory entries don't straddle
+	 * DIRBLKSIZ boundaries.  As such, we need to limit the size
+	 * to an exact multiple of DIRBLKSIZ, to avoid copying a partial
+	 * directory entry.
+	 */
+	left = uio->uio_resid % DIRBLKSIZ;
+	if (left == uio->uio_resid)
+		return (EINVAL);
+	uio->uio_resid -= left;
+
+	/*
+	 * Call ncl_bioread() to do the real work.
+	 */
+	tresid = uio->uio_resid;
+	error = ncl_bioread(vp, uio, 0, ap->a_cred);
+
+	if (!error && uio->uio_resid == tresid) {
+		NFSINCRGLOBAL(nfsstatsv1.direofcache_misses);
+		if (ap->a_eofflag != NULL)
+			*ap->a_eofflag = 1;
+	}
+	
+	/* Add the partial DIRBLKSIZ (left) back in. */
+	uio->uio_resid += left;
+	return (error);
+}
+
+/*
+ * Readdir rpc call.
+ * Called from below the buffer cache by ncl_doio().
+ */
+int
+ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
+    struct thread *td)
+{
+	struct nfsvattr nfsva;
+	nfsuint64 *cookiep, cookie;
+	struct nfsnode *dnp = VTONFS(vp);
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	int error = 0, eof, attrflag;
+
+	KASSERT(uiop->uio_iovcnt == 1 &&
+	    (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
+	    (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
+	    ("nfs readdirrpc bad uio"));
+
+	/*
+	 * If there is no cookie, assume directory was stale.
+	 */
+	ncl_dircookie_lock(dnp);
+	cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
+	if (cookiep) {
+		cookie = *cookiep;
+		ncl_dircookie_unlock(dnp);
+	} else {
+		ncl_dircookie_unlock(dnp);		
+		return (NFSERR_BAD_COOKIE);
+	}
+
+	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
+		(void)ncl_fsinfo(nmp, vp, cred, td);
+
+	error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
+	    &attrflag, &eof, NULL);
+	if (attrflag)
+		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+
+	if (!error) {
+		/*
+		 * We are now either at the end of the directory or have filled
+		 * the block.
+		 */
+		if (eof)
+			dnp->n_direofoffset = uiop->uio_offset;
+		else {
+			if (uiop->uio_resid > 0)
+				printf("EEK! readdirrpc resid > 0\n");
+			ncl_dircookie_lock(dnp);
+			cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
+			*cookiep = cookie;
+			ncl_dircookie_unlock(dnp);
+		}
+	} else if (NFS_ISV4(vp)) {
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	}
+	return (error);
+}
+
+/*
+ * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
+ */
+int
+ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
+    struct thread *td)
+{
+	struct nfsvattr nfsva;
+	nfsuint64 *cookiep, cookie;
+	struct nfsnode *dnp = VTONFS(vp);
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	int error = 0, attrflag, eof;
+
+	KASSERT(uiop->uio_iovcnt == 1 &&
+	    (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
+	    (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
+	    ("nfs readdirplusrpc bad uio"));
+
+	/*
+	 * If there is no cookie, assume directory was stale.
+	 */
+	ncl_dircookie_lock(dnp);
+	cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
+	if (cookiep) {
+		cookie = *cookiep;
+		ncl_dircookie_unlock(dnp);
+	} else {
+		ncl_dircookie_unlock(dnp);
+		return (NFSERR_BAD_COOKIE);
+	}
+
+	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
+		(void)ncl_fsinfo(nmp, vp, cred, td);
+	error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
+	    &attrflag, &eof, NULL);
+	if (attrflag)
+		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+
+	if (!error) {
+		/*
+		 * We are now either at the end of the directory or have
+		 * filled the block.
+		 */
+		if (eof)
+			dnp->n_direofoffset = uiop->uio_offset;
+		else {
+			if (uiop->uio_resid > 0)
+				printf("EEK! readdirplusrpc resid > 0\n");
+			ncl_dircookie_lock(dnp);
+			cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
+			*cookiep = cookie;
+			ncl_dircookie_unlock(dnp);
+		}
+	} else if (NFS_ISV4(vp)) {
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	}
+	return (error);
+}
+
+/*
+ * Silly rename. To make the NFS filesystem, which is stateless, look a little
+ * more like "ufs", a remove of an active vnode is translated to a rename
+ * to a funny looking filename that is removed later by nfs_inactive on the
+ * nfsnode. There is the potential for another process on a different client
+ * to create the same funny name between the time nfs_lookitup() fails and
+ * nfs_rename() completes, but...
+ */
+static int
+nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
+{
+	struct sillyrename *sp;
+	struct nfsnode *np;
+	int error;
+	short pid;
+	unsigned int lticks;
+
+	cache_purge(dvp);
+	np = VTONFS(vp);
+	KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
+	sp = malloc(sizeof (struct sillyrename),
+	    M_NEWNFSREQ, M_WAITOK);
+	sp->s_cred = crhold(cnp->cn_cred);
+	sp->s_dvp = dvp;
+	VREF(dvp);
+
+	/* 
+	 * Fudge together a funny name.
+	 * Changing the format of the funny name to accommodate more 
+	 * sillynames per directory.
+	 * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is 
+	 * CPU ticks since boot.
+	 */
+	pid = cnp->cn_thread->td_proc->p_pid;
+	lticks = (unsigned int)ticks;
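+	/*
+	 * Keep bumping lticks until nfs_lookitup() fails, i.e. until the
+	 * generated name does not already exist in the directory.
+	 */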
+	for ( ; ; ) {
+		sp->s_namlen = sprintf(sp->s_name, 
+				       ".nfs.%08x.%04x4.4", lticks, 
+				       pid);
+		if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
+				 cnp->cn_thread, NULL))
+			break;
+		lticks++;
+	}
+	error = nfs_renameit(dvp, vp, cnp, sp);
+	if (error)
+		goto bad;
+	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
+		cnp->cn_thread, &np);
+	np->n_sillyrename = sp;
+	return (0);
+bad:
+	vrele(sp->s_dvp);
+	crfree(sp->s_cred);
+	free(sp, M_NEWNFSREQ);
+	return (error);
+}
+
+/*
+ * Look up a file name and optionally either update the file handle or
+ * allocate an nfsnode, depending on the value of npp.
+ * npp == NULL	--> just do the lookup
+ * *npp == NULL --> allocate a new nfsnode and make sure attributes are
+ *			handled too
+ * *npp != NULL --> update the file handle in the vnode
+ */
+static int
+nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
+    struct thread *td, struct nfsnode **npp)
+{
+	struct vnode *newvp = NULL, *vp;
+	struct nfsnode *np, *dnp = VTONFS(dvp);
+	struct nfsfh *nfhp, *onfhp;
+	struct nfsvattr nfsva, dnfsva;
+	struct componentname cn;
+	int error = 0, attrflag, dattrflag;
+	u_int hash;
+
+	error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
+	    &nfhp, &attrflag, &dattrflag, NULL);
+	if (dattrflag)
+		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
+	if (npp && !error) {
+		if (*npp != NULL) {
+		    np = *npp;
+		    vp = NFSTOV(np);
+		    /*
+		     * For NFSv4, check to see if it is the same name and
+		     * replace the name, if it is different.
+		     */
+		    if (np->n_v4 != NULL && nfsva.na_type == VREG &&
+			(np->n_v4->n4_namelen != len ||
+			 NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
+			 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
+			 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
+			 dnp->n_fhp->nfh_len))) {
+#ifdef notdef
+{ char nnn[100]; int nnnl;
+nnnl = (len < 100) ? len : 99;
+bcopy(name, nnn, nnnl);
+nnn[nnnl] = '\0';
+printf("replace=%s\n",nnn);
+}
+#endif
+			    free(np->n_v4, M_NFSV4NODE);
+			    np->n_v4 = malloc(
+				sizeof (struct nfsv4node) +
+				dnp->n_fhp->nfh_len + len - 1,
+				M_NFSV4NODE, M_WAITOK);
+			    np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
+			    np->n_v4->n4_namelen = len;
+			    NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
+				dnp->n_fhp->nfh_len);
+			    NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
+		    }
+		    hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
+			FNV1_32_INIT);
+		    onfhp = np->n_fhp;
+		    /*
+		     * Rehash node for new file handle.
+		     */
+		    vfs_hash_rehash(vp, hash);
+		    np->n_fhp = nfhp;
+		    if (onfhp != NULL)
+			free(onfhp, M_NFSFH);
+		    newvp = NFSTOV(np);
+		} else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
+		    free(nfhp, M_NFSFH);
+		    VREF(dvp);
+		    newvp = dvp;
+		} else {
+		    cn.cn_nameptr = name;
+		    cn.cn_namelen = len;
+		    error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
+			&np, NULL, LK_EXCLUSIVE);
+		    if (error)
+			return (error);
+		    newvp = NFSTOV(np);
+		}
+		if (!attrflag && *npp == NULL) {
+			if (newvp == dvp)
+				vrele(newvp);
+			else
+				vput(newvp);
+			return (ENOENT);
+		}
+		if (attrflag)
+			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
+			    0, 1);
+	}
+	if (npp && *npp == NULL) {
+		if (error) {
+			if (newvp) {
+				if (newvp == dvp)
+					vrele(newvp);
+				else
+					vput(newvp);
+			}
+		} else
+			*npp = np;
+	}
+	if (error && NFS_ISV4(dvp))
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	return (error);
+}
+
+/*
+ * Nfs Version 3 and 4 commit rpc
+ */
+int
+ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
+   struct thread *td)
+{
+	struct nfsvattr nfsva;
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	struct nfsnode *np;
+	struct uio uio;
+	int error, attrflag;
+
+	np = VTONFS(vp);
+	error = EIO;
+	attrflag = 0;
+	if (NFSHASPNFS(nmp) && (np->n_flag & NDSCOMMIT) != 0) {
+		uio.uio_offset = offset;
+		uio.uio_resid = cnt;
+		error = nfscl_doiods(vp, &uio, NULL, NULL,
+		    NFSV4OPEN_ACCESSWRITE, 1, cred, td);
+		if (error != 0) {
+			NFSLOCKNODE(np);
+			np->n_flag &= ~NDSCOMMIT;
+			NFSUNLOCKNODE(np);
+		}
+	}
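+	/*
+	 * If the server has not yet supplied a write verifier, there are
+	 * no uncommitted writes outstanding, so return success without
+	 * doing a commit RPC.
+	 */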
+	if (error != 0) {
+		mtx_lock(&nmp->nm_mtx);
+		if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
+			mtx_unlock(&nmp->nm_mtx);
+			return (0);
+		}
+		mtx_unlock(&nmp->nm_mtx);
+		error = nfsrpc_commit(vp, offset, cnt, cred, td, &nfsva,
+		    &attrflag, NULL);
+	}
+	if (attrflag != 0)
+		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
+		    0, 1);
+	if (error != 0 && NFS_ISV4(vp))
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	return (error);
+}
+
+/*
+ * Strategy routine.
+ * For async requests when nfsiod(s) are running, queue the request by
+ * calling ncl_asyncio(), otherwise just call ncl_doio() to do the
+ * request.
+ */
+static int
+nfs_strategy(struct vop_strategy_args *ap)
+{
+	struct buf *bp;
+	struct vnode *vp;
+	struct ucred *cr;
+
+	bp = ap->a_bp;
+	vp = ap->a_vp;
+	KASSERT(bp->b_vp == vp, ("missing b_getvp"));
+	KASSERT(!(bp->b_flags & B_DONE),
+	    ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
+	BUF_ASSERT_HELD(bp);
+
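+	/*
+	 * If the buffer has not been mapped yet (b_blkno == b_lblkno),
+	 * convert the logical block number into DEV_BSIZE units.
+	 */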
+	if (vp->v_type == VREG && bp->b_blkno == bp->b_lblkno)
+		bp->b_blkno = bp->b_lblkno * (vp->v_bufobj.bo_bsize /
+		    DEV_BSIZE);
+	if (bp->b_iocmd == BIO_READ)
+		cr = bp->b_rcred;
+	else
+		cr = bp->b_wcred;
+
+	/*
+	 * If the op is asynchronous and an i/o daemon is waiting
+	 * queue the request, wake it up and wait for completion
+	 * otherwise just do it ourselves.
+	 */
+	if ((bp->b_flags & B_ASYNC) == 0 ||
+	    ncl_asyncio(VFSTONFS(vp->v_mount), bp, NOCRED, curthread))
+		(void) ncl_doio(vp, bp, cr, curthread, 1);
+	return (0);
+}
+
+/*
+ * fsync vnode op. Just call ncl_flush() with commit == 1.
+ */
+/* ARGSUSED */
+static int
+nfs_fsync(struct vop_fsync_args *ap)
+{
+
+	if (ap->a_vp->v_type != VREG) {
+		/*
+		 * For NFS, metadata is changed synchronously on the server,
+		 * so there is nothing to flush. Also, ncl_flush() clears
+		 * the NMODIFIED flag and that shouldn't be done here for
+		 * directories.
+		 */
+		return (0);
+	}
+	return (ncl_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1, 0));
+}
+
+/*
+ * Flush all the blocks associated with a vnode.
+ * 	Walk through the buffer pool and push any dirty pages
+ *	associated with the vnode.
+ * If the called_from_renewthread argument is TRUE, it has been called
+ * from the NFSv4 renew thread and, as such, cannot block indefinitely
+ * waiting for a buffer write to complete.
+ */
+int
+ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
+    int commit, int called_from_renewthread)
+{
+	struct nfsnode *np = VTONFS(vp);
+	struct buf *bp;
+	int i;
+	struct buf *nbp;
+	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
+	int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
+	int passone = 1, trycnt = 0;
+	u_quad_t off, endoff, toff;
+	struct ucred* wcred = NULL;
+	struct buf **bvec = NULL;
+	struct bufobj *bo;
+#ifndef NFS_COMMITBVECSIZ
+#define	NFS_COMMITBVECSIZ	20
+#endif
+	struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
+	u_int bvecsize = 0, bveccount;
+
+	if (called_from_renewthread != 0)
+		slptimeo = hz;
+	if (nmp->nm_flag & NFSMNT_INT)
+		slpflag = PCATCH;
+	if (!commit)
+		passone = 0;
+	bo = &vp->v_bufobj;
+	/*
+	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
+	 * server, but has not been committed to stable storage on the server
+	 * yet. On the first pass, the byte range is worked out and the commit
+	 * rpc is done. On the second pass, ncl_writebp() is called to do the
+	 * job.
+	 */
+again:
+	off = (u_quad_t)-1;
+	endoff = 0;
+	bvecpos = 0;
+	if (NFS_ISV34(vp) && commit) {
+		if (bvec != NULL && bvec != bvec_on_stack)
+			free(bvec, M_TEMP);
+		/*
+		 * Count up how many buffers waiting for a commit.
+		 */
+		bveccount = 0;
+		BO_LOCK(bo);
+		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
+			if (!BUF_ISLOCKED(bp) &&
+			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
+				== (B_DELWRI | B_NEEDCOMMIT))
+				bveccount++;
+		}
+		/*
+		 * Allocate space to remember the list of bufs to commit.  It is
+		 * important to use M_NOWAIT here to avoid a race with nfs_write.
+		 * If we can't get memory (for whatever reason), we will end up
+		 * committing the buffers one-by-one in the loop below.
+		 */
+		if (bveccount > NFS_COMMITBVECSIZ) {
+			/*
+			 * Release the vnode interlock to avoid a lock
+			 * order reversal.
+			 */
+			BO_UNLOCK(bo);
+			bvec = (struct buf **)
+				malloc(bveccount * sizeof(struct buf *),
+				       M_TEMP, M_NOWAIT);
+			BO_LOCK(bo);
+			if (bvec == NULL) {
+				bvec = bvec_on_stack;
+				bvecsize = NFS_COMMITBVECSIZ;
+			} else
+				bvecsize = bveccount;
+		} else {
+			bvec = bvec_on_stack;
+			bvecsize = NFS_COMMITBVECSIZ;
+		}
+		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
+			if (bvecpos >= bvecsize)
+				break;
+			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
+				nbp = TAILQ_NEXT(bp, b_bobufs);
+				continue;
+			}
+			if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
+			    (B_DELWRI | B_NEEDCOMMIT)) {
+				BUF_UNLOCK(bp);
+				nbp = TAILQ_NEXT(bp, b_bobufs);
+				continue;
+			}
+			BO_UNLOCK(bo);
+			bremfree(bp);
+			/*
+			 * Work out if all buffers are using the same cred
+			 * so we can deal with them all with one commit.
+			 *
+			 * NOTE: we are not clearing B_DONE here, so we have
+			 * to do it later on in this routine if we intend to
+			 * initiate I/O on the bp.
+			 *
+			 * Note: to avoid loopback deadlocks, we do not
+			 * assign b_runningbufspace.
+			 */
+			if (wcred == NULL)
+				wcred = bp->b_wcred;
+			else if (wcred != bp->b_wcred)
+				wcred = NOCRED;
+			vfs_busy_pages(bp, 1);
+
+			BO_LOCK(bo);
+			/*
+			 * bp is protected by being locked, but nbp is not
+			 * and vfs_busy_pages() may sleep.  We have to
+			 * recalculate nbp.
+			 */
+			nbp = TAILQ_NEXT(bp, b_bobufs);
+
+			/*
+			 * A list of these buffers is kept so that the
+			 * second loop knows which buffers have actually
+			 * been committed. This is necessary, since there
+			 * may be a race between the commit rpc and new
+			 * uncommitted writes on the file.
+			 */
+			bvec[bvecpos++] = bp;
+			toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
+				bp->b_dirtyoff;
+			if (toff < off)
+				off = toff;
+			toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
+			if (toff > endoff)
+				endoff = toff;
+		}
+		BO_UNLOCK(bo);
+	}
+	if (bvecpos > 0) {
+		/*
+		 * Commit data on the server, as required.
+		 * If all bufs are using the same wcred, then use that with
+		 * one call for all of them, otherwise commit each one
+		 * separately.
+		 */
+		if (wcred != NOCRED)
+			retv = ncl_commit(vp, off, (int)(endoff - off),
+					  wcred, td);
+		else {
+			retv = 0;
+			for (i = 0; i < bvecpos; i++) {
+				off_t off, size;
+				bp = bvec[i];
+				off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
+					bp->b_dirtyoff;
+				size = (u_quad_t)(bp->b_dirtyend
+						  - bp->b_dirtyoff);
+				retv = ncl_commit(vp, off, (int)size,
+						  bp->b_wcred, td);
+				if (retv) break;
+			}
+		}
+
+		if (retv == NFSERR_STALEWRITEVERF)
+			ncl_clearcommit(vp->v_mount);
+
+		/*
+		 * Now, either mark the blocks I/O done or mark the
+		 * blocks dirty, depending on whether the commit
+		 * succeeded.
+		 */
+		for (i = 0; i < bvecpos; i++) {
+			bp = bvec[i];
+			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
+			if (retv) {
+				/*
+				 * Error, leave B_DELWRI intact
+				 */
+				vfs_unbusy_pages(bp);
+				brelse(bp);
+			} else {
+				/*
+				 * Success, remove B_DELWRI ( bundirty() ).
+				 *
+				 * b_dirtyoff/b_dirtyend seem to be NFS
+				 * specific.  We should probably move that
+				 * into bundirty(). XXX
+				 */
+				bufobj_wref(bo);
+				bp->b_flags |= B_ASYNC;
+				bundirty(bp);
+				bp->b_flags &= ~B_DONE;
+				bp->b_ioflags &= ~BIO_ERROR;
+				bp->b_dirtyoff = bp->b_dirtyend = 0;
+				bufdone(bp);
+			}
+		}
+	}
+
+	/*
+	 * Start/do any write(s) that are required.
+	 */
+loop:
+	BO_LOCK(bo);
+	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
+		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
+			if (waitfor != MNT_WAIT || passone)
+				continue;
+
+			error = BUF_TIMELOCK(bp,
+			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
+			    BO_LOCKPTR(bo), "nfsfsync", slpflag, slptimeo);
+			if (error == 0) {
+				BUF_UNLOCK(bp);
+				goto loop;
+			}
+			if (error == ENOLCK) {
+				error = 0;
+				goto loop;
+			}
+			if (called_from_renewthread != 0) {
+				/*
+				 * Return EIO so the flush will be retried
+				 * later.
+				 */
+				error = EIO;
+				goto done;
+			}
+			if (newnfs_sigintr(nmp, td)) {
+				error = EINTR;
+				goto done;
+			}
+			if (slpflag == PCATCH) {
+				slpflag = 0;
+				slptimeo = 2 * hz;
+			}
+			goto loop;
+		}
+		if ((bp->b_flags & B_DELWRI) == 0)
+			panic("nfs_fsync: not dirty");
+		if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
+			BUF_UNLOCK(bp);
+			continue;
+		}
+		BO_UNLOCK(bo);
+		bremfree(bp);
+		bp->b_flags |= B_ASYNC;
+		bwrite(bp);
+		if (newnfs_sigintr(nmp, td)) {
+			error = EINTR;
+			goto done;
+		}
+		goto loop;
+	}
+	if (passone) {
+		passone = 0;
+		BO_UNLOCK(bo);
+		goto again;
+	}
+	if (waitfor == MNT_WAIT) {
+		while (bo->bo_numoutput) {
+			error = bufobj_wwait(bo, slpflag, slptimeo);
+			if (error) {
+			    BO_UNLOCK(bo);
+			    if (called_from_renewthread != 0) {
+				/*
+				 * Return EIO so that the flush will be
+				 * retried later.
+				 */
+				error = EIO;
+				goto done;
+			    }
+			    error = newnfs_sigintr(nmp, td);
+			    if (error)
+				goto done;
+			    if (slpflag == PCATCH) {
+				slpflag = 0;
+				slptimeo = 2 * hz;
+			    }
+			    BO_LOCK(bo);
+			}
+		}
+		if (bo->bo_dirty.bv_cnt != 0 && commit) {
+			BO_UNLOCK(bo);
+			goto loop;
+		}
+		/*
+		 * Wait for all the async IO requests to drain
+		 */
+		BO_UNLOCK(bo);
+		NFSLOCKNODE(np);
+		while (np->n_directio_asyncwr > 0) {
+			np->n_flag |= NFSYNCWAIT;
+			error = newnfs_msleep(td, &np->n_directio_asyncwr,
+			    &np->n_mtx, slpflag | (PRIBIO + 1), 
+			    "nfsfsync", 0);
+			if (error) {
+				if (newnfs_sigintr(nmp, td)) {
+					NFSUNLOCKNODE(np);
+					error = EINTR;	
+					goto done;
+				}
+			}
+		}
+		NFSUNLOCKNODE(np);
+	} else
+		BO_UNLOCK(bo);
+	if (NFSHASPNFS(nmp)) {
+		nfscl_layoutcommit(vp, td);
+		/*
+		 * Invalidate the attribute cache, since writes to a DS
+		 * won't update the size attribute.
+		 */
+		NFSLOCKNODE(np);
+		np->n_attrstamp = 0;
+	} else
+		NFSLOCKNODE(np);
+	if (np->n_flag & NWRITEERR) {
+		error = np->n_error;
+		np->n_flag &= ~NWRITEERR;
+	}
+  	if (commit && bo->bo_dirty.bv_cnt == 0 &&
+	    bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
+  		np->n_flag &= ~NMODIFIED;
+	NFSUNLOCKNODE(np);
+done:
+	if (bvec != NULL && bvec != bvec_on_stack)
+		free(bvec, M_TEMP);
+	if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
+	    (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
+	    np->n_directio_asyncwr != 0)) {
+		if (trycnt++ < 5) {
+			/* try, try again... */
+			passone = 1;
+			wcred = NULL;
+			bvec = NULL;
+			bvecsize = 0;
+			goto again;
+		}
+		vn_printf(vp, "ncl_flush failed");
+		error = called_from_renewthread != 0 ? EIO : EBUSY;
+	}
+	return (error);
+}
+
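The commit phase of ncl_flush() above batches buffers by credential: when every B_NEEDCOMMIT buffer carries the same write credential, one commit RPC covers the merged byte range; otherwise each buffer is committed separately with its own credential. The following self-contained userland sketch (not part of the imported sources; all types and the do_commit() helper are invented stand-ins) models only that decision.

/*
 * Illustrative sketch of the "one batched commit vs. per-buffer commits"
 * choice.  Toy types only; not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

struct toy_buf {
	uint64_t off;		/* start of dirty range */
	uint64_t len;		/* length of dirty range */
	int	 cred;		/* write credential id (stand-in for b_wcred) */
};

static void do_commit(uint64_t off, uint64_t len, int cred)
{
	printf("commit off=%llu len=%llu cred=%d\n",
	    (unsigned long long)off, (unsigned long long)len, cred);
}

int main(void)
{
	struct toy_buf bufs[] = { {0, 4096, 1}, {8192, 4096, 1}, {16384, 512, 1} };
	int n = sizeof(bufs) / sizeof(bufs[0]);
	int i, shared_cred = bufs[0].cred;
	uint64_t lo = UINT64_MAX, hi = 0;

	for (i = 0; i < n; i++) {
		if (bufs[i].cred != shared_cred)
			shared_cred = -1;	/* mixed creds, like NOCRED */
		if (bufs[i].off < lo)
			lo = bufs[i].off;
		if (bufs[i].off + bufs[i].len > hi)
			hi = bufs[i].off + bufs[i].len;
	}
	if (shared_cred != -1)
		do_commit(lo, hi - lo, shared_cred);	/* one call for the whole range */
	else
		for (i = 0; i < n; i++)			/* fall back to per-buffer commits */
			do_commit(bufs[i].off, bufs[i].len, bufs[i].cred);
	return 0;
}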
+/*
+ * NFS advisory byte-level locks.
+ */
+static int
+nfs_advlock(struct vop_advlock_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct ucred *cred;
+	struct nfsnode *np = VTONFS(ap->a_vp);
+	struct proc *p = (struct proc *)ap->a_id;
+	struct thread *td = curthread;	/* XXX */
+	struct vattr va;
+	int ret, error = EOPNOTSUPP;
+	u_quad_t size;
+	
+	ret = NFSVOPLOCK(vp, LK_SHARED);
+	if (ret != 0)
+		return (EBADF);
+	if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) {
+		if (vp->v_type != VREG) {
+			NFSVOPUNLOCK(vp, 0);
+			return (EINVAL);
+		}
+		if ((ap->a_flags & F_POSIX) != 0)
+			cred = p->p_ucred;
+		else
+			cred = td->td_ucred;
+		NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
+		if (vp->v_iflag & VI_DOOMED) {
+			NFSVOPUNLOCK(vp, 0);
+			return (EBADF);
+		}
+
+		/*
+		 * If this is unlocking a write locked region, flush and
+		 * commit them before unlocking. This is required by
+		 * RFC3530 Sec. 9.3.2.
+		 */
+		if (ap->a_op == F_UNLCK &&
+		    nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id,
+		    ap->a_flags))
+			(void) ncl_flush(vp, MNT_WAIT, td, 1, 0);
+
+		/*
+		 * Loop around doing the lock op, while a blocking lock
+		 * must wait for the lock op to succeed.
+		 */
+		do {
+			ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
+			    ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags);
+			if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
+			    ap->a_op == F_SETLK) {
+				NFSVOPUNLOCK(vp, 0);
+				error = nfs_catnap(PZERO | PCATCH, ret,
+				    "ncladvl");
+				if (error)
+					return (EINTR);
+				NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
+				if (vp->v_iflag & VI_DOOMED) {
+					NFSVOPUNLOCK(vp, 0);
+					return (EBADF);
+				}
+			}
+		} while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
+		     ap->a_op == F_SETLK);
+		if (ret == NFSERR_DENIED) {
+			NFSVOPUNLOCK(vp, 0);
+			return (EAGAIN);
+		} else if (ret == EINVAL || ret == EBADF || ret == EINTR) {
+			NFSVOPUNLOCK(vp, 0);
+			return (ret);
+		} else if (ret != 0) {
+			NFSVOPUNLOCK(vp, 0);
+			return (EACCES);
+		}
+
+		/*
+		 * Now, if we just got a lock, invalidate data in the buffer
+		 * cache, as required, so that the coherency conforms with
+		 * RFC3530 Sec. 9.3.2.
+		 */
+		if (ap->a_op == F_SETLK) {
+			if ((np->n_flag & NMODIFIED) == 0) {
+				np->n_attrstamp = 0;
+				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+				ret = VOP_GETATTR(vp, &va, cred);
+			}
+			if ((np->n_flag & NMODIFIED) || ret ||
+			    np->n_change != va.va_filerev) {
+				(void) ncl_vinvalbuf(vp, V_SAVE, td, 1);
+				np->n_attrstamp = 0;
+				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
+				ret = VOP_GETATTR(vp, &va, cred);
+				if (!ret) {
+					np->n_mtime = va.va_mtime;
+					np->n_change = va.va_filerev;
+				}
+			}
+			/* Mark that a file lock has been acquired. */
+			NFSLOCKNODE(np);
+			np->n_flag |= NHASBEENLOCKED;
+			NFSUNLOCKNODE(np);
+		}
+		NFSVOPUNLOCK(vp, 0);
+		return (0);
+	} else if (!NFS_ISV4(vp)) {
+		if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
+			size = VTONFS(vp)->n_size;
+			NFSVOPUNLOCK(vp, 0);
+			error = lf_advlock(ap, &(vp->v_lockf), size);
+		} else {
+			if (nfs_advlock_p != NULL)
+				error = nfs_advlock_p(ap);
+			else {
+				NFSVOPUNLOCK(vp, 0);
+				error = ENOLCK;
+			}
+		}
+		if (error == 0 && ap->a_op == F_SETLK) {
+			error = NFSVOPLOCK(vp, LK_SHARED);
+			if (error == 0) {
+				/* Mark that a file lock has been acquired. */
+				NFSLOCKNODE(np);
+				np->n_flag |= NHASBEENLOCKED;
+				NFSUNLOCKNODE(np);
+				NFSVOPUNLOCK(vp, 0);
+			}
+		}
+	} else
+		NFSVOPUNLOCK(vp, 0);
+	return (error);
+}
+
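For a blocking NFSv4 lock request (F_SETLK with F_WAIT), nfs_advlock() above keeps reissuing the lock RPC while the server answers NFSERR_DENIED, napping between attempts via nfs_catnap(). The sketch below (not part of the imported sources; try_lock() and nap_ms() are hypothetical stand-ins) shows the same retry shape in isolation.

/*
 * Illustrative retry loop for a blocking advisory lock: try, and if
 * denied, sleep briefly and try again.  Toy helpers only.
 */
#include <stdio.h>
#include <unistd.h>

#define TOY_DENIED 1

static int attempts;

static int try_lock(void)		/* pretend the first two attempts are denied */
{
	return (attempts++ < 2) ? TOY_DENIED : 0;
}

static void nap_ms(int ms)
{
	usleep(ms * 1000);
}

int main(void)
{
	int blocking = 1, ret;

	do {
		ret = try_lock();
		if (ret == TOY_DENIED && blocking)
			nap_ms(10);	/* drop, nap, retry -- like nfs_catnap() */
	} while (ret == TOY_DENIED && blocking);
	printf("lock result: %d after %d attempts\n", ret, attempts);
	return 0;
}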
+/*
+ * NFS advisory byte-level locks.
+ */
+static int
+nfs_advlockasync(struct vop_advlockasync_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	u_quad_t size;
+	int error;
+	
+	if (NFS_ISV4(vp))
+		return (EOPNOTSUPP);
+	error = NFSVOPLOCK(vp, LK_SHARED);
+	if (error)
+		return (error);
+	if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
+		size = VTONFS(vp)->n_size;
+		NFSVOPUNLOCK(vp, 0);
+		error = lf_advlockasync(ap, &(vp->v_lockf), size);
+	} else {
+		NFSVOPUNLOCK(vp, 0);
+		error = EOPNOTSUPP;
+	}
+	return (error);
+}
+
+/*
+ * Print out the contents of an nfsnode.
+ */
+static int
+nfs_print(struct vop_print_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+
+	printf("\tfileid %jd fsid 0x%jx", (uintmax_t)np->n_vattr.na_fileid,
+	    (uintmax_t)np->n_vattr.na_fsid);
+	if (vp->v_type == VFIFO)
+		fifo_printinfo(vp);
+	printf("\n");
+	return (0);
+}
+
+/*
+ * This is the "real" nfs::bwrite(struct buf*).
+ * We set B_CACHE if this is a VMIO buffer.
+ */
+int
+ncl_writebp(struct buf *bp, int force __unused, struct thread *td)
+{
+	int oldflags, rtval;
+
+	BUF_ASSERT_HELD(bp);
+
+	if (bp->b_flags & B_INVAL) {
+		brelse(bp);
+		return (0);
+	}
+
+	oldflags = bp->b_flags;
+	bp->b_flags |= B_CACHE;
+
+	/*
+	 * Undirty the bp.  We will redirty it later if the I/O fails.
+	 */
+	bundirty(bp);
+	bp->b_flags &= ~B_DONE;
+	bp->b_ioflags &= ~BIO_ERROR;
+	bp->b_iocmd = BIO_WRITE;
+
+	bufobj_wref(bp->b_bufobj);
+	curthread->td_ru.ru_oublock++;
+
+	/*
+	 * Note: to avoid loopback deadlocks, we do not
+	 * assign b_runningbufspace.
+	 */
+	vfs_busy_pages(bp, 1);
+
+	BUF_KERNPROC(bp);
+	bp->b_iooffset = dbtob(bp->b_blkno);
+	bstrategy(bp);
+
+	if ((oldflags & B_ASYNC) != 0)
+		return (0);
+
+	rtval = bufwait(bp);
+	if (oldflags & B_DELWRI)
+		reassignbuf(bp);
+	brelse(bp);
+	return (rtval);
+}
+
+/*
+ * nfs special file access vnode op.
+ * Essentially just get vattr and then imitate iaccess() since the device is
+ * local to the client.
+ */
+static int
+nfsspec_access(struct vop_access_args *ap)
+{
+	struct vattr *vap;
+	struct ucred *cred = ap->a_cred;
+	struct vnode *vp = ap->a_vp;
+	accmode_t accmode = ap->a_accmode;
+	struct vattr vattr;
+	int error;
+
+	/*
+	 * Disallow write attempts on filesystems mounted read-only,
+	 * unless the file is a socket, fifo, or a block or character
+	 * device resident on the filesystem.
+	 */
+	if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
+		switch (vp->v_type) {
+		case VREG:
+		case VDIR:
+		case VLNK:
+			return (EROFS);
+		default:
+			break;
+		}
+	}
+	vap = &vattr;
+	error = VOP_GETATTR(vp, vap, cred);
+	if (error)
+		goto out;
+	error  = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
+	    accmode, cred, NULL);
+out:
+	return error;
+}
+
+/*
+ * Read wrapper for fifos.
+ */
+static int
+nfsfifo_read(struct vop_read_args *ap)
+{
+	struct nfsnode *np = VTONFS(ap->a_vp);
+	int error;
+
+	/*
+	 * Set access flag.
+	 */
+	NFSLOCKNODE(np);
+	np->n_flag |= NACC;
+	vfs_timestamp(&np->n_atim);
+	NFSUNLOCKNODE(np);
+	error = fifo_specops.vop_read(ap);
+	return error;	
+}
+
+/*
+ * Write wrapper for fifos.
+ */
+static int
+nfsfifo_write(struct vop_write_args *ap)
+{
+	struct nfsnode *np = VTONFS(ap->a_vp);
+
+	/*
+	 * Set update flag.
+	 */
+	NFSLOCKNODE(np);
+	np->n_flag |= NUPD;
+	vfs_timestamp(&np->n_mtim);
+	NFSUNLOCKNODE(np);
+	return(fifo_specops.vop_write(ap));
+}
+
+/*
+ * Close wrapper for fifos.
+ *
+ * Update the times on the nfsnode then do fifo close.
+ */
+static int
+nfsfifo_close(struct vop_close_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct nfsnode *np = VTONFS(vp);
+	struct vattr vattr;
+	struct timespec ts;
+
+	NFSLOCKNODE(np);
+	if (np->n_flag & (NACC | NUPD)) {
+		vfs_timestamp(&ts);
+		if (np->n_flag & NACC)
+			np->n_atim = ts;
+		if (np->n_flag & NUPD)
+			np->n_mtim = ts;
+		np->n_flag |= NCHG;
+		if (vrefcnt(vp) == 1 &&
+		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
+			VATTR_NULL(&vattr);
+			if (np->n_flag & NACC)
+				vattr.va_atime = np->n_atim;
+			if (np->n_flag & NUPD)
+				vattr.va_mtime = np->n_mtim;
+			NFSUNLOCKNODE(np);
+			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
+			goto out;
+		}
+	}
+	NFSUNLOCKNODE(np);
+out:
+	return (fifo_specops.vop_close(ap));
+}
+
+/*
+ * Just call ncl_writebp() with the force argument set to 1.
+ *
+ * NOTE: B_DONE may or may not be set in a_bp on call.
+ */
+static int
+nfs_bwrite(struct buf *bp)
+{
+
+	return (ncl_writebp(bp, 1, curthread));
+}
+
+struct buf_ops buf_ops_newnfs = {
+	.bop_name	=	"buf_ops_nfs",
+	.bop_write	=	nfs_bwrite,
+	.bop_strategy	=	bufstrategy,
+	.bop_sync	=	bufsync,
+	.bop_bdflush	=	bufbdflush,
+};
+
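buf_ops_newnfs above is a function-pointer table: the generic buffer layer dispatches bwrite through it to nfs_bwrite() while reusing the stock strategy, sync and bdflush handlers. The following toy sketch (not part of the imported sources; all names invented) shows the ops-table dispatch pattern on its own.

/*
 * Illustrative ops-table dispatch: a struct of function pointers lets a
 * generic layer call filesystem-specific handlers.  Toy types only.
 */
#include <stdio.h>

struct toy_buf { int id; };

struct toy_buf_ops {
	const char *name;
	int (*op_write)(struct toy_buf *);
};

static int toy_nfs_bwrite(struct toy_buf *bp)
{
	printf("nfs-specific write for buf %d\n", bp->id);
	return 0;
}

static const struct toy_buf_ops toy_ops_nfs = {
	.name = "toy_buf_ops_nfs",
	.op_write = toy_nfs_bwrite,
};

static int toy_bwrite(const struct toy_buf_ops *ops, struct toy_buf *bp)
{
	return ops->op_write(bp);	/* generic layer calls through the table */
}

int main(void)
{
	struct toy_buf bp = { .id = 7 };

	return toy_bwrite(&toy_ops_nfs, &bp);
}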
+static int
+nfs_getacl(struct vop_getacl_args *ap)
+{
+	int error;
+
+	if (ap->a_type != ACL_TYPE_NFS4)
+		return (EOPNOTSUPP);
+	error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
+	    NULL);
+	if (error > NFSERR_STALE) {
+		(void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
+		error = EPERM;
+	}
+	return (error);
+}
+
+static int
+nfs_setacl(struct vop_setacl_args *ap)
+{
+	int error;
+
+	if (ap->a_type != ACL_TYPE_NFS4)
+		return (EOPNOTSUPP);
+	error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
+	    NULL);
+	if (error > NFSERR_STALE) {
+		(void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
+		error = EPERM;
+	}
+	return (error);
+}
+
+/*
+ * Return POSIX pathconf information applicable to nfs filesystems.
+ */
+static int
+nfs_pathconf(struct vop_pathconf_args *ap)
+{
+	struct nfsv3_pathconf pc;
+	struct nfsvattr nfsva;
+	struct vnode *vp = ap->a_vp;
+	struct thread *td = curthread;
+	int attrflag, error;
+
+	if ((NFS_ISV34(vp) && (ap->a_name == _PC_LINK_MAX ||
+	    ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED ||
+	    ap->a_name == _PC_NO_TRUNC)) ||
+	    (NFS_ISV4(vp) && ap->a_name == _PC_ACL_NFS4)) {
+		/*
+		 * Since only the above 4 a_names are returned by the NFSv3
+		 * Pathconf RPC, there is no point in doing it for others.
+		 * For NFSv4, the Pathconf RPC (actually a Getattr Op.) can
+		 * be used for _PC_ACL_NFS4 as well.
+		 */
+		error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva,
+		    &attrflag, NULL);
+		if (attrflag != 0)
+			(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
+			    1);
+		if (error != 0)
+			return (error);
+	} else {
+		/*
+		 * For NFSv2 (or NFSv3 when not one of the above 4 a_names),
+		 * just fake them.
+		 */
+		pc.pc_linkmax = NFS_LINK_MAX;
+		pc.pc_namemax = NFS_MAXNAMLEN;
+		pc.pc_notrunc = 1;
+		pc.pc_chownrestricted = 1;
+		pc.pc_caseinsensitive = 0;
+		pc.pc_casepreserving = 1;
+		error = 0;
+	}
+	switch (ap->a_name) {
+	case _PC_LINK_MAX:
+#ifdef _LP64
+		*ap->a_retval = pc.pc_linkmax;
+#else
+		*ap->a_retval = MIN(LONG_MAX, pc.pc_linkmax);
+#endif
+		break;
+	case _PC_NAME_MAX:
+		*ap->a_retval = pc.pc_namemax;
+		break;
+	case _PC_PIPE_BUF:
+		if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO)
+			*ap->a_retval = PIPE_BUF;
+		else
+			error = EINVAL;
+		break;
+	case _PC_CHOWN_RESTRICTED:
+		*ap->a_retval = pc.pc_chownrestricted;
+		break;
+	case _PC_NO_TRUNC:
+		*ap->a_retval = pc.pc_notrunc;
+		break;
+	case _PC_ACL_NFS4:
+		if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 &&
+		    NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL))
+			*ap->a_retval = 1;
+		else
+			*ap->a_retval = 0;
+		break;
+	case _PC_ACL_PATH_MAX:
+		if (NFS_ISV4(vp))
+			*ap->a_retval = ACL_MAX_ENTRIES;
+		else
+			*ap->a_retval = 3;
+		break;
+	case _PC_PRIO_IO:
+		*ap->a_retval = 0;
+		break;
+	case _PC_SYNC_IO:
+		*ap->a_retval = 0;
+		break;
+	case _PC_ALLOC_SIZE_MIN:
+		*ap->a_retval = vp->v_mount->mnt_stat.f_bsize;
+		break;
+	case _PC_FILESIZEBITS:
+		if (NFS_ISV34(vp))
+			*ap->a_retval = 64;
+		else
+			*ap->a_retval = 32;
+		break;
+	case _PC_REC_INCR_XFER_SIZE:
+		*ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
+		break;
+	case _PC_REC_MAX_XFER_SIZE:
+		*ap->a_retval = -1; /* means ``unlimited'' */
+		break;
+	case _PC_REC_MIN_XFER_SIZE:
+		*ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
+		break;
+	case _PC_REC_XFER_ALIGN:
+		*ap->a_retval = PAGE_SIZE;
+		break;
+	case _PC_SYMLINK_MAX:
+		*ap->a_retval = NFS_MAXPATHLEN;
+		break;
+
+	default:
+		error = vop_stdpathconf(ap);
+		break;
+	}
+	return (error);
+}
+
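nfs_pathconf() above is what ultimately services pathconf(2) and fpathconf(2) on an NFS vnode. A small userland example that exercises it (not part of the imported sources; "/mnt/nfs" is a placeholder mount point):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long v;

	/* pathconf(2) reports -1 with errno unchanged when a limit is indeterminate */
	errno = 0;
	v = pathconf("/mnt/nfs", _PC_NAME_MAX);
	if (v == -1 && errno != 0)
		perror("pathconf");
	else
		printf("_PC_NAME_MAX = %ld\n", v);
	return 0;
}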
diff --git a/freebsd/sys/fs/nfsclient/nfs_kdtrace.h b/freebsd/sys/fs/nfsclient/nfs_kdtrace.h
new file mode 100644
index 0000000..650f063
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfs_kdtrace.h
@@ -0,0 +1,122 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * This software was developed at the University of Cambridge Computer
+ * Laboratory with support from a grant from Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCL_NFS_KDTRACE_H_
+#define	_NFSCL_NFS_KDTRACE_H_
+
+/*
+ * Definitions for NFS access cache probes.
+ */
+extern uint32_t	nfscl_accesscache_flush_done_id;
+extern uint32_t	nfscl_accesscache_get_hit_id;
+extern uint32_t	nfscl_accesscache_get_miss_id;
+extern uint32_t	nfscl_accesscache_load_done_id;
+
+/*
+ * Definitions for NFS attribute cache probes.
+ */
+extern uint32_t	nfscl_attrcache_flush_done_id;
+extern uint32_t	nfscl_attrcache_get_hit_id;
+extern uint32_t	nfscl_attrcache_get_miss_id;
+extern uint32_t	nfscl_attrcache_load_done_id;
+
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+
+#define	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp)	do {			\
+	if (dtrace_nfscl_accesscache_flush_done_probe != NULL)		\
+		(dtrace_nfscl_accesscache_flush_done_probe)(		\
+		    nfscl_accesscache_flush_done_id, (vp));		\
+} while (0)
+
+#define	KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp, uid, mode)	do {		\
+	if (dtrace_nfscl_accesscache_get_hit_probe != NULL)		\
+		(dtrace_nfscl_accesscache_get_hit_probe)(		\
+		    nfscl_accesscache_get_hit_id, (vp), (uid),		\
+		    (mode));						\
+} while (0)
+	
+#define	KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp, uid, mode)	do {		\
+	if (dtrace_nfscl_accesscache_get_miss_probe != NULL)		\
+		(dtrace_nfscl_accesscache_get_miss_probe)(		\
+		    nfscl_accesscache_get_miss_id, (vp), (uid),		\
+		    (mode));						\
+} while (0)
+
+#define	KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, uid, rmode, error) do {	\
+	if (dtrace_nfscl_accesscache_load_done_probe != NULL)		\
+		(dtrace_nfscl_accesscache_load_done_probe)(		\
+		    nfscl_accesscache_load_done_id, (vp), (uid),	\
+		    (rmode), (error));					\
+} while (0)
+
+#define	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp)	do {			\
+	if (dtrace_nfscl_attrcache_flush_done_probe != NULL)		\
+		(dtrace_nfscl_attrcache_flush_done_probe)(		\
+		    nfscl_attrcache_flush_done_id, (vp));		\
+} while (0)
+
+#define	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap)	do {			\
+	if (dtrace_nfscl_attrcache_get_hit_probe != NULL)		\
+		(dtrace_nfscl_attrcache_get_hit_probe)(			\
+		    nfscl_attrcache_get_hit_id, (vp), (vap));		\
+} while (0)
+
+#define	KDTRACE_NFS_ATTRCACHE_GET_MISS(vp)	do {			\
+	if (dtrace_nfscl_attrcache_get_miss_probe != NULL)		\
+		(dtrace_nfscl_attrcache_get_miss_probe)(		\
+			    nfscl_attrcache_get_miss_id, (vp));		\
+} while (0)
+
+#define	KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error)	do {		\
+	if (dtrace_nfscl_attrcache_load_done_probe != NULL)		\
+		(dtrace_nfscl_attrcache_load_done_probe)(		\
+		    nfscl_attrcache_load_done_id, (vp), (vap),		\
+		    (error));						\
+} while (0)
+
+#else /* !KDTRACE_HOOKS */
+
+#define	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp)
+#define	KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp, uid, mode)
+#define	KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp, uid, mode)
+#define	KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, uid, rmode, error)
+
+#define	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp)
+#define	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap)
+#define	KDTRACE_NFS_ATTRCACHE_GET_MISS(vp)
+#define	KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error)
+
+#endif /* KDTRACE_HOOKS */
+
+#endif /* !_NFSCL_NFS_KDTRACE_H_ */
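Each KDTRACE_NFS_* macro above fires only when DTrace has registered a probe function pointer, and compiles away entirely when KDTRACE_HOOKS is off. The toy sketch below (not part of the imported sources; names invented) shows the nullable-hook-pointer pattern in isolation.

/*
 * Illustrative hook-pointer pattern: a global function pointer that is
 * only called once something has filled it in.  Toy names only.
 */
#include <stdio.h>
#include <stdint.h>

static void (*attrcache_flush_probe)(uint32_t id, void *vp);	/* NULL until registered */
static uint32_t attrcache_flush_id = 42;

#define TOY_ATTRCACHE_FLUSH_DONE(vp) do {			\
	if (attrcache_flush_probe != NULL)			\
		attrcache_flush_probe(attrcache_flush_id, (vp));\
} while (0)

static void my_probe(uint32_t id, void *vp)
{
	printf("probe %u fired for node %p\n", id, vp);
}

int main(void)
{
	int node;

	TOY_ATTRCACHE_FLUSH_DONE(&node);	/* no hook registered: no output */
	attrcache_flush_probe = my_probe;	/* a handler gets registered */
	TOY_ATTRCACHE_FLUSH_DONE(&node);	/* now the probe fires */
	return 0;
}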
diff --git a/freebsd/sys/fs/nfsclient/nfsmount.h b/freebsd/sys/fs/nfsclient/nfsmount.h
new file mode 100644
index 0000000..649e59e
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfsmount.h
@@ -0,0 +1,129 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSMOUNT_H_
+#define	_NFSCLIENT_NFSMOUNT_H_
+
+#include <nfs/nfs_mountcommon.h>
+
+/*
+ * Mount structure.
+ * One allocated on every NFS mount.
+ * Holds NFS specific information for mount.
+ */
+struct	nfsmount {
+	struct	nfsmount_common nm_com;	/* Common fields for nlm */
+	uint32_t nm_privflag;		/* Private flags */
+	int	nm_numgrps;		/* Max. size of groupslist */
+	u_char	nm_fh[NFSX_FHMAX];	/* File handle of root dir */
+	int	nm_fhsize;		/* Size of root file handle */
+	struct	nfssockreq nm_sockreq;	/* Socket Info */
+	int	nm_timeouts;		/* Request timeouts */
+	int	nm_rsize;		/* Max size of read rpc */
+	int	nm_wsize;		/* Max size of write rpc */
+	int	nm_readdirsize;		/* Size of a readdir rpc */
+	int	nm_readahead;		/* Num. of blocks to readahead */
+	int	nm_wcommitsize;		/* Max size of commit for write */
+	int	nm_acdirmin;		/* Directory attr cache min lifetime */
+	int	nm_acdirmax;		/* Directory attr cache max lifetime */
+	int	nm_acregmin;		/* Reg file attr cache min lifetime */
+	int	nm_acregmax;		/* Reg file attr cache max lifetime */
+	u_char	nm_verf[NFSX_VERF];	/* write verifier */
+	TAILQ_HEAD(, buf) nm_bufq;	/* async io buffer queue */
+	short	nm_bufqlen;		/* number of buffers in queue */
+	short	nm_bufqwant;		/* process wants to add to the queue */
+	int	nm_bufqiods;		/* number of iods processing queue */
+	u_int64_t nm_maxfilesize;	/* maximum file size */
+	int	nm_tprintf_initial_delay; /* initial delay */
+	int	nm_tprintf_delay;	/* interval for messages */
+	int	nm_nametimeo;		/* timeout for +ve entries (sec) */
+	int	nm_negnametimeo;	/* timeout for -ve entries (sec) */
+
+	/* Newnfs additions */
+	TAILQ_HEAD(, nfsclds) nm_sess;	/* Session(s) for NFSv4.1. */
+	struct	nfsclclient *nm_clp;
+	uid_t	nm_uid;			/* Uid for SetClientID etc. */
+	u_int64_t nm_clval;		/* identifies which clientid */
+	u_int64_t nm_fsid[2];		/* NFSv4 fsid */
+	int	nm_minorvers;		/* Minor version # for NFSv4 */
+	u_int16_t nm_krbnamelen;	/* Krb5 host principal, if any */
+	u_int16_t nm_dirpathlen;	/* and mount dirpath, for V4 */
+	u_int16_t nm_srvkrbnamelen;	/* and the server's target name */
+	u_char	nm_name[1];	/* malloc'd actual len of krbname + dirpath */
+};
+
+#define	nm_nam		nm_sockreq.nr_nam
+#define	nm_sotype	nm_sockreq.nr_sotype
+#define	nm_so		nm_sockreq.nr_so
+#define	nm_soflags	nm_sockreq.nr_soflags
+#define	nm_soproto	nm_sockreq.nr_soproto
+#define	nm_client	nm_sockreq.nr_client
+#define	nm_krbname	nm_name
+#define	nm_mtx		nm_com.nmcom_mtx
+#define	nm_flag		nm_com.nmcom_flag
+#define	nm_state	nm_com.nmcom_state
+#define	nm_mountp	nm_com.nmcom_mountp
+#define	nm_timeo	nm_com.nmcom_timeo
+#define	nm_retry	nm_com.nmcom_retry
+#define	nm_hostname	nm_com.nmcom_hostname
+#define	nm_getinfo	nm_com.nmcom_getinfo
+#define	nm_vinvalbuf	nm_com.nmcom_vinvalbuf
+
+/* Private flags. */
+#define	NFSMNTP_FORCEDISM	0x00000001
+#define	NFSMNTP_CANCELRPCS	0x00000002
+
+#define	NFSMNT_DIRPATH(m)	(&((m)->nm_name[(m)->nm_krbnamelen + 1]))
+#define	NFSMNT_SRVKRBNAME(m)						\
+	(&((m)->nm_name[(m)->nm_krbnamelen + (m)->nm_dirpathlen + 2]))
+
+#if defined(_KERNEL)
+/*
+ * Convert mount ptr to nfsmount ptr.
+ */
+#define	VFSTONFS(mp)	((struct nfsmount *)((mp)->mnt_data))
+
+#ifndef NFS_DEFAULT_NAMETIMEO
+#define NFS_DEFAULT_NAMETIMEO		60
+#endif
+
+#ifndef NFS_DEFAULT_NEGNAMETIMEO
+#define NFS_DEFAULT_NEGNAMETIMEO	60
+#endif
+
+#endif	/* _KERNEL */
+
+#endif	/* _NFSCLIENT_NFSMOUNT_H_ */
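nm_name above holds the Kerberos host principal, the mount dirpath and the server's target name back to back in a single malloc'd buffer; NFSMNT_DIRPATH() and NFSMNT_SRVKRBNAME() recover the second and third strings purely by offset. The sketch below (not part of the imported sources) reproduces that arithmetic; that the fields are NUL-separated is an editorial inference from the +1/+2 offsets.

/*
 * Illustrative packed-string layout and the matching offset macros.
 * Toy structure only; not the kernel nfsmount.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_mount {
	unsigned short krbnamelen;
	unsigned short dirpathlen;
	char name[1];			/* actually krbname + dirpath + srvkrbname */
};

#define TOY_DIRPATH(m)		(&((m)->name[(m)->krbnamelen + 1]))
#define TOY_SRVKRBNAME(m)	(&((m)->name[(m)->krbnamelen + (m)->dirpathlen + 2]))

int main(void)
{
	const char *krb = "nfs/client.example", *dir = "/export/home", *srv = "nfs@server.example";
	size_t need = strlen(krb) + 1 + strlen(dir) + 1 + strlen(srv) + 1;
	struct toy_mount *m = malloc(sizeof(*m) + need);

	if (m == NULL)
		return 1;
	m->krbnamelen = (unsigned short)strlen(krb);
	m->dirpathlen = (unsigned short)strlen(dir);
	memcpy(m->name, krb, strlen(krb) + 1);
	memcpy(TOY_DIRPATH(m), dir, strlen(dir) + 1);
	memcpy(TOY_SRVKRBNAME(m), srv, strlen(srv) + 1);

	printf("krbname=%s dirpath=%s srvkrbname=%s\n",
	    m->name, TOY_DIRPATH(m), TOY_SRVKRBNAME(m));
	free(m);
	return 0;
}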
diff --git a/freebsd/sys/fs/nfsclient/nfsnode.h b/freebsd/sys/fs/nfsclient/nfsnode.h
new file mode 100644
index 0000000..66a2de3
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nfsnode.h
@@ -0,0 +1,199 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSNODE_H_
+#define	_NFSCLIENT_NFSNODE_H_
+
+#include <sys/_task.h>
+
+/*
+ * Silly rename structure that hangs off the nfsnode until the name
+ * can be removed by nfs_inactive()
+ */
+struct sillyrename {
+	struct	task s_task;
+	struct	ucred *s_cred;
+	struct	vnode *s_dvp;
+	long	s_namlen;
+	char	s_name[32];
+};
+
+/*
+ * This structure is used to save the logical directory offset to
+ * NFS cookie mappings.
+ * The mappings are stored in a list headed
+ * by n_cookies, as required.
+ * There is one mapping for each NFS_DIRBLKSIZ bytes of directory information
+ * stored in increasing logical offset byte order.
+ */
+#define	NFSNUMCOOKIES		31
+
+struct nfsdmap {
+	LIST_ENTRY(nfsdmap)	ndm_list;
+	int			ndm_eocookie;
+	union {
+		nfsuint64	ndmu3_cookies[NFSNUMCOOKIES];
+		uint64_t	ndmu4_cookies[NFSNUMCOOKIES];
+	} ndm_un1;
+};
+
+#define	ndm_cookies	ndm_un1.ndmu3_cookies
+#define	ndm4_cookies	ndm_un1.ndmu4_cookies
+
+struct nfs_accesscache {
+	u_int32_t		mode;	/* ACCESS mode cache */
+	uid_t			uid;	/* credentials having mode */
+	time_t			stamp;	/* mode cache timestamp */
+};
+
+/*
+ * The nfsnode is the nfs equivalent to ufs's inode. Any similarity
+ * is purely coincidental.
+ * There is a unique nfsnode allocated for each active file,
+ * each current directory, each mounted-on file, text file, and the root.
+ * An nfsnode is 'named' by its file handle. (nget/nfs_node.c)
+ * If this structure exceeds 256 bytes (it is currently 256 using 4.4BSD-Lite
+ * type definitions), file handles of > 32 bytes should probably be split out
+ * into a separate malloc()'d data structure. (Reduce the size of nfsfh_t by
+ * changing the definition in nfsproto.h of NFS_SMALLFH.)
+ * NB: Hopefully the current order of the fields is such that everything will
+ *     be well aligned and, therefore, tightly packed.
+ */
+struct nfsnode {
+	struct mtx 		n_mtx;		/* Protects all of these members */
+	struct lock		n_excl;		/* Exclusive helper for shared
+						   vnode lock */
+	u_quad_t		n_size;		/* Current size of file */
+	u_quad_t		n_brev;		/* Modify rev when cached */
+	u_quad_t		n_lrev;		/* Modify rev for lease */
+	struct nfsvattr		n_vattr;	/* Vnode attribute cache */
+	time_t			n_attrstamp;	/* Attr. cache timestamp */
+	struct nfs_accesscache	n_accesscache[NFS_ACCESSCACHESIZE];
+	struct timespec		n_mtime;	/* Prev modify time. */
+	struct nfsfh		*n_fhp;		/* NFS File Handle */
+	struct vnode		*n_vnode;	/* associated vnode */
+	struct vnode		*n_dvp;		/* parent vnode */
+	struct lockf		*n_lockf;	/* Locking record of file */
+	int			n_error;	/* Save write error value */
+	union {
+		struct timespec	nf_atim;	/* Special file times */
+		nfsuint64	nd_cookieverf;	/* Cookie verifier (dir only) */
+		u_char		nd4_cookieverf[NFSX_VERF];
+	} n_un1;
+	union {
+		struct timespec	nf_mtim;
+		off_t		nd_direof;	/* Dir. EOF offset cache */
+	} n_un2;
+	union {
+		struct sillyrename *nf_silly;	/* Ptr to silly rename struct */
+		LIST_HEAD(, nfsdmap) nd_cook;	/* cookies */
+	} n_un3;
+	short			n_fhsize;	/* size in bytes, of fh */
+	u_int32_t		n_flag;		/* Flag for locking.. */
+	int			n_directio_opens;
+	int                     n_directio_asyncwr;
+	u_int64_t		 n_change;	/* old Change attribute */
+	struct nfsv4node	*n_v4;		/* extra V4 stuff */
+	struct ucred		*n_writecred;	/* Cred. for putpages */
+};
+
+#define	n_atim		n_un1.nf_atim
+#define	n_mtim		n_un2.nf_mtim
+#define	n_sillyrename	n_un3.nf_silly
+#define	n_cookieverf	n_un1.nd_cookieverf
+#define	n4_cookieverf	n_un1.nd4_cookieverf
+#define	n_direofoffset	n_un2.nd_direof
+#define	n_cookies	n_un3.nd_cook
+
+/*
+ * Flags for n_flag
+ */
+#define	NDIRCOOKIELK	0x00000001  /* Lock to serialize access to directory cookies */
+#define	NFSYNCWAIT      0x00000002  /* fsync waiting for all directio async
+				  writes to drain */
+#define	NMODIFIED	0x00000004  /* Might have a modified buffer in bio */
+#define	NWRITEERR	0x00000008  /* Flag write errors so close will know */
+#define	NCREATED	0x00000010  /* Opened by nfs_create() */
+#define	NTRUNCATE	0x00000020  /* Opened by nfs_setattr() */
+#define	NSIZECHANGED	0x00000040  /* File size has changed: need cache inval */
+#define	NNONCACHE	0x00000080  /* Node marked as noncacheable */
+#define	NACC		0x00000100  /* Special file accessed */
+#define	NUPD		0x00000200  /* Special file updated */
+#define	NCHG		0x00000400  /* Special file times changed */
+#define	NDELEGMOD	0x00000800  /* Modified delegation */
+#define	NDELEGRECALL	0x00001000  /* Recall in progress */
+#define	NREMOVEINPROG	0x00002000  /* Remove in progress */
+#define	NREMOVEWANT	0x00004000  /* Want notification that remove is done */
+#define	NLOCK		0x00008000  /* Sleep lock the node */
+#define	NLOCKWANT	0x00010000  /* Want the sleep lock */
+#define	NNOLAYOUT	0x00020000  /* Can't get a layout for this file */
+#define	NWRITEOPENED	0x00040000  /* Has been opened for writing */
+#define	NHASBEENLOCKED	0x00080000  /* Has been file locked. */
+#define	NDSCOMMIT	0x00100000  /* Commit is done via the DS. */
+#define	NVNSETSZSKIP	0x00200000  /* Skipped vnode_pager_setsize() */
+
+/*
+ * Convert between nfsnode pointers and vnode pointers
+ */
+#define	VTONFS(vp)	((struct nfsnode *)(vp)->v_data)
+#define	NFSTOV(np)	((struct vnode *)(np)->n_vnode)
+
+#define	NFS_TIMESPEC_COMPARE(T1, T2)	(((T1)->tv_sec != (T2)->tv_sec) || ((T1)->tv_nsec != (T2)->tv_nsec))
+
+#if defined(_KERNEL)
+
+/*
+ * Prototypes for NFS vnode operations
+ */
+int	ncl_getpages(struct vop_getpages_args *);
+int	ncl_putpages(struct vop_putpages_args *);
+int	ncl_write(struct vop_write_args *);
+int	ncl_inactive(struct vop_inactive_args *);
+int	ncl_reclaim(struct vop_reclaim_args *);
+
+/* other stuff */
+int	ncl_removeit(struct sillyrename *, struct vnode *);
+int	ncl_nget(struct mount *, u_int8_t *, int, struct nfsnode **, int);
+nfsuint64 *ncl_getcookie(struct nfsnode *, off_t, int);
+void	ncl_invaldir(struct vnode *);
+bool	ncl_excl_start(struct vnode *);
+void	ncl_excl_finish(struct vnode *, bool old_lock);
+void	ncl_dircookie_lock(struct nfsnode *);
+void	ncl_dircookie_unlock(struct nfsnode *);
+
+#endif /* _KERNEL */
+
+#endif	/* _NFSCLIENT_NFSNODE_H_ */
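VTONFS() and NFSTOV() above express the usual private-data back-pointer idiom: the vnode's v_data points at the nfsnode, and the nfsnode's n_vnode points back. A minimal stand-alone sketch of the same idiom (not part of the imported sources; toy types only):

#include <stdio.h>

struct toy_vnode {
	void *v_data;			/* filesystem-private data */
};

struct toy_nfsnode {
	struct toy_vnode *n_vnode;	/* back pointer to the vnode */
	unsigned long n_size;
};

#define TOY_VTONFS(vp)	((struct toy_nfsnode *)(vp)->v_data)
#define TOY_NFSTOV(np)	((np)->n_vnode)

int main(void)
{
	struct toy_vnode vp;
	struct toy_nfsnode np = { .n_vnode = &vp, .n_size = 4096 };

	vp.v_data = &np;
	printf("size via vnode: %lu\n", TOY_VTONFS(&vp)->n_size);
	printf("round trip ok: %d\n", TOY_NFSTOV(TOY_VTONFS(&vp)) == &vp);
	return 0;
}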
diff --git a/freebsd/sys/fs/nfsclient/nlminfo.h b/freebsd/sys/fs/nfsclient/nlminfo.h
new file mode 100644
index 0000000..872e20e
--- /dev/null
+++ b/freebsd/sys/fs/nfsclient/nlminfo.h
@@ -0,0 +1,43 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ *    promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Misc NLM information, some needed for the master lockd process, and some
+ * needed by every process doing nlm based locking.
+ */
+struct  nlminfo {
+	/* these are used by any process doing nlm locking */
+        int             msg_seq;        /* sequence counter for lock requests */
+        int             retcode;        /* return code for lock requests */
+	int		set_getlk_pid;
+	int		getlk_pid;
+        struct  timeval pid_start;      /* process starting time */
+};
diff --git a/freebsd/sys/nfs/bootp_subr.c b/freebsd/sys/nfs/bootp_subr.c
new file mode 100644
index 0000000..07418af
--- /dev/null
+++ b/freebsd/sys/nfs/bootp_subr.c
@@ -0,0 +1,1904 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1995 Gordon Ross, Adam Glass
+ * Copyright (c) 1992 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Lawrence Berkeley Laboratory and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * based on:
+ *      nfs/krpc_subr.c
+ *	$NetBSD: krpc_subr.c,v 1.10 1995/08/08 20:43:43 gwr Exp $
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_bootp.h"
+#include "opt_nfs.h"
+#include "opt_rootdevname.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/route.h>
+#ifdef BOOTP_DEBUG
+#include <net/route_var.h>
+#endif
+
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <net/if_types.h>
+#include <net/if_dl.h>
+#include <net/vnet.h>
+
+#include <nfs/nfsproto.h>
+#include <nfsclient/nfs.h>
+#include <nfs/nfsdiskless.h>
+#include <nfs/krpc.h>
+#include <nfs/xdr_subs.h>
+
+#define BOOTP_MIN_LEN		300	/* Minimum size of bootp udp packet */
+
+#ifndef BOOTP_SETTLE_DELAY
+#define BOOTP_SETTLE_DELAY 3
+#endif
+
+/* 
+ * Wait 10 seconds for interface appearance
+ * USB ethernet adapters might require some time to pop up
+ */
+#ifndef	BOOTP_IFACE_WAIT_TIMEOUT
+#define	BOOTP_IFACE_WAIT_TIMEOUT	10
+#endif
+
+/*
+ * What is the longest we will wait before re-sending a request?
+ * Note this is also the frequency of "RPC timeout" messages.
+ * The re-send loop counts up linearly to this maximum, so the
+ * first complaint will happen after (1+2+3+4+5)=15 seconds.
+ */
+#define	MAX_RESEND_DELAY 5	/* seconds */
+
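Because the re-send delay grows by one second per retry and caps at MAX_RESEND_DELAY, the cumulative wait before the first complaint is 1+2+3+4+5 = 15 seconds, as the comment above states. A tiny stand-alone check of that arithmetic (not part of the imported sources):

#include <stdio.h>

int main(void)
{
	int timo = 0, total = 0, max_resend_delay = 5;

	while (timo < max_resend_delay) {
		timo++;			/* matches "if (timo < MAX_RESEND_DELAY) timo++;" */
		total += timo;
	}
	printf("seconds before first timeout message: %d\n", total);	/* 15 */
	return 0;
}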
+/* Definitions from RFC951 */
+struct bootp_packet {
+	u_int8_t op;
+	u_int8_t htype;
+	u_int8_t hlen;
+	u_int8_t hops;
+	u_int32_t xid;
+	u_int16_t secs;
+	u_int16_t flags;
+	struct in_addr ciaddr;
+	struct in_addr yiaddr;
+	struct in_addr siaddr;
+	struct in_addr giaddr;
+	unsigned char chaddr[16];
+	char sname[64];
+	char file[128];
+	unsigned char vend[1222];
+};
+
+struct bootpc_ifcontext {
+	STAILQ_ENTRY(bootpc_ifcontext) next;
+	struct bootp_packet call;
+	struct bootp_packet reply;
+	int replylen;
+	int overload;
+	union {
+		struct ifreq _ifreq;
+		struct in_aliasreq _in_alias_req;
+	} _req;
+#define	ireq	_req._ifreq
+#define	iareq	_req._in_alias_req
+	struct ifnet *ifp;
+	struct sockaddr_dl *sdl;
+	struct sockaddr_in myaddr;
+	struct sockaddr_in netmask;
+	struct sockaddr_in gw;
+	int gotgw;
+	int gotnetmask;
+	int gotrootpath;
+	int outstanding;
+	int sentmsg;
+	u_int32_t xid;
+	enum {
+		IF_BOOTP_UNRESOLVED,
+		IF_BOOTP_RESOLVED,
+		IF_BOOTP_FAILED,
+		IF_DHCP_UNRESOLVED,
+		IF_DHCP_OFFERED,
+		IF_DHCP_RESOLVED,
+		IF_DHCP_FAILED,
+	} state;
+	int dhcpquerytype;		/* dhcp type sent */
+	struct in_addr dhcpserver;
+	int gotdhcpserver;
+	uint16_t mtu;
+};
+
+#define TAG_MAXLEN 1024
+struct bootpc_tagcontext {
+	char buf[TAG_MAXLEN + 1];
+	int overload;
+	int badopt;
+	int badtag;
+	int foundopt;
+	int taglen;
+};
+
+struct bootpc_globalcontext {
+	STAILQ_HEAD(, bootpc_ifcontext) interfaces;
+	u_int32_t xid;
+	int any_root_overrides;
+	int gotrootpath;
+	int gotgw;
+	int ifnum;
+	int secs;
+	int starttime;
+	struct bootp_packet reply;
+	int replylen;
+	struct bootpc_ifcontext *setrootfs;
+	struct bootpc_ifcontext *sethostname;
+	struct bootpc_tagcontext tmptag;
+	struct bootpc_tagcontext tag;
+};
+
+#define IPPORT_BOOTPC 68
+#define IPPORT_BOOTPS 67
+
+#define BOOTP_REQUEST 1
+#define BOOTP_REPLY 2
+
+/* Common tags */
+#define TAG_PAD		  0  /* Pad option, implicit length 1 */
+#define TAG_SUBNETMASK	  1  /* RFC 950 subnet mask */
+#define TAG_ROUTERS	  3  /* Routers (in order of preference) */
+#define TAG_HOSTNAME	 12  /* Client host name */
+#define TAG_ROOT	 17  /* Root path */
+#define TAG_INTF_MTU	 26  /* Interface MTU Size (RFC2132) */
+
+/* DHCP specific tags */
+#define TAG_OVERLOAD	 52  /* Option Overload */
+#define TAG_MAXMSGSIZE   57  /* Maximum DHCP Message Size */
+
+#define TAG_END		255  /* End Option (i.e. no more options) */
+
+/* Overload values */
+#define OVERLOAD_FILE     1
+#define OVERLOAD_SNAME    2
+
+/* Site specific tags: */
+#define TAG_ROOTOPTS	130
+#define TAG_COOKIE	134	/* ascii info for userland, via sysctl */
+
+#define TAG_DHCP_MSGTYPE 53
+#define TAG_DHCP_REQ_ADDR 50
+#define TAG_DHCP_SERVERID 54
+#define TAG_DHCP_LEASETIME 51
+
+#define TAG_VENDOR_INDENTIFIER 60
+
+#define DHCP_NOMSG    0
+#define DHCP_DISCOVER 1
+#define DHCP_OFFER    2
+#define DHCP_REQUEST  3
+#define DHCP_ACK      5
+
+/* NFS read/write block size */
+#ifndef BOOTP_BLOCKSIZE
+#define	BOOTP_BLOCKSIZE	8192
+#endif
+
+static char bootp_cookie[128];
+static struct socket *bootp_so;
+SYSCTL_STRING(_kern, OID_AUTO, bootp_cookie, CTLFLAG_RD,
+	bootp_cookie, 0, "Cookie (T134) supplied by bootp server");
+
+/* mountd RPC */
+static int	md_mount(struct sockaddr_in *mdsin, char *path, u_char *fhp,
+		    int *fhsizep, struct nfs_args *args, struct thread *td);
+static int	setfs(struct sockaddr_in *addr, char *path, char *p,
+		    const struct in_addr *siaddr);
+static int	getdec(char **ptr);
+static int	getip(char **ptr, struct in_addr *ip);
+static void	mountopts(struct nfs_args *args, char *p);
+static int	xdr_opaque_decode(struct mbuf **ptr, u_char *buf, int len);
+static int	xdr_int_decode(struct mbuf **ptr, int *iptr);
+static void	print_in_addr(struct in_addr addr);
+static void	print_sin_addr(struct sockaddr_in *addr);
+static void	clear_sinaddr(struct sockaddr_in *sin);
+static void	allocifctx(struct bootpc_globalcontext *gctx);
+static void	bootpc_compose_query(struct bootpc_ifcontext *ifctx,
+		    struct thread *td);
+static unsigned char *bootpc_tag(struct bootpc_tagcontext *tctx,
+		    struct bootp_packet *bp, int len, int tag);
+static void bootpc_tag_helper(struct bootpc_tagcontext *tctx,
+		    unsigned char *start, int len, int tag);
+
+#ifdef BOOTP_DEBUG
+void bootpboot_p_sa(struct sockaddr *sa, struct sockaddr *ma);
+void bootpboot_p_rtentry(struct rtentry *rt);
+void bootpboot_p_tree(struct radix_node *rn);
+void bootpboot_p_rtlist(void);
+void bootpboot_p_if(struct ifnet *ifp, struct ifaddr *ifa);
+void bootpboot_p_iflist(void);
+#endif
+
+static int	bootpc_call(struct bootpc_globalcontext *gctx,
+		    struct thread *td);
+
+static void	bootpc_fakeup_interface(struct bootpc_ifcontext *ifctx,
+		    struct thread *td);
+
+static void	bootpc_adjust_interface(struct bootpc_ifcontext *ifctx,
+		    struct bootpc_globalcontext *gctx, struct thread *td);
+
+static void	bootpc_decode_reply(struct nfsv3_diskless *nd,
+		    struct bootpc_ifcontext *ifctx,
+		    struct bootpc_globalcontext *gctx);
+
+static int	bootpc_received(struct bootpc_globalcontext *gctx,
+		    struct bootpc_ifcontext *ifctx);
+
+static __inline int bootpc_ifctx_isresolved(struct bootpc_ifcontext *ifctx);
+static __inline int bootpc_ifctx_isunresolved(struct bootpc_ifcontext *ifctx);
+static __inline int bootpc_ifctx_isfailed(struct bootpc_ifcontext *ifctx);
+
+/*
+ * In order to have multiple active interfaces with address 0.0.0.0
+ * and be able to send data to a selected interface, we first set
+ * mask to /8 on all interfaces, and temporarily set it to /0 when
+ * doing sosend().
+ */
+
+#ifdef BOOTP_DEBUG
+void
+bootpboot_p_sa(struct sockaddr *sa, struct sockaddr *ma)
+{
+
+	if (sa == NULL) {
+		printf("(sockaddr *) <null>");
+		return;
+	}
+	switch (sa->sa_family) {
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+		sin = (struct sockaddr_in *) sa;
+		printf("inet ");
+		print_sin_addr(sin);
+		if (ma != NULL) {
+			sin = (struct sockaddr_in *) ma;
+			printf(" mask ");
+			print_sin_addr(sin);
+		}
+	}
+	break;
+	case AF_LINK:
+	{
+		struct sockaddr_dl *sli;
+		int i;
+
+		sli = (struct sockaddr_dl *) sa;
+		printf("link %.*s ", sli->sdl_nlen, sli->sdl_data);
+		for (i = 0; i < sli->sdl_alen; i++) {
+			if (i > 0)
+				printf(":");
+			printf("%x", ((unsigned char *) LLADDR(sli))[i]);
+		}
+	}
+	break;
+	default:
+		printf("af%d", sa->sa_family);
+	}
+}
+
+void
+bootpboot_p_rtentry(struct rtentry *rt)
+{
+
+	bootpboot_p_sa(rt_key(rt), rt_mask(rt));
+	printf(" ");
+	bootpboot_p_sa(rt->rt_gateway, NULL);
+	printf(" ");
+	printf("flags %x", (unsigned short) rt->rt_flags);
+	printf(" %d", (int) rt->rt_expire);
+	printf(" %s\n", rt->rt_ifp->if_xname);
+}
+
+void
+bootpboot_p_tree(struct radix_node *rn)
+{
+
+	while (rn != NULL) {
+		if (rn->rn_bit < 0) {
+			if ((rn->rn_flags & RNF_ROOT) != 0) {
+			} else {
+				bootpboot_p_rtentry((struct rtentry *) rn);
+			}
+			rn = rn->rn_dupedkey;
+		} else {
+			bootpboot_p_tree(rn->rn_left);
+			bootpboot_p_tree(rn->rn_right);
+			return;
+		}
+	}
+}
+
+void
+bootpboot_p_rtlist(void)
+{
+	RIB_RLOCK_TRACKER;
+	struct rib_head *rnh;
+
+	printf("Routing table:\n");
+	rnh = rt_tables_get_rnh(0, AF_INET);
+	if (rnh == NULL)
+		return;
+	RIB_RLOCK(rnh);	/* could sleep XXX */
+	bootpboot_p_tree(rnh->rnh_treetop);
+	RIB_RUNLOCK(rnh);
+}
+
+void
+bootpboot_p_if(struct ifnet *ifp, struct ifaddr *ifa)
+{
+
+	printf("%s flags %x, addr ",
+	       ifp->if_xname, ifp->if_flags);
+	print_sin_addr((struct sockaddr_in *) ifa->ifa_addr);
+	printf(", broadcast ");
+	print_sin_addr((struct sockaddr_in *) ifa->ifa_dstaddr);
+	printf(", netmask ");
+	print_sin_addr((struct sockaddr_in *) ifa->ifa_netmask);
+	printf("\n");
+}
+
+void
+bootpboot_p_iflist(void)
+{
+	struct ifnet *ifp;
+	struct ifaddr *ifa;
+
+	printf("Interface list:\n");
+	IFNET_RLOCK();
+	for (ifp = CK_STAILQ_FIRST(&V_ifnet);
+	     ifp != NULL;
+	     ifp = CK_STAILQ_NEXT(ifp, if_link)) {
+		for (ifa = CK_STAILQ_FIRST(&ifp->if_addrhead);
+		     ifa != NULL;
+		     ifa = CK_STAILQ_NEXT(ifa, ifa_link))
+			if (ifa->ifa_addr->sa_family == AF_INET)
+				bootpboot_p_if(ifp, ifa);
+	}
+	IFNET_RUNLOCK();
+}
+#endif /* defined(BOOTP_DEBUG) */
+
+static void
+clear_sinaddr(struct sockaddr_in *sin)
+{
+
+	bzero(sin, sizeof(*sin));
+	sin->sin_len = sizeof(*sin);
+	sin->sin_family = AF_INET;
+	sin->sin_addr.s_addr = INADDR_ANY; /* XXX: htonl(INADDR_ANY) ? */
+	sin->sin_port = 0;
+}
+
+static void
+allocifctx(struct bootpc_globalcontext *gctx)
+{
+	struct bootpc_ifcontext *ifctx;
+
+	ifctx = malloc(sizeof(*ifctx), M_TEMP, M_WAITOK | M_ZERO);
+	ifctx->xid = gctx->xid;
+#ifdef BOOTP_NO_DHCP
+	ifctx->state = IF_BOOTP_UNRESOLVED;
+#else
+	ifctx->state = IF_DHCP_UNRESOLVED;
+#endif
+	gctx->xid += 0x100;
+	STAILQ_INSERT_TAIL(&gctx->interfaces, ifctx, next);
+}
+
+static __inline int
+bootpc_ifctx_isresolved(struct bootpc_ifcontext *ifctx)
+{
+
+	if (ifctx->state == IF_BOOTP_RESOLVED ||
+	    ifctx->state == IF_DHCP_RESOLVED)
+		return 1;
+	return 0;
+}
+
+static __inline int
+bootpc_ifctx_isunresolved(struct bootpc_ifcontext *ifctx)
+{
+
+	if (ifctx->state == IF_BOOTP_UNRESOLVED ||
+	    ifctx->state == IF_DHCP_UNRESOLVED)
+		return 1;
+	return 0;
+}
+
+static __inline int
+bootpc_ifctx_isfailed(struct bootpc_ifcontext *ifctx)
+{
+
+	if (ifctx->state == IF_BOOTP_FAILED ||
+	    ifctx->state == IF_DHCP_FAILED)
+		return 1;
+	return 0;
+}
+
+static int
+bootpc_received(struct bootpc_globalcontext *gctx,
+    struct bootpc_ifcontext *ifctx)
+{
+	unsigned char dhcpreplytype;
+	char *p;
+
+	/*
+	 * Need timeout for fallback to less
+	 * desirable alternative.
+	 */
+
+	/* This call used for the side effect (badopt flag) */
+	(void) bootpc_tag(&gctx->tmptag, &gctx->reply,
+			  gctx->replylen,
+			  TAG_END);
+
+	/* If packet is invalid, ignore it */
+	if (gctx->tmptag.badopt != 0)
+		return 0;
+
+	p = bootpc_tag(&gctx->tmptag, &gctx->reply,
+		       gctx->replylen, TAG_DHCP_MSGTYPE);
+	if (p != NULL)
+		dhcpreplytype = *p;
+	else
+		dhcpreplytype = DHCP_NOMSG;
+
+	switch (ifctx->dhcpquerytype) {
+	case DHCP_DISCOVER:
+		if (dhcpreplytype != DHCP_OFFER 	/* Normal DHCP offer */
+#ifndef BOOTP_FORCE_DHCP
+		    && dhcpreplytype != DHCP_NOMSG	/* Fallback to BOOTP */
+#endif
+			)
+			return 0;
+		break;
+	case DHCP_REQUEST:
+		if (dhcpreplytype != DHCP_ACK)
+			return 0;
+	case DHCP_NOMSG:
+		break;
+	}
+
+	/* Ignore packet unless it gives us a root tag we didn't have */
+
+	if ((ifctx->state == IF_BOOTP_RESOLVED ||
+	     (ifctx->dhcpquerytype == DHCP_DISCOVER &&
+	      (ifctx->state == IF_DHCP_OFFERED ||
+	       ifctx->state == IF_DHCP_RESOLVED))) &&
+	    (bootpc_tag(&gctx->tmptag, &ifctx->reply,
+			ifctx->replylen,
+			TAG_ROOT) != NULL ||
+	     bootpc_tag(&gctx->tmptag, &gctx->reply,
+			gctx->replylen,
+			TAG_ROOT) == NULL))
+		return 0;
+
+	bcopy(&gctx->reply, &ifctx->reply, gctx->replylen);
+	ifctx->replylen = gctx->replylen;
+
+	/* XXX: Only reset if 'perfect' response */
+	if (ifctx->state == IF_BOOTP_UNRESOLVED)
+		ifctx->state = IF_BOOTP_RESOLVED;
+	else if (ifctx->state == IF_DHCP_UNRESOLVED &&
+		 ifctx->dhcpquerytype == DHCP_DISCOVER) {
+		if (dhcpreplytype == DHCP_OFFER)
+			ifctx->state = IF_DHCP_OFFERED;
+		else
+			ifctx->state = IF_BOOTP_RESOLVED;	/* Fallback */
+	} else if (ifctx->state == IF_DHCP_OFFERED &&
+		   ifctx->dhcpquerytype == DHCP_REQUEST)
+		ifctx->state = IF_DHCP_RESOLVED;
+
+
+	if (ifctx->dhcpquerytype == DHCP_DISCOVER &&
+	    ifctx->state != IF_BOOTP_RESOLVED) {
+		p = bootpc_tag(&gctx->tmptag, &ifctx->reply,
+			       ifctx->replylen, TAG_DHCP_SERVERID);
+		if (p != NULL && gctx->tmptag.taglen == 4) {
+			memcpy(&ifctx->dhcpserver, p, 4);
+			ifctx->gotdhcpserver = 1;
+		} else
+			ifctx->gotdhcpserver = 0;
+		return 1;
+	}
+
+	ifctx->gotrootpath = (bootpc_tag(&gctx->tmptag, &ifctx->reply,
+					 ifctx->replylen,
+					 TAG_ROOT) != NULL);
+	ifctx->gotgw = (bootpc_tag(&gctx->tmptag, &ifctx->reply,
+				   ifctx->replylen,
+				   TAG_ROUTERS) != NULL);
+	ifctx->gotnetmask = (bootpc_tag(&gctx->tmptag, &ifctx->reply,
+					ifctx->replylen,
+					TAG_SUBNETMASK) != NULL);
+	return 1;
+}
+
+static int
+bootpc_call(struct bootpc_globalcontext *gctx, struct thread *td)
+{
+	struct sockaddr_in *sin, dst;
+	struct uio auio;
+	struct sockopt sopt;
+	struct iovec aio;
+	int error, on, rcvflg, timo, len;
+	time_t atimo;
+	time_t rtimo;
+	struct timeval tv;
+	struct bootpc_ifcontext *ifctx;
+	int outstanding;
+	int gotrootpath;
+	int retry;
+	const char *s;
+
+	tv.tv_sec = 1;
+	tv.tv_usec = 0;
+	bzero(&sopt, sizeof(sopt));
+	sopt.sopt_dir = SOPT_SET;
+	sopt.sopt_level = SOL_SOCKET;
+	sopt.sopt_name = SO_RCVTIMEO;
+	sopt.sopt_val = &tv;
+	sopt.sopt_valsize = sizeof tv;
+
+	error = sosetopt(bootp_so, &sopt);
+	if (error != 0)
+		goto out;
+
+	/*
+	 * Enable broadcast.
+	 */
+	on = 1;
+	sopt.sopt_name = SO_BROADCAST;
+	sopt.sopt_val = &on;
+	sopt.sopt_valsize = sizeof on;
+
+	error = sosetopt(bootp_so, &sopt);
+	if (error != 0)
+		goto out;
+
+	/*
+	 * Disable routing.
+	 */
+
+	on = 1;
+	sopt.sopt_name = SO_DONTROUTE;
+	sopt.sopt_val = &on;
+	sopt.sopt_valsize = sizeof on;
+
+	error = sosetopt(bootp_so, &sopt);
+	if (error != 0)
+		goto out;
+
+	/*
+	 * Bind the local endpoint to a bootp client port.
+	 */
+	sin = &dst;
+	clear_sinaddr(sin);
+	sin->sin_port = htons(IPPORT_BOOTPC);
+	error = sobind(bootp_so, (struct sockaddr *)sin, td);
+	if (error != 0) {
+		printf("bind failed\n");
+		goto out;
+	}
+
+	/*
+	 * Setup socket address for the server.
+	 */
+	sin = &dst;
+	clear_sinaddr(sin);
+	sin->sin_addr.s_addr = INADDR_BROADCAST;
+	sin->sin_port = htons(IPPORT_BOOTPS);
+
+	/*
+	 * Send it, repeatedly, until a reply is received,
+	 * but delay each re-send by an increasing amount.
+	 * If the delay hits the maximum, start complaining.
+	 */
+	timo = 0;
+	rtimo = 0;
+	for (;;) {
+
+		outstanding = 0;
+		gotrootpath = 0;
+
+		STAILQ_FOREACH(ifctx, &gctx->interfaces, next) {
+			if (bootpc_ifctx_isresolved(ifctx) != 0 &&
+			    bootpc_tag(&gctx->tmptag, &ifctx->reply,
+				       ifctx->replylen,
+				       TAG_ROOT) != NULL)
+				gotrootpath = 1;
+		}
+
+		STAILQ_FOREACH(ifctx, &gctx->interfaces, next) {
+			struct in_aliasreq *ifra = &ifctx->iareq;
+			sin = (struct sockaddr_in *)&ifra->ifra_mask;
+
+			ifctx->outstanding = 0;
+			if (bootpc_ifctx_isresolved(ifctx)  != 0 &&
+			    gotrootpath != 0) {
+				continue;
+			}
+			if (bootpc_ifctx_isfailed(ifctx) != 0)
+				continue;
+
+			outstanding++;
+			ifctx->outstanding = 1;
+
+			/* Proceed to next step in DHCP negotiation */
+			if ((ifctx->state == IF_DHCP_OFFERED &&
+			     ifctx->dhcpquerytype != DHCP_REQUEST) ||
+			    (ifctx->state == IF_DHCP_UNRESOLVED &&
+			     ifctx->dhcpquerytype != DHCP_DISCOVER) ||
+			    (ifctx->state == IF_BOOTP_UNRESOLVED &&
+			     ifctx->dhcpquerytype != DHCP_NOMSG)) {
+				ifctx->sentmsg = 0;
+				bootpc_compose_query(ifctx, td);
+			}
+
+			/* Send BOOTP request (or re-send). */
+
+			if (ifctx->sentmsg == 0) {
+				switch(ifctx->dhcpquerytype) {
+				case DHCP_DISCOVER:
+					s = "DHCP Discover";
+					break;
+				case DHCP_REQUEST:
+					s = "DHCP Request";
+					break;
+				case DHCP_NOMSG:
+				default:
+					s = "BOOTP Query";
+					break;
+				}
+				printf("Sending %s packet from "
+				       "interface %s (%*D)\n",
+				       s,
+				       ifctx->ireq.ifr_name,
+				       ifctx->sdl->sdl_alen,
+				       (unsigned char *) LLADDR(ifctx->sdl),
+				       ":");
+				ifctx->sentmsg = 1;
+			}
+
+			aio.iov_base = (caddr_t) &ifctx->call;
+			aio.iov_len = sizeof(ifctx->call);
+
+			auio.uio_iov = &aio;
+			auio.uio_iovcnt = 1;
+			auio.uio_segflg = UIO_SYSSPACE;
+			auio.uio_rw = UIO_WRITE;
+			auio.uio_offset = 0;
+			auio.uio_resid = sizeof(ifctx->call);
+			auio.uio_td = td;
+
+			/* Set netmask to 0.0.0.0 */
+			clear_sinaddr(sin);
+			error = ifioctl(bootp_so, SIOCAIFADDR, (caddr_t)ifra,
+			    td);
+			if (error != 0)
+				panic("%s: SIOCAIFADDR, error=%d", __func__,
+				    error);
+
+			error = sosend(bootp_so, (struct sockaddr *) &dst,
+				       &auio, NULL, NULL, 0, td);
+			if (error != 0)
+				printf("%s: sosend: %d state %08x\n", __func__,
+				    error, (int )bootp_so->so_state);
+
+			/* Set netmask to 255.0.0.0 */
+			sin->sin_addr.s_addr = htonl(IN_CLASSA_NET);
+			error = ifioctl(bootp_so, SIOCAIFADDR, (caddr_t)ifra,
+			    td);
+			if (error != 0)
+				panic("%s: SIOCAIFADDR, error=%d", __func__,
+				    error);
+		}
+
+		if (outstanding == 0 &&
+		    (rtimo == 0 || time_second >= rtimo)) {
+			error = 0;
+			goto out;
+		}
+
+		/* Determine new timeout. */
+		if (timo < MAX_RESEND_DELAY)
+			timo++;
+		else {
+			printf("DHCP/BOOTP timeout for server ");
+			print_sin_addr(&dst);
+			printf("\n");
+		}
+
+		/*
+		 * Wait for up to timo seconds for a reply.
+		 * The socket receive timeout was set to 1 second.
+		 */
+		atimo = timo + time_second;
+		while (time_second < atimo) {
+			aio.iov_base = (caddr_t) &gctx->reply;
+			aio.iov_len = sizeof(gctx->reply);
+
+			auio.uio_iov = &aio;
+			auio.uio_iovcnt = 1;
+			auio.uio_segflg = UIO_SYSSPACE;
+			auio.uio_rw = UIO_READ;
+			auio.uio_offset = 0;
+			auio.uio_resid = sizeof(gctx->reply);
+			auio.uio_td = td;
+
+			rcvflg = 0;
+			error = soreceive(bootp_so, NULL, &auio,
+					  NULL, NULL, &rcvflg);
+			gctx->secs = time_second - gctx->starttime;
+			STAILQ_FOREACH(ifctx, &gctx->interfaces, next) {
+				if (bootpc_ifctx_isresolved(ifctx) != 0 ||
+				    bootpc_ifctx_isfailed(ifctx) != 0)
+					continue;
+
+				ifctx->call.secs = htons(gctx->secs);
+			}
+			if (error == EWOULDBLOCK)
+				continue;
+			if (error != 0)
+				goto out;
+			len = sizeof(gctx->reply) - auio.uio_resid;
+
+			/* Do we have the required number of bytes ? */
+			if (len < BOOTP_MIN_LEN)
+				continue;
+			gctx->replylen = len;
+
+			/* Is it a reply? */
+			if (gctx->reply.op != BOOTP_REPLY)
+				continue;
+
+			/* Is this an answer to our query */
+			STAILQ_FOREACH(ifctx, &gctx->interfaces, next) {
+				if (gctx->reply.xid != ifctx->call.xid)
+					continue;
+
+				/* Same HW address size ? */
+				if (gctx->reply.hlen != ifctx->call.hlen)
+					continue;
+
+				/* Correct HW address ? */
+				if (bcmp(gctx->reply.chaddr,
+					 ifctx->call.chaddr,
+					 ifctx->call.hlen) != 0)
+					continue;
+
+				break;
+			}
+
+			if (ifctx != NULL) {
+				s =  bootpc_tag(&gctx->tmptag,
+						&gctx->reply,
+						gctx->replylen,
+						TAG_DHCP_MSGTYPE);
+				if (s != NULL) {
+					switch (*s) {
+					case DHCP_OFFER:
+						s = "DHCP Offer";
+						break;
+					case DHCP_ACK:
+						s = "DHCP Ack";
+						break;
+					default:
+						s = "DHCP (unexpected)";
+						break;
+					}
+				} else
+					s = "BOOTP Reply";
+
+				printf("Received %s packet"
+				       " on %s from ",
+				       s,
+				       ifctx->ireq.ifr_name);
+				print_in_addr(gctx->reply.siaddr);
+				if (gctx->reply.giaddr.s_addr !=
+				    htonl(INADDR_ANY)) {
+					printf(" via ");
+					print_in_addr(gctx->reply.giaddr);
+				}
+				if (bootpc_received(gctx, ifctx) != 0) {
+					printf(" (accepted)");
+					if (ifctx->outstanding) {
+						ifctx->outstanding = 0;
+						outstanding--;
+					}
+					/* Network settle delay */
+					if (outstanding == 0)
+						atimo = time_second +
+							BOOTP_SETTLE_DELAY;
+				} else
+					printf(" (ignored)");
+				if (ifctx->gotrootpath || 
+				    gctx->any_root_overrides) {
+					gotrootpath = 1;
+					rtimo = time_second +
+						BOOTP_SETTLE_DELAY;
+					if (ifctx->gotrootpath)
+						printf(" (got root path)");
+				}
+				printf("\n");
+			}
+		} /* while secs */
+#ifdef BOOTP_TIMEOUT
+		if (gctx->secs > BOOTP_TIMEOUT && BOOTP_TIMEOUT > 0)
+			break;
+#endif
+		/* Force a retry if halfway in DHCP negotiation */
+		retry = 0;
+		STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+			if (ifctx->state == IF_DHCP_OFFERED) {
+				if (ifctx->dhcpquerytype == DHCP_DISCOVER)
+					retry = 1;
+				else
+					ifctx->state = IF_DHCP_UNRESOLVED;
+			}
+
+		if (retry != 0)
+			continue;
+
+		if (gotrootpath != 0) {
+			gctx->gotrootpath = gotrootpath;
+			if (rtimo != 0 && time_second >= rtimo)
+				break;
+		}
+	} /* forever send/receive */
+
+	/*
+	 * XXX: These are errors of varying seriousness being silently
+	 * ignored
+	 */
+
+	STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+		if (bootpc_ifctx_isresolved(ifctx) == 0) {
+			printf("%s timeout for interface %s\n",
+			       ifctx->dhcpquerytype != DHCP_NOMSG ?
+			       "DHCP" : "BOOTP",
+			       ifctx->ireq.ifr_name);
+		}
+
+	if (gctx->gotrootpath != 0) {
+#if 0
+		printf("Got a root path, ignoring remaining timeout\n");
+#endif
+		error = 0;
+		goto out;
+	}
+#ifndef BOOTP_NFSROOT
+	STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+		if (bootpc_ifctx_isresolved(ifctx) != 0) {
+			error = 0;
+			goto out;
+		}
+#endif
+	error = ETIMEDOUT;
+
+out:
+	return (error);
+}
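
The send/receive loop in bootpc_call() above ramps its wait linearly: timo grows by one second per round until it reaches MAX_RESEND_DELAY, and only then does the loop begin printing the DHCP/BOOTP timeout warning while it keeps retrying. A minimal user-space sketch of that schedule (illustrative only, not part of the imported sources):

#include <stdio.h>

#define MAX_RESEND_DELAY 5	/* seconds, mirroring the kernel constant */

int
main(void)
{
	int timo = 0, elapsed = 0, round;

	for (round = 1; round <= 8; round++) {
		if (timo < MAX_RESEND_DELAY)
			timo++;		/* linear ramp, capped at the maximum */
		else
			printf("round %d: timeout warning would be printed\n",
			    round);
		elapsed += timo;
		printf("round %d: wait up to %d s, %d s elapsed in total\n",
		    round, timo, elapsed);
	}
	return 0;
}
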
+
+static void
+bootpc_fakeup_interface(struct bootpc_ifcontext *ifctx, struct thread *td)
+{
+	struct ifreq *ifr;
+	struct in_aliasreq *ifra;
+	struct sockaddr_in *sin;
+	int error;
+
+	ifr = &ifctx->ireq;
+	ifra = &ifctx->iareq;
+
+	/*
+	 * Bring up the interface.
+	 *
+	 * Get the old interface flags and OR IFF_UP into them; if
+	 * IFF_UP is set blindly, interface selection can be clobbered.
+	 */
+	error = ifioctl(bootp_so, SIOCGIFFLAGS, (caddr_t)ifr, td);
+	if (error != 0)
+		panic("%s: SIOCGIFFLAGS, error=%d", __func__, error);
+	ifr->ifr_flags |= IFF_UP;
+	error = ifioctl(bootp_so, SIOCSIFFLAGS, (caddr_t)ifr, td);
+	if (error != 0)
+		panic("%s: SIOCSIFFLAGS, error=%d", __func__, error);
+
+	/*
+	 * Do enough of ifconfig(8) so that the chosen interface
+	 * can talk to the servers. Set address to 0.0.0.0/8 and
+	 * broadcast address to local broadcast.
+	 */
+	sin = (struct sockaddr_in *)&ifra->ifra_addr;
+	clear_sinaddr(sin);
+	sin = (struct sockaddr_in *)&ifra->ifra_mask;
+	clear_sinaddr(sin);
+	sin->sin_addr.s_addr = htonl(IN_CLASSA_NET);
+	sin = (struct sockaddr_in *)&ifra->ifra_broadaddr;
+	clear_sinaddr(sin);
+	sin->sin_addr.s_addr = htonl(INADDR_BROADCAST);
+	error = ifioctl(bootp_so, SIOCAIFADDR, (caddr_t)ifra, td);
+	if (error != 0)
+		panic("%s: SIOCAIFADDR, error=%d", __func__, error);
+}
+
+static void
+bootpc_shutdown_interface(struct bootpc_ifcontext *ifctx, struct thread *td)
+{
+	struct ifreq *ifr;
+	struct sockaddr_in *sin;
+	int error;
+
+	ifr = &ifctx->ireq;
+
+	printf("Shutdown interface %s\n", ifctx->ireq.ifr_name);
+	error = ifioctl(bootp_so, SIOCGIFFLAGS, (caddr_t)ifr, td);
+	if (error != 0)
+		panic("%s: SIOCGIFFLAGS, error=%d", __func__, error);
+	ifr->ifr_flags &= ~IFF_UP;
+	error = ifioctl(bootp_so, SIOCSIFFLAGS, (caddr_t)ifr, td);
+	if (error != 0)
+		panic("%s: SIOCSIFFLAGS, error=%d", __func__, error);
+
+	sin = (struct sockaddr_in *) &ifr->ifr_addr;
+	clear_sinaddr(sin);
+	error = ifioctl(bootp_so, SIOCDIFADDR, (caddr_t) ifr, td);
+	if (error != 0)
+		panic("%s: SIOCDIFADDR, error=%d", __func__, error);
+}
+
+static void
+bootpc_adjust_interface(struct bootpc_ifcontext *ifctx,
+    struct bootpc_globalcontext *gctx, struct thread *td)
+{
+	int error;
+	struct sockaddr_in *sin;
+	struct ifreq *ifr;
+	struct in_aliasreq *ifra;
+	struct sockaddr_in *myaddr;
+	struct sockaddr_in *netmask;
+
+	ifr = &ifctx->ireq;
+	ifra = &ifctx->iareq;
+	myaddr = &ifctx->myaddr;
+	netmask = &ifctx->netmask;
+
+	if (bootpc_ifctx_isresolved(ifctx) == 0) {
+		/* Shutdown interfaces where BOOTP failed */
+		bootpc_shutdown_interface(ifctx, td);
+		return;
+	}
+
+	printf("Adjusted interface %s", ifctx->ireq.ifr_name);
+
+	/* Do BOOTP interface options */
+	if (ifctx->mtu != 0) {
+		printf(" (MTU=%d%s)", ifctx->mtu, 
+		    (ifctx->mtu > 1514) ? "/JUMBO" : "");
+		ifr->ifr_mtu = ifctx->mtu;
+		error = ifioctl(bootp_so, SIOCSIFMTU, (caddr_t) ifr, td);
+		if (error != 0)
+			panic("%s: SIOCSIFMTU, error=%d", __func__, error);
+	}
+	printf("\n");
+
+	/*
+	 * Do enough of ifconfig(8) so that the chosen interface
+	 * can talk to the servers.  (just set the address)
+	 */
+	sin = (struct sockaddr_in *) &ifr->ifr_addr;
+	clear_sinaddr(sin);
+	error = ifioctl(bootp_so, SIOCDIFADDR, (caddr_t) ifr, td);
+	if (error != 0)
+		panic("%s: SIOCDIFADDR, error=%d", __func__, error);
+
+	bcopy(myaddr, &ifra->ifra_addr, sizeof(*myaddr));
+	bcopy(netmask, &ifra->ifra_mask, sizeof(*netmask));
+	clear_sinaddr(&ifra->ifra_broadaddr);
+	ifra->ifra_broadaddr.sin_addr.s_addr = myaddr->sin_addr.s_addr |
+	    ~netmask->sin_addr.s_addr;
+
+	error = ifioctl(bootp_so, SIOCAIFADDR, (caddr_t)ifra, td);
+	if (error != 0)
+		panic("%s: SIOCAIFADDR, error=%d", __func__, error);
+}
+
+static void
+bootpc_add_default_route(struct bootpc_ifcontext *ifctx)
+{
+	int error;
+	struct sockaddr_in defdst;
+	struct sockaddr_in defmask;
+
+	if (ifctx->gw.sin_addr.s_addr == htonl(INADDR_ANY))
+		return;
+
+	clear_sinaddr(&defdst);
+	clear_sinaddr(&defmask);
+
+	error = rtrequest_fib(RTM_ADD, (struct sockaddr *)&defdst,
+	    (struct sockaddr *) &ifctx->gw, (struct sockaddr *)&defmask,
+	    (RTF_UP | RTF_GATEWAY | RTF_STATIC), NULL, RT_DEFAULT_FIB);
+	if (error != 0) {
+		printf("%s: RTM_ADD, error=%d\n", __func__, error);
+	}
+}
+
+static void
+bootpc_remove_default_route(struct bootpc_ifcontext *ifctx)
+{
+	int error;
+	struct sockaddr_in defdst;
+	struct sockaddr_in defmask;
+
+	if (ifctx->gw.sin_addr.s_addr == htonl(INADDR_ANY))
+		return;
+
+	clear_sinaddr(&defdst);
+	clear_sinaddr(&defmask);
+
+	error = rtrequest_fib(RTM_DELETE, (struct sockaddr *)&defdst,
+	    (struct sockaddr *) &ifctx->gw, (struct sockaddr *)&defmask,
+	    (RTF_UP | RTF_GATEWAY | RTF_STATIC), NULL, RT_DEFAULT_FIB);
+	if (error != 0) {
+		printf("%s: RTM_DELETE, error=%d\n", __func__, error);
+	}
+}
+
+static int
+setfs(struct sockaddr_in *addr, char *path, char *p,
+    const struct in_addr *siaddr)
+{
+
+	if (getip(&p, &addr->sin_addr) == 0) {
+		if (siaddr != NULL && *p == '/')
+			bcopy(siaddr, &addr->sin_addr, sizeof(struct in_addr));
+		else
+			return 0;
+	} else {
+		if (*p != ':')
+			return 0;
+		p++;
+	}
+		
+	addr->sin_len = sizeof(struct sockaddr_in);
+	addr->sin_family = AF_INET;
+
+	strlcpy(path, p, MNAMELEN);
+	return 1;
+}
+
+static int
+getip(char **ptr, struct in_addr *addr)
+{
+	char *p;
+	unsigned int ip;
+	int val;
+
+	p = *ptr;
+	ip = 0;
+	if (((val = getdec(&p)) < 0) || (val > 255))
+		return 0;
+	ip = val << 24;
+	if (*p != '.')
+		return 0;
+	p++;
+	if (((val = getdec(&p)) < 0) || (val > 255))
+		return 0;
+	ip |= (val << 16);
+	if (*p != '.')
+		return 0;
+	p++;
+	if (((val = getdec(&p)) < 0) || (val > 255))
+		return 0;
+	ip |= (val << 8);
+	if (*p != '.')
+		return 0;
+	p++;
+	if (((val = getdec(&p)) < 0) || (val > 255))
+		return 0;
+	ip |= val;
+
+	addr->s_addr = htonl(ip);
+	*ptr = p;
+	return 1;
+}
+
+static int
+getdec(char **ptr)
+{
+	char *p;
+	int ret;
+
+	p = *ptr;
+	ret = 0;
+	if ((*p < '0') || (*p > '9'))
+		return -1;
+	while ((*p >= '0') && (*p <= '9')) {
+		ret = ret * 10 + (*p - '0');
+		p++;
+	}
+	*ptr = p;
+	return ret;
+}
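
setfs(), getip() and getdec() above hand-parse the root specification: getdec() consumes a decimal run, getip() assembles four octets into a host-order word before converting it with htonl(), and setfs() falls back to the reply's siaddr when the string starts with '/'. A stand-alone sketch of the same parsing flow (user-space, hypothetical; only the decimal/dotted-quad handling is reproduced):

#include <stdio.h>

/* Consume a decimal run; return -1 if the first character is not a digit. */
static int
getdec(const char **pp)
{
	const char *p = *pp;
	int v = 0;

	if (*p < '0' || *p > '9')
		return -1;
	while (*p >= '0' && *p <= '9')
		v = v * 10 + (*p++ - '0');
	*pp = p;
	return v;
}

int
main(void)
{
	const char *p = "10.0.0.5:/export/root";
	unsigned int ip = 0;
	int i, v;

	for (i = 0; i < 4; i++) {
		if ((v = getdec(&p)) < 0 || v > 255 ||
		    (i < 3 && *p++ != '.'))
			return 1;
		ip = (ip << 8) | (unsigned int)v;
	}
	if (*p++ != ':')	/* setfs() falls back to siaddr on '/' */
		return 1;
	printf("server %u.%u.%u.%u, path %s\n", ip >> 24,
	    (ip >> 16) & 255, (ip >> 8) & 255, ip & 255, p);
	return 0;
}
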
+
+static void
+mountopts(struct nfs_args *args, char *p)
+{
+	args->version = NFS_ARGSVERSION;
+	args->rsize = BOOTP_BLOCKSIZE;
+	args->wsize = BOOTP_BLOCKSIZE;
+	args->flags = NFSMNT_RSIZE | NFSMNT_WSIZE | NFSMNT_RESVPORT;
+	args->sotype = SOCK_DGRAM;
+	if (p != NULL)
+		nfs_parse_options(p, args);
+}
+
+static int
+xdr_opaque_decode(struct mbuf **mptr, u_char *buf, int len)
+{
+	struct mbuf *m;
+	int alignedlen;
+
+	m = *mptr;
+	alignedlen = ( len + 3 ) & ~3;
+
+	if (m->m_len < alignedlen) {
+		m = m_pullup(m, alignedlen);
+		if (m == NULL) {
+			*mptr = NULL;
+			return EBADRPC;
+		}
+	}
+	bcopy(mtod(m, u_char *), buf, len);
+	m_adj(m, alignedlen);
+	*mptr = m;
+	return 0;
+}
+
+static int
+xdr_int_decode(struct mbuf **mptr, int *iptr)
+{
+	u_int32_t i;
+
+	if (xdr_opaque_decode(mptr, (u_char *) &i, sizeof(u_int32_t)) != 0)
+		return EBADRPC;
+	*iptr = fxdr_unsigned(u_int32_t, i);
+	return 0;
+}
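
xdr_opaque_decode() depends on the XDR rule that every item occupies a multiple of four bytes on the wire, so it pulls up and trims (len + 3) & ~3 bytes while copying only len of them. The rounding in isolation (illustrative):

#include <stdio.h>

/* XDR pads every opaque/string item out to a 4-byte boundary. */
static int
xdr_aligned_len(int len)
{
	return (len + 3) & ~3;
}

int
main(void)
{
	int len;

	for (len = 1; len <= 8; len++)
		printf("payload %d -> %d bytes on the wire\n",
		    len, xdr_aligned_len(len));
	return 0;
}
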
+
+static void
+print_sin_addr(struct sockaddr_in *sin)
+{
+
+	print_in_addr(sin->sin_addr);
+}
+
+static void
+print_in_addr(struct in_addr addr)
+{
+	unsigned int ip;
+
+	ip = ntohl(addr.s_addr);
+	printf("%d.%d.%d.%d",
+	       ip >> 24, (ip >> 16) & 255, (ip >> 8) & 255, ip & 255);
+}
+
+static void
+bootpc_compose_query(struct bootpc_ifcontext *ifctx, struct thread *td)
+{
+	unsigned char *vendp;
+	unsigned char vendor_client[64];
+	uint32_t leasetime;
+	uint8_t vendor_client_len;
+
+	ifctx->gotrootpath = 0;
+
+	bzero((caddr_t) &ifctx->call, sizeof(ifctx->call));
+
+	/* bootpc part */
+	ifctx->call.op = BOOTP_REQUEST; 	/* BOOTREQUEST */
+	ifctx->call.htype = 1;			/* 10mb ethernet */
+	ifctx->call.hlen = ifctx->sdl->sdl_alen;/* Hardware address length */
+	ifctx->call.hops = 0;
+	if (bootpc_ifctx_isunresolved(ifctx) != 0)
+		ifctx->xid++;
+	ifctx->call.xid = txdr_unsigned(ifctx->xid);
+	bcopy(LLADDR(ifctx->sdl), &ifctx->call.chaddr, ifctx->sdl->sdl_alen);
+
+	vendp = ifctx->call.vend;
+	*vendp++ = 99;		/* RFC1048 cookie */
+	*vendp++ = 130;
+	*vendp++ = 83;
+	*vendp++ = 99;
+	*vendp++ = TAG_MAXMSGSIZE;
+	*vendp++ = 2;
+	*vendp++ = (sizeof(struct bootp_packet) >> 8) & 255;
+	*vendp++ = sizeof(struct bootp_packet) & 255;
+
+	snprintf(vendor_client, sizeof(vendor_client), "%s:%s:%s",
+		ostype, MACHINE, osrelease);
+	vendor_client_len = strlen(vendor_client);
+	*vendp++ = TAG_VENDOR_INDENTIFIER;
+	*vendp++ = vendor_client_len;
+	memcpy(vendp, vendor_client, vendor_client_len);
+	vendp += vendor_client_len;
+	ifctx->dhcpquerytype = DHCP_NOMSG;
+	switch (ifctx->state) {
+	case IF_DHCP_UNRESOLVED:
+		*vendp++ = TAG_DHCP_MSGTYPE;
+		*vendp++ = 1;
+		*vendp++ = DHCP_DISCOVER;
+		ifctx->dhcpquerytype = DHCP_DISCOVER;
+		ifctx->gotdhcpserver = 0;
+		break;
+	case IF_DHCP_OFFERED:
+		*vendp++ = TAG_DHCP_MSGTYPE;
+		*vendp++ = 1;
+		*vendp++ = DHCP_REQUEST;
+		ifctx->dhcpquerytype = DHCP_REQUEST;
+		*vendp++ = TAG_DHCP_REQ_ADDR;
+		*vendp++ = 4;
+		memcpy(vendp, &ifctx->reply.yiaddr, 4);
+		vendp += 4;
+		if (ifctx->gotdhcpserver != 0) {
+			*vendp++ = TAG_DHCP_SERVERID;
+			*vendp++ = 4;
+			memcpy(vendp, &ifctx->dhcpserver, 4);
+			vendp += 4;
+		}
+		*vendp++ = TAG_DHCP_LEASETIME;
+		*vendp++ = 4;
+		leasetime = htonl(300);
+		memcpy(vendp, &leasetime, 4);
+		vendp += 4;
+		break;
+	default:
+		break;
+	}
+	*vendp = TAG_END;
+
+	ifctx->call.secs = 0;
+	ifctx->call.flags = htons(0x8000); /* We need a broadcast answer */
+}
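
The vendor area composed above always begins with the RFC 1048 magic cookie (99.130.83.99) followed by tag/length/value options and a terminating TAG_END; a DHCP DISCOVER merely adds a one-byte message-type option. A compact sketch of the same layout (user-space; the option codes shown are the standard DHCP values and are assumed here rather than taken from the import):

#include <stdio.h>

#define TAG_DHCP_MSGTYPE 53	/* standard DHCP option code (assumed) */
#define TAG_END 255
#define DHCP_DISCOVER 1

int
main(void)
{
	unsigned char vend[16], *p = vend;
	int i;

	/* RFC 1048 cookie, exactly as in bootpc_compose_query(). */
	*p++ = 99; *p++ = 130; *p++ = 83; *p++ = 99;

	/* One tag/length/value option: DHCP message type = DISCOVER. */
	*p++ = TAG_DHCP_MSGTYPE;
	*p++ = 1;
	*p++ = DHCP_DISCOVER;

	*p++ = TAG_END;

	for (i = 0; i < p - vend; i++)
		printf("%d ", vend[i]);
	printf("\n");
	return 0;
}
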
+
+static int
+bootpc_hascookie(struct bootp_packet *bp)
+{
+
+	return (bp->vend[0] == 99 && bp->vend[1] == 130 &&
+		bp->vend[2] == 83 && bp->vend[3] == 99);
+}
+
+static void
+bootpc_tag_helper(struct bootpc_tagcontext *tctx,
+    unsigned char *start, int len, int tag)
+{
+	unsigned char *j;
+	unsigned char *ej;
+	unsigned char code;
+
+	if (tctx->badtag != 0 || tctx->badopt != 0)
+		return;
+
+	j = start;
+	ej = j + len;
+
+	while (j < ej) {
+		code = *j++;
+		if (code == TAG_PAD)
+			continue;
+		if (code == TAG_END)
+			return;
+		if (j >= ej || j + *j + 1 > ej) {
+			tctx->badopt = 1;
+			return;
+		}
+		len = *j++;
+		if (code == tag) {
+			if (tctx->taglen + len > TAG_MAXLEN) {
+				tctx->badtag = 1;
+				return;
+			}
+			tctx->foundopt = 1;
+			if (len > 0)
+				memcpy(tctx->buf + tctx->taglen,
+				       j, len);
+			tctx->taglen += len;
+		}
+		if (code == TAG_OVERLOAD)
+			tctx->overload = *j;
+
+		j += len;
+	}
+}
+
+static unsigned char *
+bootpc_tag(struct bootpc_tagcontext *tctx,
+    struct bootp_packet *bp, int len, int tag)
+{
+	tctx->overload = 0;
+	tctx->badopt = 0;
+	tctx->badtag = 0;
+	tctx->foundopt = 0;
+	tctx->taglen = 0;
+
+	if (bootpc_hascookie(bp) == 0)
+		return NULL;
+
+	bootpc_tag_helper(tctx, &bp->vend[4],
+			  (unsigned char *) bp + len - &bp->vend[4], tag);
+
+	if ((tctx->overload & OVERLOAD_FILE) != 0)
+		bootpc_tag_helper(tctx,
+				  (unsigned char *) bp->file,
+				  sizeof(bp->file),
+				  tag);
+	if ((tctx->overload & OVERLOAD_SNAME) != 0)
+		bootpc_tag_helper(tctx,
+				  (unsigned char *) bp->sname,
+				  sizeof(bp->sname),
+				  tag);
+
+	if (tctx->badopt != 0 || tctx->badtag != 0 || tctx->foundopt == 0)
+		return NULL;
+	tctx->buf[tctx->taglen] = '\0';
+	return tctx->buf;
+}
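
bootpc_tag_helper() walks that option stream, concatenating every occurrence of the requested tag into tctx->buf and honouring TAG_OVERLOAD, which indicates that the file and/or sname fields carry further options. A minimal scanner over the same layout (hypothetical; no overload or concatenation handling):

#include <stdio.h>

#define TAG_PAD 0
#define TAG_END 255

/* Return a pointer to the first value of `tag`, or NULL; length in *lenp. */
static const unsigned char *
find_tag(const unsigned char *p, const unsigned char *end, int tag, int *lenp)
{
	while (p < end) {
		int code = *p++;

		if (code == TAG_PAD)
			continue;
		if (code == TAG_END || p >= end)
			return NULL;
		*lenp = *p++;
		if (p + *lenp > end)	/* truncated option */
			return NULL;
		if (code == tag)
			return p;
		p += *lenp;
	}
	return NULL;
}

int
main(void)
{
	/* Option 12 (host name, standard code) = "nfsbox", then TAG_END. */
	static const unsigned char vend[] =
	    { 12, 6, 'n', 'f', 's', 'b', 'o', 'x', TAG_END };
	const unsigned char *v;
	int len;

	v = find_tag(vend, vend + sizeof(vend), 12, &len);
	if (v != NULL)
		printf("tag 12: %.*s\n", len, (const char *)v);
	return 0;
}
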
+
+static void
+bootpc_decode_reply(struct nfsv3_diskless *nd, struct bootpc_ifcontext *ifctx,
+    struct bootpc_globalcontext *gctx)
+{
+	char *p, *s;
+	unsigned int ip;
+
+	ifctx->gotgw = 0;
+	ifctx->gotnetmask = 0;
+
+	clear_sinaddr(&ifctx->myaddr);
+	clear_sinaddr(&ifctx->netmask);
+	clear_sinaddr(&ifctx->gw);
+
+	ifctx->myaddr.sin_addr = ifctx->reply.yiaddr;
+
+	ip = ntohl(ifctx->myaddr.sin_addr.s_addr);
+
+	printf("%s at ", ifctx->ireq.ifr_name);
+	print_sin_addr(&ifctx->myaddr);
+	printf(" server ");
+	print_in_addr(ifctx->reply.siaddr);
+
+	ifctx->gw.sin_addr = ifctx->reply.giaddr;
+	if (ifctx->reply.giaddr.s_addr != htonl(INADDR_ANY)) {
+		printf(" via gateway ");
+		print_in_addr(ifctx->reply.giaddr);
+	}
+
+	/* This call used for the side effect (overload flag) */
+	(void) bootpc_tag(&gctx->tmptag,
+			  &ifctx->reply, ifctx->replylen, TAG_END);
+
+	if ((gctx->tmptag.overload & OVERLOAD_SNAME) == 0)
+		if (ifctx->reply.sname[0] != '\0')
+			printf(" server name %s", ifctx->reply.sname);
+	if ((gctx->tmptag.overload & OVERLOAD_FILE) == 0)
+		if (ifctx->reply.file[0] != '\0')
+			printf(" boot file %s", ifctx->reply.file);
+
+	printf("\n");
+
+	p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
+		       TAG_SUBNETMASK);
+	if (p != NULL) {
+		if (gctx->tag.taglen != 4)
+			panic("bootpc: subnet mask len is %d",
+			      gctx->tag.taglen);
+		bcopy(p, &ifctx->netmask.sin_addr, 4);
+		ifctx->gotnetmask = 1;
+		printf("subnet mask ");
+		print_sin_addr(&ifctx->netmask);
+		printf(" ");
+	}
+
+	p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
+		       TAG_ROUTERS);
+	if (p != NULL) {
+		/* Routers */
+		if (gctx->tag.taglen % 4)
+			panic("bootpc: Router Len is %d", gctx->tag.taglen);
+		if (gctx->tag.taglen > 0) {
+			bcopy(p, &ifctx->gw.sin_addr, 4);
+			printf("router ");
+			print_sin_addr(&ifctx->gw);
+			printf(" ");
+			ifctx->gotgw = 1;
+			gctx->gotgw = 1;
+		}
+	}
+
+	/*
+	 * Choose a root filesystem.  If a value is forced in the environment
+	 * and it contains "nfs:", use it unconditionally.  Otherwise, if the
+	 * kernel is compiled with the ROOTDEVNAME option, then use it if:
+	 *  - The server doesn't provide a pathname.
+	 *  - The boothowto flags include RB_DFLTROOT (user said to override
+	 *    the server value).
+	 */
+	p = NULL;
+	if ((s = kern_getenv("vfs.root.mountfrom")) != NULL) {
+		if ((p = strstr(s, "nfs:")) != NULL)
+			p = strdup(p + 4, M_TEMP);
+		freeenv(s);
+	}
+	if (p == NULL) {
+		p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
+		       TAG_ROOT);
+		if (p != NULL)
+			ifctx->gotrootpath = 1;
+	}
+#ifdef ROOTDEVNAME
+	if ((p == NULL || (boothowto & RB_DFLTROOT) != 0) && 
+	    (p = strstr(ROOTDEVNAME, "nfs:")) != NULL) {
+		p += 4;
+	}
+#endif
+	if (p != NULL) {
+		if (gctx->setrootfs != NULL) {
+			printf("rootfs %s (ignored) ", p);
+		} else 	if (setfs(&nd->root_saddr,
+				  nd->root_hostnam, p, &ifctx->reply.siaddr)) {
+			if (*p == '/') {
+				printf("root_server ");
+				print_sin_addr(&nd->root_saddr);
+				printf(" ");
+			}
+			printf("rootfs %s ", p);
+			gctx->gotrootpath = 1;
+			gctx->setrootfs = ifctx;
+
+			p = bootpc_tag(&gctx->tag, &ifctx->reply,
+				       ifctx->replylen,
+				       TAG_ROOTOPTS);
+			if (p != NULL) {
+				mountopts(&nd->root_args, p);
+				printf("rootopts %s ", p);
+			}
+		} else
+			panic("Failed to set rootfs to %s", p);
+	}
+
+	p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
+		       TAG_HOSTNAME);
+	if (p != NULL) {
+		if (gctx->tag.taglen >= MAXHOSTNAMELEN)
+			panic("bootpc: hostname >= %d bytes",
+			      MAXHOSTNAMELEN);
+		if (gctx->sethostname != NULL) {
+			printf("hostname %s (ignored) ", p);
+		} else {
+			strcpy(nd->my_hostnam, p);
+			mtx_lock(&prison0.pr_mtx);
+			strcpy(prison0.pr_hostname, p);
+			mtx_unlock(&prison0.pr_mtx);
+			printf("hostname %s ", p);
+			gctx->sethostname = ifctx;
+		}
+	}
+	p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
+			TAG_COOKIE);
+	if (p != NULL) {        /* store in a sysctl variable */
+		int i, l = sizeof(bootp_cookie) - 1;
+		for (i = 0; i < l && p[i] != '\0'; i++)
+			bootp_cookie[i] = p[i];
+		p[i] = '\0';
+	}
+
+	p = bootpc_tag(&gctx->tag, &ifctx->reply, ifctx->replylen,
+		       TAG_INTF_MTU);
+	if (p != NULL) {
+		ifctx->mtu = be16dec(p);
+	}
+
+	printf("\n");
+
+	if (ifctx->gotnetmask == 0) {
+		if (IN_CLASSA(ntohl(ifctx->myaddr.sin_addr.s_addr)))
+			ifctx->netmask.sin_addr.s_addr = htonl(IN_CLASSA_NET);
+		else if (IN_CLASSB(ntohl(ifctx->myaddr.sin_addr.s_addr)))
+			ifctx->netmask.sin_addr.s_addr = htonl(IN_CLASSB_NET);
+		else
+			ifctx->netmask.sin_addr.s_addr = htonl(IN_CLASSC_NET);
+	}
+}
+
+void
+bootpc_init(void)
+{
+	struct bootpc_ifcontext *ifctx;		/* Interface BOOTP contexts */
+	struct bootpc_globalcontext *gctx; 	/* Global BOOTP context */
+	struct ifnet *ifp;
+	struct sockaddr_dl *sdl;
+	struct ifaddr *ifa;
+	int error;
+#ifndef BOOTP_WIRED_TO
+	int ifcnt;
+#endif
+	struct nfsv3_diskless *nd;
+	struct thread *td;
+	int timeout;
+	int delay;
+
+	timeout = BOOTP_IFACE_WAIT_TIMEOUT * hz;
+	delay = hz / 10;
+
+	nd = &nfsv3_diskless;
+	td = curthread;
+
+	/*
+	 * If already filled in, don't touch it here
+	 */
+	if (nfs_diskless_valid != 0)
+		return;
+
+	gctx = malloc(sizeof(*gctx), M_TEMP, M_WAITOK | M_ZERO);
+	STAILQ_INIT(&gctx->interfaces);
+	gctx->xid = ~0xFFFF;
+	gctx->starttime = time_second;
+
+	/*
+	 * If ROOTDEVNAME is defined or vfs.root.mountfrom is set then we have
+	 * root-path overrides that can potentially let us boot even if we don't
+	 * get a root path from the server, so we can treat that as a non-error.
+	 */
+#ifdef ROOTDEVNAME
+	gctx->any_root_overrides = 1;
+#else
+	gctx->any_root_overrides = testenv("vfs.root.mountfrom");
+#endif
+
+	/*
+	 * Find a network interface.
+	 */
+	CURVNET_SET(TD_TO_VNET(td));
+#ifdef BOOTP_WIRED_TO
+	printf("%s: wired to interface '%s'\n", __func__, 
+	       __XSTRING(BOOTP_WIRED_TO));
+	allocifctx(gctx);
+#else
+	/*
+	 * Preallocate interface context storage, if another interface
+	 * attaches and wins the race, it won't be eligible for bootp.
+	 */
+	ifcnt = 0;
+	IFNET_RLOCK();
+	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+		if ((ifp->if_flags &
+		     (IFF_LOOPBACK | IFF_POINTOPOINT | IFF_BROADCAST)) !=
+		    IFF_BROADCAST)
+			continue;
+		switch (ifp->if_alloctype) {
+			case IFT_ETHER:
+				break;
+			default:
+				continue;
+		}
+		ifcnt++;
+	}
+	IFNET_RUNLOCK();
+	if (ifcnt == 0)
+		panic("%s: no eligible interfaces", __func__);
+	for (; ifcnt > 0; ifcnt--)
+		allocifctx(gctx);
+#endif
+
+retry:
+	ifctx = STAILQ_FIRST(&gctx->interfaces);
+	IFNET_RLOCK();
+	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+		if (ifctx == NULL)
+			break;
+#ifdef BOOTP_WIRED_TO
+		if (strcmp(ifp->if_xname, __XSTRING(BOOTP_WIRED_TO)) != 0)
+			continue;
+#else
+		if ((ifp->if_flags &
+		     (IFF_LOOPBACK | IFF_POINTOPOINT | IFF_BROADCAST)) !=
+		    IFF_BROADCAST)
+			continue;
+		switch (ifp->if_alloctype) {
+			case IFT_ETHER:
+				break;
+			default:
+				continue;
+		}
+#endif
+		strlcpy(ifctx->ireq.ifr_name, ifp->if_xname,
+		    sizeof(ifctx->ireq.ifr_name));
+		ifctx->ifp = ifp;
+
+		/* Get HW address */
+		sdl = NULL;
+		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+			if (ifa->ifa_addr->sa_family == AF_LINK) {
+				sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+				if (sdl->sdl_type == IFT_ETHER)
+					break;
+			}
+		if (sdl == NULL)
+			panic("bootpc: Unable to find HW address for %s",
+			    ifctx->ireq.ifr_name);
+		ifctx->sdl = sdl;
+
+		ifctx = STAILQ_NEXT(ifctx, next);
+	}
+	IFNET_RUNLOCK();
+	CURVNET_RESTORE();
+
+	if (STAILQ_EMPTY(&gctx->interfaces) ||
+	    STAILQ_FIRST(&gctx->interfaces)->ifp == NULL) {
+		if (timeout > 0) {
+			pause("bootpc", delay);
+			timeout -= delay;
+			goto retry;
+		}
+#ifdef BOOTP_WIRED_TO
+		panic("%s: Could not find interface specified "
+		      "by BOOTP_WIRED_TO: "
+		      __XSTRING(BOOTP_WIRED_TO), __func__);
+#else
+		panic("%s: no suitable interface", __func__);
+#endif
+	}
+
+	error = socreate(AF_INET, &bootp_so, SOCK_DGRAM, 0, td->td_ucred, td);
+	if (error != 0)
+		panic("%s: socreate, error=%d", __func__, error);
+
+	STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+		bootpc_fakeup_interface(ifctx, td);
+
+	STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+		bootpc_compose_query(ifctx, td);
+
+	error = bootpc_call(gctx, td);
+	if (error != 0) {
+		printf("BOOTP call failed\n");
+	}
+
+	mountopts(&nd->root_args, NULL);
+
+	STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+		if (bootpc_ifctx_isresolved(ifctx) != 0)
+			bootpc_decode_reply(nd, ifctx, gctx);
+
+#ifdef BOOTP_NFSROOT
+	if (gctx->gotrootpath == 0 && gctx->any_root_overrides == 0)
+		panic("bootpc: No root path offered");
+#endif
+
+	STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+		bootpc_adjust_interface(ifctx, gctx, td);
+
+	soclose(bootp_so);
+
+	STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+		if (ifctx->gotrootpath != 0)
+			break;
+	if (ifctx == NULL) {
+		STAILQ_FOREACH(ifctx, &gctx->interfaces, next)
+			if (bootpc_ifctx_isresolved(ifctx) != 0)
+				break;
+	}
+	if (ifctx == NULL)
+		goto out;
+
+	if (gctx->gotrootpath != 0) {
+
+		kern_setenv("boot.netif.name", ifctx->ifp->if_xname);
+
+		bootpc_add_default_route(ifctx);
+		error = md_mount(&nd->root_saddr, nd->root_hostnam,
+				 nd->root_fh, &nd->root_fhsize,
+				 &nd->root_args, td);
+		bootpc_remove_default_route(ifctx);
+		if (error != 0) {
+			if (gctx->any_root_overrides == 0)
+				panic("nfs_boot: mount root, error=%d", error);
+			else
+				goto out;
+		}
+		rootdevnames[0] = "nfs:";
+		nfs_diskless_valid = 3;
+	}
+
+	strcpy(nd->myif.ifra_name, ifctx->ireq.ifr_name);
+	bcopy(&ifctx->myaddr, &nd->myif.ifra_addr, sizeof(ifctx->myaddr));
+	bcopy(&ifctx->myaddr, &nd->myif.ifra_broadaddr, sizeof(ifctx->myaddr));
+	((struct sockaddr_in *) &nd->myif.ifra_broadaddr)->sin_addr.s_addr =
+		ifctx->myaddr.sin_addr.s_addr |
+		~ ifctx->netmask.sin_addr.s_addr;
+	bcopy(&ifctx->netmask, &nd->myif.ifra_mask, sizeof(ifctx->netmask));
+	bcopy(&ifctx->gw, &nd->mygateway, sizeof(ifctx->gw));
+
+out:
+	while((ifctx = STAILQ_FIRST(&gctx->interfaces)) != NULL) {
+		STAILQ_REMOVE_HEAD(&gctx->interfaces, next);
+		free(ifctx, M_TEMP);
+	}
+	free(gctx, M_TEMP);
+}
+
+/*
+ * RPC: mountd/mount
+ * Given a server pathname, get an NFS file handle.
+ * Also, sets sin->sin_port to the NFS service port.
+ */
+static int
+md_mount(struct sockaddr_in *mdsin, char *path, u_char *fhp, int *fhsizep,
+    struct nfs_args *args, struct thread *td)
+{
+	struct mbuf *m;
+	int error;
+	int authunixok;
+	int authcount;
+	int authver;
+
+#define	RPCPROG_MNT	100005
+#define	RPCMNT_VER1	1
+#define RPCMNT_VER3	3
+#define	RPCMNT_MOUNT	1
+#define	AUTH_SYS	1		/* unix style (uid, gids) */
+#define AUTH_UNIX	AUTH_SYS
+
+	/* XXX honor v2/v3 flags in args->flags? */
+#ifdef BOOTP_NFSV3
+	/* First try NFS v3 */
+	/* Get port number for MOUNTD. */
+	error = krpc_portmap(mdsin, RPCPROG_MNT, RPCMNT_VER3,
+			     &mdsin->sin_port, td);
+	if (error == 0) {
+		m = xdr_string_encode(path, strlen(path));
+
+		/* Do RPC to mountd. */
+		error = krpc_call(mdsin, RPCPROG_MNT, RPCMNT_VER3,
+				  RPCMNT_MOUNT, &m, NULL, td);
+	}
+	if (error == 0) {
+		args->flags |= NFSMNT_NFSV3;
+	} else {
+#endif
+		/* Fallback to NFS v2 */
+
+		/* Get port number for MOUNTD. */
+		error = krpc_portmap(mdsin, RPCPROG_MNT, RPCMNT_VER1,
+				     &mdsin->sin_port, td);
+		if (error != 0)
+			return error;
+
+		m = xdr_string_encode(path, strlen(path));
+
+		/* Do RPC to mountd. */
+		error = krpc_call(mdsin, RPCPROG_MNT, RPCMNT_VER1,
+				  RPCMNT_MOUNT, &m, NULL, td);
+		if (error != 0)
+			return error;	/* message already freed */
+
+#ifdef BOOTP_NFSV3
+	}
+#endif
+
+	if (xdr_int_decode(&m, &error) != 0 || error != 0)
+		goto bad;
+
+	if ((args->flags & NFSMNT_NFSV3) != 0) {
+		if (xdr_int_decode(&m, fhsizep) != 0 ||
+		    *fhsizep > NFSX_V3FHMAX ||
+		    *fhsizep <= 0)
+			goto bad;
+	} else
+		*fhsizep = NFSX_V2FH;
+
+	if (xdr_opaque_decode(&m, fhp, *fhsizep) != 0)
+		goto bad;
+
+	if (args->flags & NFSMNT_NFSV3) {
+		if (xdr_int_decode(&m, &authcount) != 0)
+			goto bad;
+		authunixok = 0;
+		if (authcount < 0 || authcount > 100)
+			goto bad;
+		while (authcount > 0) {
+			if (xdr_int_decode(&m, &authver) != 0)
+				goto bad;
+			if (authver == AUTH_UNIX)
+				authunixok = 1;
+			authcount--;
+		}
+		if (authunixok == 0)
+			goto bad;
+	}
+
+	/* Set port number for NFS use. */
+	error = krpc_portmap(mdsin, NFS_PROG,
+			     (args->flags &
+			      NFSMNT_NFSV3) ? NFS_VER3 : NFS_VER2,
+			     &mdsin->sin_port, td);
+
+	goto out;
+
+bad:
+	error = EBADRPC;
+
+out:
+	m_freem(m);
+	return error;
+}
+
+SYSINIT(bootp_rootconf, SI_SUB_ROOT_CONF, SI_ORDER_FIRST, bootpc_init, NULL);
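
md_mount() above captures the whole hand-off from BOOTP/DHCP to NFS: ask the portmapper where mountd lives, issue the MOUNT call to turn the path into an opaque file handle (preferring NFSv3 when BOOTP_NFSV3 is defined), then ask the portmapper again for the NFS service port. A rough user-space sketch of that sequence; the rpc_* helpers below are stand-ins for krpc_portmap()/krpc_call(), not real functions:

#include <stdio.h>

#define RPCPROG_MNT	100005
#define NFS_PROG	100003

/* Stubs standing in for the kernel RPC plumbing; they only fix the order. */
static int
rpc_getport(int prog, int vers, unsigned short *port)
{
	(void)prog; (void)vers;
	*port = 2049;			/* pretend answer from the portmapper */
	return 0;
}

static int
rpc_mount(const char *path, unsigned char *fh, int *fhsize)
{
	(void)path; (void)fh;
	*fhsize = 32;			/* pretend NFSv3 file handle length */
	return 0;
}

int
main(void)
{
	unsigned short port;
	unsigned char fh[64];
	int fhsize;

	if (rpc_getport(RPCPROG_MNT, 3, &port) != 0)	/* 1. find mountd */
		return 1;
	if (rpc_mount("/export/root", fh, &fhsize) != 0) /* 2. MOUNT -> fh */
		return 1;
	if (rpc_getport(NFS_PROG, 3, &port) != 0)	/* 3. find nfsd port */
		return 1;
	printf("got a %d-byte root file handle, NFS port %d\n", fhsize, port);
	return 0;
}
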
diff --git a/freebsd/sys/nfs/krpc.h b/freebsd/sys/nfs/krpc.h
new file mode 100644
index 0000000..44b84e0
--- /dev/null
+++ b/freebsd/sys/nfs/krpc.h
@@ -0,0 +1,31 @@
+/*	$NetBSD: krpc.h,v 1.4 1995/12/19 23:07:11 cgd Exp $	*/
+/* $FreeBSD$	*/
+
+#include <sys/cdefs.h>
+
+struct mbuf;
+struct thread;
+struct sockaddr;
+struct sockaddr_in;
+
+int krpc_call(struct sockaddr_in *_sin,
+	u_int prog, u_int vers, u_int func,
+	struct mbuf **data, struct sockaddr **from, struct thread *td);
+
+int krpc_portmap(struct sockaddr_in *_sin,
+	u_int prog, u_int vers, u_int16_t *portp, struct thread *td);
+
+struct mbuf *xdr_string_encode(char *str, int len);
+
+/*
+ * RPC definitions for the portmapper
+ */
+#define	PMAPPORT		111
+#define	PMAPPROG		100000
+#define	PMAPVERS		2
+#define	PMAPPROC_NULL		0
+#define	PMAPPROC_SET		1
+#define	PMAPPROC_UNSET		2
+#define	PMAPPROC_GETPORT	3
+#define	PMAPPROC_DUMP		4
+#define	PMAPPROC_CALLIT		5
diff --git a/freebsd/sys/nfs/krpc_subr.c b/freebsd/sys/nfs/krpc_subr.c
new file mode 100644
index 0000000..21e8e3a
--- /dev/null
+++ b/freebsd/sys/nfs/krpc_subr.c
@@ -0,0 +1,470 @@
+/*	$NetBSD: krpc_subr.c,v 1.12.4.1 1996/06/07 00:52:26 cgd Exp $	*/
+
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1995 Gordon Ross, Adam Glass
+ * Copyright (c) 1992 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Lawrence Berkeley Laboratory and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * partially based on:
+ *      libnetboot/rpc.c
+ *               @(#) Header: rpc.c,v 1.12 93/09/28 08:31:56 leres Exp  (LBL)
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/jail.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/uio.h>
+
+#include <net/if.h>
+#include <net/vnet.h>
+
+#include <netinet/in.h>
+
+#include <rpc/types.h>
+#include <rpc/auth.h>
+#include <rpc/rpc_msg.h>
+#include <nfs/krpc.h>
+#include <nfs/xdr_subs.h>
+
+/*
+ * Kernel support for Sun RPC
+ *
+ * Used currently for bootstrapping in nfs diskless configurations.
+ */
+
+/*
+ * Generic RPC headers
+ */
+
+struct auth_info {
+	u_int32_t 	authtype;	/* auth type */
+	u_int32_t	authlen;	/* auth length */
+};
+
+struct auth_unix {
+	int32_t   ua_time;
+	int32_t   ua_hostname;	/* null */
+	int32_t   ua_uid;
+	int32_t   ua_gid;
+	int32_t   ua_gidlist;	/* null */
+};
+
+struct krpc_call {
+	u_int32_t	rp_xid;		/* request transaction id */
+	int32_t 	rp_direction;	/* call direction (0) */
+	u_int32_t	rp_rpcvers;	/* rpc version (2) */
+	u_int32_t	rp_prog;	/* program */
+	u_int32_t	rp_vers;	/* version */
+	u_int32_t	rp_proc;	/* procedure */
+	struct	auth_info rpc_auth;
+	struct	auth_unix rpc_unix;
+	struct	auth_info rpc_verf;
+};
+
+struct krpc_reply {
+	u_int32_t rp_xid;		/* request transaction id */
+	int32_t  rp_direction;		/* call direction (1) */
+	int32_t  rp_astatus;		/* accept status (0: accepted) */
+	union {
+		u_int32_t rpu_errno;
+		struct {
+			struct auth_info rok_auth;
+			u_int32_t	rok_status;
+		} rpu_rok;
+	} rp_u;
+};
+#define rp_errno  rp_u.rpu_errno
+#define rp_auth   rp_u.rpu_rok.rok_auth
+#define rp_status rp_u.rpu_rok.rok_status
+
+#define MIN_REPLY_HDR 16	/* xid, dir, astat, errno */
+
+/*
+ * What is the longest we will wait before re-sending a request?
+ * Note this is also the frequency of "RPC timeout" messages.
+ * The re-send loop counts up linearly to this maximum, so the
+ * first complaint will happen after (1+2+3+4+5)=15 seconds.
+ */
+#define	MAX_RESEND_DELAY 5	/* seconds */
+
+/*
+ * Call portmap to lookup a port number for a particular rpc program
+ * Returns non-zero error on failure.
+ */
+int
+krpc_portmap(struct sockaddr_in *sin, u_int prog, u_int vers, u_int16_t *portp,
+    struct thread *td)
+{
+	struct sdata {
+		u_int32_t prog;		/* call program */
+		u_int32_t vers;		/* call version */
+		u_int32_t proto;	/* call protocol */
+		u_int32_t port;		/* call port (unused) */
+	} *sdata;
+	struct rdata {
+		u_int16_t pad;
+		u_int16_t port;
+	} *rdata;
+	struct mbuf *m;
+	int error;
+
+	/* The portmapper port is fixed. */
+	if (prog == PMAPPROG) {
+		*portp = htons(PMAPPORT);
+		return 0;
+	}
+
+	m = m_get(M_WAITOK, MT_DATA);
+	sdata = mtod(m, struct sdata *);
+	m->m_len = sizeof(*sdata);
+
+	/* Do the RPC to get it. */
+	sdata->prog = txdr_unsigned(prog);
+	sdata->vers = txdr_unsigned(vers);
+	sdata->proto = txdr_unsigned(IPPROTO_UDP);
+	sdata->port = 0;
+
+	sin->sin_port = htons(PMAPPORT);
+	error = krpc_call(sin, PMAPPROG, PMAPVERS,
+					  PMAPPROC_GETPORT, &m, NULL, td);
+	if (error)
+		return error;
+
+	if (m->m_len < sizeof(*rdata)) {
+		m = m_pullup(m, sizeof(*rdata));
+		if (m == NULL)
+			return ENOBUFS;
+	}
+	rdata = mtod(m, struct rdata *);
+	*portp = rdata->port;
+
+	m_freem(m);
+	return 0;
+}
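
krpc_portmap() above short-circuits lookups for the portmapper itself, which always listens on port 111, and otherwise sends a PMAPPROC_GETPORT call whose body is just four XDR unsigned words: program, version, protocol and an unused port field. A host-side sketch of that argument block (illustrative only):

#include <stdio.h>
#include <arpa/inet.h>		/* htonl() */

/* PMAPPROC_GETPORT argument body: four big-endian 32-bit words. */
struct pmap_getport_args {
	unsigned int prog;
	unsigned int vers;
	unsigned int proto;
	unsigned int port;	/* ignored for GETPORT */
};

int
main(void)
{
	struct pmap_getport_args a = {
		.prog  = htonl(100005),	/* mountd */
		.vers  = htonl(3),
		.proto = htonl(17),	/* IPPROTO_UDP */
		.port  = 0,
	};

	printf("GETPORT argument body is %zu bytes\n", sizeof(a));
	return 0;
}
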
+
+/*
+ * Do a remote procedure call (RPC) and wait for its reply.
+ * If from_p is non-null, then we are doing broadcast, and
+ * the address from whence the response came is saved there.
+ */
+int
+krpc_call(struct sockaddr_in *sa, u_int prog, u_int vers, u_int func,
+    struct mbuf **data, struct sockaddr **from_p, struct thread *td)
+{
+	struct socket *so;
+	struct sockaddr_in *sin, ssin;
+	struct sockaddr *from;
+	struct mbuf *m, *nam, *mhead;
+	struct krpc_call *call;
+	struct krpc_reply *reply;
+	struct sockopt sopt;
+	struct timeval tv;
+	struct uio auio;
+	int error, rcvflg, timo, secs, len;
+	static u_int32_t xid = ~0xFF;
+	u_int16_t tport;
+	u_int32_t saddr;
+
+	/*
+	 * Validate address family.
+	 * Sorry, this is INET specific...
+	 */
+	if (sa->sin_family != AF_INET)
+		return (EAFNOSUPPORT);
+
+	/* Free at end if not null. */
+	nam = mhead = NULL;
+	from = NULL;
+
+	/*
+	 * Create socket and set its receive timeout.
+	 */
+	if ((error = socreate(AF_INET, &so, SOCK_DGRAM, 0, td->td_ucred, td)))
+		return error;
+
+	tv.tv_sec = 1;
+	tv.tv_usec = 0;
+	bzero(&sopt, sizeof sopt);
+	sopt.sopt_dir = SOPT_SET;
+	sopt.sopt_level = SOL_SOCKET;
+	sopt.sopt_name = SO_RCVTIMEO;
+	sopt.sopt_val = &tv;
+	sopt.sopt_valsize = sizeof tv;
+
+	if ((error = sosetopt(so, &sopt)) != 0)
+		goto out;
+
+	/*
+	 * Enable broadcast if necessary.
+	 */
+	if (from_p) {
+		int on = 1;
+		sopt.sopt_name = SO_BROADCAST;
+		sopt.sopt_val = &on;
+		sopt.sopt_valsize = sizeof on;
+		if ((error = sosetopt(so, &sopt)) != 0)
+			goto out;
+	}
+
+	/*
+	 * Bind the local endpoint to a reserved port,
+	 * because some NFS servers refuse requests from
+	 * non-reserved (non-privileged) ports.
+	 */
+	sin = &ssin;
+	bzero(sin, sizeof *sin);
+	sin->sin_len = sizeof(*sin);
+	sin->sin_family = AF_INET;
+	sin->sin_addr.s_addr = INADDR_ANY;
+	tport = IPPORT_RESERVED;
+	do {
+		tport--;
+		sin->sin_port = htons(tport);
+		error = sobind(so, (struct sockaddr *)sin, td);
+	} while (error == EADDRINUSE &&
+			 tport > IPPORT_RESERVED / 2);
+	if (error) {
+		printf("bind failed\n");
+		goto out;
+	}
+
+	/*
+	 * Setup socket address for the server.
+	 */
+
+	/*
+	 * Prepend RPC message header.
+	 */
+	mhead = m_gethdr(M_WAITOK, MT_DATA);
+	mhead->m_next = *data;
+	call = mtod(mhead, struct krpc_call *);
+	mhead->m_len = sizeof(*call);
+	bzero((caddr_t)call, sizeof(*call));
+	/* rpc_call part */
+	xid++;
+	call->rp_xid = txdr_unsigned(xid);
+	/* call->rp_direction = 0; */
+	call->rp_rpcvers = txdr_unsigned(2);
+	call->rp_prog = txdr_unsigned(prog);
+	call->rp_vers = txdr_unsigned(vers);
+	call->rp_proc = txdr_unsigned(func);
+	/* rpc_auth part (auth_unix as root) */
+	call->rpc_auth.authtype = txdr_unsigned(AUTH_UNIX);
+	call->rpc_auth.authlen  = txdr_unsigned(sizeof(struct auth_unix));
+	/* rpc_verf part (auth_null) */
+	call->rpc_verf.authtype = 0;
+	call->rpc_verf.authlen  = 0;
+
+	/*
+	 * Setup packet header
+	 */
+	m_fixhdr(mhead);
+	mhead->m_pkthdr.rcvif = NULL;
+
+	/*
+	 * Send it, repeatedly, until a reply is received,
+	 * but delay each re-send by an increasing amount.
+	 * If the delay hits the maximum, start complaining.
+	 */
+	timo = 0;
+	for (;;) {
+		/* Send RPC request (or re-send). */
+		m = m_copym(mhead, 0, M_COPYALL, M_WAITOK);
+		error = sosend(so, (struct sockaddr *)sa, NULL, m,
+			       NULL, 0, td);
+		if (error) {
+			printf("krpc_call: sosend: %d\n", error);
+			goto out;
+		}
+		m = NULL;
+
+		/* Determine new timeout. */
+		if (timo < MAX_RESEND_DELAY)
+			timo++;
+		else {
+			saddr = ntohl(sa->sin_addr.s_addr);
+			printf("RPC timeout for server %d.%d.%d.%d\n",
+			       (saddr >> 24) & 255,
+			       (saddr >> 16) & 255,
+			       (saddr >> 8) & 255,
+			       saddr & 255);
+		}
+
+		/*
+		 * Wait for up to timo seconds for a reply.
+		 * The socket receive timeout was set to 1 second.
+		 */
+		secs = timo;
+		while (secs > 0) {
+			if (from) {
+				free(from, M_SONAME);
+				from = NULL;
+			}
+			if (m) {
+				m_freem(m);
+				m = NULL;
+			}
+			bzero(&auio, sizeof(auio));
+			auio.uio_resid = len = 1<<16;
+			rcvflg = 0;
+			error = soreceive(so, &from, &auio, &m, NULL, &rcvflg);
+			if (error == EWOULDBLOCK) {
+				secs--;
+				continue;
+			}
+			if (error)
+				goto out;
+			len -= auio.uio_resid;
+
+			/* Does the reply contain at least a header? */
+			if (len < MIN_REPLY_HDR)
+				continue;
+			if (m->m_len < MIN_REPLY_HDR)
+				continue;
+			reply = mtod(m, struct krpc_reply *);
+
+			/* Is it the right reply? */
+			if (reply->rp_direction != txdr_unsigned(REPLY))
+				continue;
+
+			if (reply->rp_xid != txdr_unsigned(xid))
+				continue;
+
+			/* Was RPC accepted? (authorization OK) */
+			if (reply->rp_astatus != 0) {
+				error = fxdr_unsigned(u_int32_t, reply->rp_errno);
+				printf("rpc denied, error=%d\n", error);
+				continue;
+			}
+
+			/* Did the call succeed? */
+			if (reply->rp_status != 0) {
+				error = fxdr_unsigned(u_int32_t, reply->rp_status);
+				if (error == PROG_MISMATCH) {
+					error = EBADRPC;
+					goto out;
+				}
+				printf("rpc denied, status=%d\n", error);
+				continue;
+			}
+
+			goto gotreply;	/* break two levels */
+
+		} /* while secs */
+	} /* forever send/receive */
+
+	error = ETIMEDOUT;
+	goto out;
+
+ gotreply:
+
+	/*
+	 * Get RPC reply header into first mbuf,
+	 * get its length, then strip it off.
+	 */
+	len = sizeof(*reply);
+	if (m->m_len < len) {
+		m = m_pullup(m, len);
+		if (m == NULL) {
+			error = ENOBUFS;
+			goto out;
+		}
+	}
+	reply = mtod(m, struct krpc_reply *);
+	if (reply->rp_auth.authtype != 0) {
+		len += fxdr_unsigned(u_int32_t, reply->rp_auth.authlen);
+		len = (len + 3) & ~3; /* XXX? */
+	}
+	m_adj(m, len);
+
+	/* result */
+	*data = m;
+	if (from_p) {
+		*from_p = from;
+		from = NULL;
+	}
+
+ out:
+	if (mhead) m_freem(mhead);
+	if (from) free(from, M_SONAME);
+	soclose(so);
+	return error;
+}
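
krpc_call() above filters every datagram it receives: the direction word must say REPLY, the xid must match the request, rp_astatus must show the call was accepted and rp_status must be zero before the reply is handed back to the caller. A condensed sketch of that filter (the struct below is simplified; the real krpc_reply carries verifier data between the accept status and the result status):

#include <stdio.h>
#include <stdint.h>

#define RPC_REPLY 1		/* RPC message direction for replies */

struct reply_hdr {
	uint32_t xid;		/* must match the request */
	uint32_t direction;	/* must be RPC_REPLY */
	uint32_t astatus;	/* 0: call accepted */
	uint32_t status;	/* 0: call succeeded */
};

/* Return 1 if this datagram answers the request identified by xid. */
static int
reply_matches(const struct reply_hdr *r, uint32_t xid)
{
	return (r->xid == xid && r->direction == RPC_REPLY &&
	    r->astatus == 0 && r->status == 0);
}

int
main(void)
{
	struct reply_hdr good = { 42, RPC_REPLY, 0, 0 };
	struct reply_hdr stale = { 41, RPC_REPLY, 0, 0 };

	printf("good: %d, stale: %d\n",
	    reply_matches(&good, 42), reply_matches(&stale, 42));
	return 0;
}
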
+
+/*
+ * eXternal Data Representation routines.
+ * (but with non-standard args...)
+ */
+
+/*
+ * String representation for RPC.
+ */
+struct xdr_string {
+	u_int32_t len;		/* length without null or padding */
+	char data[4];	/* data (longer, of course) */
+    /* data is padded to a long-word boundary */
+};
+
+struct mbuf *
+xdr_string_encode(char *str, int len)
+{
+	struct mbuf *m;
+	struct xdr_string *xs;
+	int dlen;	/* padded string length */
+	int mlen;	/* message length */
+
+	dlen = (len + 3) & ~3;
+	mlen = dlen + 4;
+
+	if (mlen > MCLBYTES)		/* If too big, we just can't do it. */
+		return (NULL);
+
+	m = m_get2(mlen, M_WAITOK, MT_DATA, 0);
+	xs = mtod(m, struct xdr_string *);
+	m->m_len = mlen;
+	xs->len = txdr_unsigned(len);
+	bcopy(str, xs->data, len);
+	return (m);
+}
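
xdr_string_encode() above emits the string length as an XDR unsigned, the bytes themselves and zero padding up to the next 4-byte boundary, so the message occupies ((len + 3) & ~3) + 4 bytes. A user-space version of the same encoding (illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() */

/* Encode str as an XDR string into buf; return the encoded size. */
static size_t
xdr_string(unsigned char *buf, const char *str)
{
	size_t len = strlen(str);
	size_t dlen = (len + 3) & ~(size_t)3;	/* padded data length */
	uint32_t nlen = htonl((uint32_t)len);

	memcpy(buf, &nlen, sizeof(nlen));	/* length word */
	memset(buf + 4, 0, dlen);		/* zero the pad bytes */
	memcpy(buf + 4, str, len);		/* the string itself */
	return dlen + 4;
}

int
main(void)
{
	unsigned char buf[64];

	printf("\"/export/root\" encodes to %zu bytes\n",
	    xdr_string(buf, "/export/root"));
	return 0;
}
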
diff --git a/freebsd/sys/nfs/nfs_common.h b/freebsd/sys/nfs/nfs_common.h
new file mode 100644
index 0000000..a821934
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_common.h
@@ -0,0 +1,137 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfsm_subs.h	8.2 (Berkeley) 3/30/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFS_COMMON_H_
+#define _NFS_NFS_COMMON_H_
+
+extern enum vtype nv3tov_type[];
+extern nfstype nfsv3_type[];
+
+#define	vtonfsv2_mode(t, m) \
+    txdr_unsigned(((t) == VFIFO) ? MAKEIMODE(VCHR, (m)) : MAKEIMODE((t), (m)))
+
+#define	nfsv3tov_type(a)	nv3tov_type[fxdr_unsigned(u_int32_t,(a))&0x7]
+#define	vtonfsv3_type(a)	txdr_unsigned(nfsv3_type[((int32_t)(a))])
+
+int	nfs_adv(struct mbuf **, caddr_t *, int, int);
+void	*nfsm_disct(struct mbuf **, caddr_t *, int, int, int);
+int	nfs_realign(struct mbuf **, int);
+
+/* ****************************** */
+/* Build request/reply phase macros */
+
+void	*nfsm_build_xx(int s, struct mbuf **mb, caddr_t *bpos);
+
+#define	nfsm_build(c, s) \
+	(c)nfsm_build_xx((s), &mb, &bpos)
+
+/* ****************************** */
+/* Interpretation phase macros */
+
+void	*nfsm_dissect_xx(int s, struct mbuf **md, caddr_t *dpos);
+void	*nfsm_dissect_xx_nonblock(int s, struct mbuf **md, caddr_t *dpos);
+int	nfsm_strsiz_xx(int *s, int m, struct mbuf **md, caddr_t *dpos);
+int	nfsm_adv_xx(int s, struct mbuf **md, caddr_t *dpos);
+
+/* Error check helpers */
+#define nfsm_dcheck(t1, mrep) \
+do { \
+	if (t1 != 0) { \
+		error = t1; \
+		m_freem((mrep)); \
+		(mrep) = NULL; \
+		goto nfsmout; \
+	} \
+} while (0)
+
+#define nfsm_dcheckp(retp, mrep) \
+do { \
+	if (retp == NULL) { \
+		error = EBADRPC; \
+		m_freem((mrep)); \
+		(mrep) = NULL; \
+		goto nfsmout; \
+	} \
+} while (0)
+
+#define	nfsm_dissect(c, s) \
+({ \
+	void *ret; \
+	ret = nfsm_dissect_xx((s), &md, &dpos); \
+	nfsm_dcheckp(ret, mrep); \
+	(c)ret; \
+})
+
+#define	nfsm_dissect_nonblock(c, s) \
+({ \
+	void *ret; \
+	ret = nfsm_dissect_xx_nonblock((s), &md, &dpos); \
+	nfsm_dcheckp(ret, mrep); \
+	(c)ret; \
+})
+
+#define	nfsm_strsiz(s,m) \
+do { \
+	int t1; \
+	t1 = nfsm_strsiz_xx(&(s), (m), &md, &dpos); \
+	nfsm_dcheck(t1, mrep); \
+} while(0)
+
+#define nfsm_mtouio(p,s) \
+do {\
+	int32_t t1 = 0; \
+	if ((s) > 0) \
+		t1 = nfsm_mbuftouio(&md, (p), (s), &dpos); \
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+#define nfsm_rndup(a)	(((a)+3)&(~0x3))
+
+#define	nfsm_adv(s) \
+do { \
+	int t1; \
+	t1 = nfsm_adv_xx((s), &md, &dpos); \
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+#ifdef __NO_STRICT_ALIGNMENT
+#define nfsm_aligned(p, t)	1
+#else
+#define nfsm_aligned(p, t)	((((u_long)(p)) & (sizeof(t) - 1)) == 0)
+#endif
+
+#endif
diff --git a/freebsd/sys/nfs/nfs_fha.c b/freebsd/sys/nfs/nfs_fha.c
new file mode 100644
index 0000000..8c5c144
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_fha.c
@@ -0,0 +1,527 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/vnode.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/mbuf.h>
+#include <sys/sbuf.h>
+
+#include <rpc/rpc.h>
+#include <nfs/nfs_fha.h>
+
+static MALLOC_DEFINE(M_NFS_FHA, "NFS FHA", "NFS FHA");
+
+/*
+ * XXX need to commonize definitions between old and new NFS code.  Define
+ * this here so we don't include one nfsproto.h over the other.
+ */
+#define	NFS_PROG		100003
+
+void
+fha_init(struct fha_params *softc)
+{
+	int i;
+
+	for (i = 0; i < FHA_HASH_SIZE; i++)
+		mtx_init(&softc->fha_hash[i].mtx, "fhalock", NULL, MTX_DEF);
+
+	/*
+	 * Set the default tuning parameters.
+	 */
+	softc->ctls.enable = FHA_DEF_ENABLE;
+	softc->ctls.read = FHA_DEF_READ;
+	softc->ctls.write = FHA_DEF_WRITE;
+	softc->ctls.bin_shift = FHA_DEF_BIN_SHIFT;
+	softc->ctls.max_nfsds_per_fh = FHA_DEF_MAX_NFSDS_PER_FH;
+	softc->ctls.max_reqs_per_nfsd = FHA_DEF_MAX_REQS_PER_NFSD;
+
+	/*
+	 * Add sysctls so the user can change the tuning parameters.
+	 */
+	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "enable", CTLFLAG_RWTUN,
+	    &softc->ctls.enable, 0, "Enable NFS File Handle Affinity (FHA)");
+
+	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "read", CTLFLAG_RWTUN,
+	    &softc->ctls.read, 0, "Enable NFS FHA read locality");
+
+	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "write", CTLFLAG_RWTUN,
+	    &softc->ctls.write, 0, "Enable NFS FHA write locality");
+
+	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "bin_shift", CTLFLAG_RWTUN,
+	    &softc->ctls.bin_shift, 0, "Maximum locality distance 2^(bin_shift) bytes");
+
+	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "max_nfsds_per_fh", CTLFLAG_RWTUN,
+	    &softc->ctls.max_nfsds_per_fh, 0, "Maximum nfsd threads that "
+	    "should be working on requests for the same file handle");
+
+	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "max_reqs_per_nfsd", CTLFLAG_RWTUN,
+	    &softc->ctls.max_reqs_per_nfsd, 0, "Maximum requests that "
+	    "single nfsd thread should be working on at any time");
+
+	SYSCTL_ADD_OID(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+	    OID_AUTO, "fhe_stats", CTLTYPE_STRING | CTLFLAG_RD, 0, 0,
+	    softc->callbacks.fhe_stats_sysctl, "A", "");
+
+}
+
+void
+fha_uninit(struct fha_params *softc)
+{
+	int i;
+
+	sysctl_ctx_free(&softc->sysctl_ctx);
+	for (i = 0; i < FHA_HASH_SIZE; i++)
+		mtx_destroy(&softc->fha_hash[i].mtx);
+}
+
+/*
+ * This just specifies that offsets should obey affinity when within
+ * the same 1Mbyte (1<<20) chunk for the file (reads only for now).
+ */
+static void
+fha_extract_info(struct svc_req *req, struct fha_info *i,
+    struct fha_callbacks *cb)
+{
+	struct mbuf *md;
+	caddr_t dpos;
+	static u_int64_t random_fh = 0;
+	int error;
+	int v3 = (req->rq_vers == 3);
+	rpcproc_t procnum;
+
+	/*
+	 * We start off with a random fh.  If we get a reasonable
+	 * procnum, we set the fh.  If there's a concept of offset
+	 * that we're interested in, we set that.
+	 */
+	i->fh = ++random_fh;
+	i->offset = 0;
+	i->locktype = LK_EXCLUSIVE;
+	i->read = i->write = 0;
+
+	/*
+	 * Extract the procnum and convert to v3 form if necessary,
+	 * taking care to deal with out-of-range procnums.  Caller will
+	 * ensure that rq_vers is either 2 or 3.
+	 */
+	procnum = req->rq_proc;
+	if (!v3) {
+		rpcproc_t tmp_procnum;
+
+		tmp_procnum = cb->get_procnum(procnum);
+		if (tmp_procnum == -1)
+			goto out;
+		procnum = tmp_procnum;
+	}
+
+	/*
+	 * We do affinity for most.  However, we divide a realm of affinity
+	 * by file offset so as to allow for concurrent random access.  We
+	 * only do this for reads today, but this may change when IFS supports
+	 * efficient concurrent writes.
+	 */
+	if (cb->no_offset(procnum))
+		goto out;
+
+	i->read = cb->is_read(procnum);
+	i->write = cb->is_write(procnum);
+
+	error = cb->realign(&req->rq_args, M_NOWAIT);
+	if (error)
+		goto out;
+	md = req->rq_args;
+	dpos = mtod(md, caddr_t);
+
+	/* Grab the filehandle. */
+	error = cb->get_fh(&i->fh, v3, &md, &dpos);
+	if (error)
+		goto out;
+
+	/* Content ourselves with zero offset for all but reads. */
+	if (i->read || i->write)
+		cb->get_offset(&md, &dpos, v3, i);
+
+out:
+	cb->set_locktype(procnum, i);
+}
+
+static struct fha_hash_entry *
+fha_hash_entry_new(u_int64_t fh)
+{
+	struct fha_hash_entry *e;
+
+	e = malloc(sizeof(*e), M_NFS_FHA, M_WAITOK);
+	e->fh = fh;
+	e->num_rw = 0;
+	e->num_exclusive = 0;
+	e->num_threads = 0;
+	LIST_INIT(&e->threads);
+
+	return (e);
+}
+
+static void
+fha_hash_entry_destroy(struct fha_hash_entry *e)
+{
+
+	mtx_assert(e->mtx, MA_OWNED);
+	KASSERT(e->num_rw == 0,
+	    ("%d reqs on destroyed fhe %p", e->num_rw, e));
+	KASSERT(e->num_exclusive == 0,
+	    ("%d exclusive reqs on destroyed fhe %p", e->num_exclusive, e));
+	KASSERT(e->num_threads == 0,
+	    ("%d threads on destroyed fhe %p", e->num_threads, e));
+	free(e, M_NFS_FHA);
+}
+
+static void
+fha_hash_entry_remove(struct fha_hash_entry *e)
+{
+
+	mtx_assert(e->mtx, MA_OWNED);
+	LIST_REMOVE(e, link);
+	fha_hash_entry_destroy(e);
+}
+
+static struct fha_hash_entry *
+fha_hash_entry_lookup(struct fha_params *softc, u_int64_t fh)
+{
+	struct fha_hash_slot *fhs;
+	struct fha_hash_entry *fhe, *new_fhe;
+
+	fhs = &softc->fha_hash[fh % FHA_HASH_SIZE];
+	new_fhe = fha_hash_entry_new(fh);
+	new_fhe->mtx = &fhs->mtx;
+	mtx_lock(&fhs->mtx);
+	LIST_FOREACH(fhe, &fhs->list, link)
+		if (fhe->fh == fh)
+			break;
+	if (!fhe) {
+		fhe = new_fhe;
+		LIST_INSERT_HEAD(&fhs->list, fhe, link);
+	} else
+		fha_hash_entry_destroy(new_fhe);
+	return (fhe);
+}
+
+static void
+fha_hash_entry_add_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
+{
+
+	mtx_assert(fhe->mtx, MA_OWNED);
+	thread->st_p2 = 0;
+	LIST_INSERT_HEAD(&fhe->threads, thread, st_alink);
+	fhe->num_threads++;
+}
+
+static void
+fha_hash_entry_remove_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
+{
+
+	mtx_assert(fhe->mtx, MA_OWNED);
+	KASSERT(thread->st_p2 == 0,
+	    ("%d reqs on removed thread %p", thread->st_p2, thread));
+	LIST_REMOVE(thread, st_alink);
+	fhe->num_threads--;
+}
+
+/*
+ * Account for an ongoing operation associated with this file.
+ */
+static void
+fha_hash_entry_add_op(struct fha_hash_entry *fhe, int locktype, int count)
+{
+
+	mtx_assert(fhe->mtx, MA_OWNED);
+	if (LK_EXCLUSIVE == locktype)
+		fhe->num_exclusive += count;
+	else
+		fhe->num_rw += count;
+}
+
+/*
+ * Get the service thread currently associated with the fhe that is
+ * appropriate to handle this operation.
+ */
+static SVCTHREAD *
+fha_hash_entry_choose_thread(struct fha_params *softc,
+    struct fha_hash_entry *fhe, struct fha_info *i, SVCTHREAD *this_thread)
+{
+	SVCTHREAD *thread, *min_thread = NULL;
+	int req_count, min_count = 0;
+	off_t offset1, offset2;
+
+	LIST_FOREACH(thread, &fhe->threads, st_alink) {
+		req_count = thread->st_p2;
+
+		/* If there are any writes in progress, use the first thread. */
+		if (fhe->num_exclusive) {
+#if 0
+			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
+			    "fha: %p(%d)w", thread, req_count);
+#endif
+			return (thread);
+		}
+
+		/* Check whether we should consider locality. */
+		if ((i->read && !softc->ctls.read) ||
+		    (i->write && !softc->ctls.write))
+			goto noloc;
+
+		/*
+		 * Check for locality, making sure that we won't
+		 * exceed our per-thread load limit in the process.
+		 */
+		offset1 = i->offset;
+		offset2 = thread->st_p3;
+
+		if (((offset1 >= offset2)
+		  && ((offset1 - offset2) < (1 << softc->ctls.bin_shift)))
+		 || ((offset2 > offset1)
+		  && ((offset2 - offset1) < (1 << softc->ctls.bin_shift)))) {
+			if ((softc->ctls.max_reqs_per_nfsd == 0) ||
+			    (req_count < softc->ctls.max_reqs_per_nfsd)) {
+#if 0
+				ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
+				    "fha: %p(%d)r", thread, req_count);
+#endif
+				return (thread);
+			}
+		}
+
+noloc:
+		/*
+		 * We don't have a locality match, so skip this thread,
+		 * but keep track of the most attractive thread in case
+		 * we need to come back to it later.
+		 */
+#if 0
+		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
+		    "fha: %p(%d)s off1 %llu off2 %llu", thread,
+		    req_count, offset1, offset2);
+#endif
+		if ((min_thread == NULL) || (req_count < min_count)) {
+			min_count = req_count;
+			min_thread = thread;
+		}
+	}
+
+	/*
+	 * We didn't find a good match yet.  See if we can add
+	 * a new thread to this file handle entry's thread list.
+	 */
+	if ((softc->ctls.max_nfsds_per_fh == 0) ||
+	    (fhe->num_threads < softc->ctls.max_nfsds_per_fh)) {
+		thread = this_thread;
+#if 0
+		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
+		    "fha: %p(%d)t", thread, thread->st_p2);
+#endif
+		fha_hash_entry_add_thread(fhe, thread);
+	} else {
+		/*
+		 * We don't want to use any more threads for this file, so
+		 * go back to the most attractive nfsd we're already using.
+		 */
+		thread = min_thread;
+	}
+
+	return (thread);
+}
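The locality test above accepts a thread only when the request's offset lies within one bin (1 << bin_shift bytes, 4 MB with the default FHA_DEF_BIN_SHIFT of 22) of the offset that thread last serviced (st_p3). A minimal standalone sketch of that predicate; the helper name is hypothetical and not part of the imported code:

    #include <sys/types.h>
    #include <stdint.h>

    /* True when the two offsets lie within one bin of each other. */
    static int
    fha_offsets_close(off_t offset1, off_t offset2, uint32_t bin_shift)
    {
        off_t dist;

        dist = (offset1 >= offset2) ? offset1 - offset2 : offset2 - offset1;
        return (dist < ((off_t)1 << bin_shift));
    }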
+
+/*
+ * After getting a request, try to assign it to some thread.  Usually we
+ * handle it ourselves.
+ */
+SVCTHREAD *
+fha_assign(SVCTHREAD *this_thread, struct svc_req *req,
+    struct fha_params *softc)
+{
+	SVCTHREAD *thread;
+	struct fha_info i;
+	struct fha_hash_entry *fhe;
+	struct fha_callbacks *cb;
+
+	cb = &softc->callbacks;
+
+	/* Check to see whether we're enabled. */
+	if (softc->ctls.enable == 0)
+		goto thist;
+
+	/*
+	 * Only do placement if this is an NFS request.
+	 */
+	if (req->rq_prog != NFS_PROG)
+		goto thist;
+
+	if (req->rq_vers != 2 && req->rq_vers != 3)
+		goto thist;
+
+	fha_extract_info(req, &i, cb);
+
+	/*
+	 * We save the offset associated with this request for later
+	 * nfsd matching.
+	 */
+	fhe = fha_hash_entry_lookup(softc, i.fh);
+	req->rq_p1 = fhe;
+	req->rq_p2 = i.locktype;
+	req->rq_p3 = i.offset;
+
+	/*
+	 * Choose a thread, taking into consideration locality, thread load,
+	 * and the number of threads already working on this file.
+	 */
+	thread = fha_hash_entry_choose_thread(softc, fhe, &i, this_thread);
+	KASSERT(thread, ("fha_assign: NULL thread!"));
+	fha_hash_entry_add_op(fhe, i.locktype, 1);
+	thread->st_p2++;
+	thread->st_p3 = i.offset;
+
+	/*
+	 * Grab the pool lock here so that the chosen thread cannot go away
+	 * before the new request is inserted into its queue while we drop
+	 * the fhe lock.
+	 */
+	mtx_lock(&thread->st_lock);
+	mtx_unlock(fhe->mtx);
+
+	return (thread);
+thist:
+	req->rq_p1 = NULL;
+	mtx_lock(&this_thread->st_lock);
+	return (this_thread);
+}
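fha_assign() hands back the chosen thread with its st_lock already held, so the caller can queue the request on it before dropping the lock, and every request routed this way must later be balanced by fha_nd_complete(). A sketch of that pairing from the dispatcher's point of view; it assumes the kernel RPC server environment and the wrapper itself is hypothetical:

    /* Hypothetical dispatcher glue showing the assign/complete contract. */
    static void
    example_dispatch(SVCTHREAD *this_thread, struct svc_req *req,
        struct fha_params *softc)
    {
        SVCTHREAD *st;

        /* Pick a thread; fha_assign() returns with st's st_lock held. */
        st = fha_assign(this_thread, req, softc);

        /*
         * The real dispatcher queues req on st at this point, then
         * releases the lock it was handed.
         */
        mtx_unlock(&st->st_lock);

        /* Once the request has been de-queued and executed: */
        fha_nd_complete(st, req);
    }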
+
+/*
+ * Called when we're done with an operation.  The request has already
+ * been de-queued.
+ */
+void
+fha_nd_complete(SVCTHREAD *thread, struct svc_req *req)
+{
+	struct fha_hash_entry *fhe = req->rq_p1;
+	struct mtx *mtx;
+
+	/*
+	 * This may be called for reqs that didn't go through
+	 * fha_assign (e.g. extra NULL ops used for RPCSEC_GSS).
+	 */
+	if (!fhe)
+		return;
+
+	mtx = fhe->mtx;
+	mtx_lock(mtx);
+	fha_hash_entry_add_op(fhe, req->rq_p2, -1);
+	thread->st_p2--;
+	KASSERT(thread->st_p2 >= 0, ("Negative request count %d on %p",
+	    thread->st_p2, thread));
+	if (thread->st_p2 == 0) {
+		fha_hash_entry_remove_thread(fhe, thread);
+		if (0 == fhe->num_rw + fhe->num_exclusive)
+			fha_hash_entry_remove(fhe);
+	}
+	mtx_unlock(mtx);
+}
+
+int
+fhe_stats_sysctl(SYSCTL_HANDLER_ARGS, struct fha_params *softc)
+{
+	int error, i;
+	struct sbuf sb;
+	struct fha_hash_entry *fhe;
+	bool_t first, hfirst;
+	SVCTHREAD *thread;
+
+	sbuf_new(&sb, NULL, 65536, SBUF_FIXEDLEN);
+
+	if (!*softc->pool) {
+		sbuf_printf(&sb, "NFSD not running\n");
+		goto out;
+	}
+
+	for (i = 0; i < FHA_HASH_SIZE; i++)
+		if (!LIST_EMPTY(&softc->fha_hash[i].list))
+			break;
+
+	if (i == FHA_HASH_SIZE) {
+		sbuf_printf(&sb, "No file handle entries.\n");
+		goto out;
+	}
+
+	hfirst = TRUE;
+	for (; i < FHA_HASH_SIZE; i++) {
+		mtx_lock(&softc->fha_hash[i].mtx);
+		if (LIST_EMPTY(&softc->fha_hash[i].list)) {
+			mtx_unlock(&softc->fha_hash[i].mtx);
+			continue;
+		}
+		sbuf_printf(&sb, "%shash %d: {\n", hfirst ? "" : ", ", i);
+		first = TRUE;
+		LIST_FOREACH(fhe, &softc->fha_hash[i].list, link) {
+			sbuf_printf(&sb, "%sfhe %p: {\n", first ? "  " : ", ", fhe);
+
+			sbuf_printf(&sb, "    fh: %ju\n", (uintmax_t) fhe->fh);
+			sbuf_printf(&sb, "    num_rw/exclusive: %d/%d\n",
+			    fhe->num_rw, fhe->num_exclusive);
+			sbuf_printf(&sb, "    num_threads: %d\n", fhe->num_threads);
+
+			LIST_FOREACH(thread, &fhe->threads, st_alink) {
+				sbuf_printf(&sb, "      thread %p offset %ju "
+				    "reqs %d\n", thread,
+				    thread->st_p3, thread->st_p2);
+			}
+
+			sbuf_printf(&sb, "  }");
+			first = FALSE;
+		}
+		sbuf_printf(&sb, "\n}");
+		mtx_unlock(&softc->fha_hash[i].mtx);
+		hfirst = FALSE;
+	}
+
+ out:
+	sbuf_trim(&sb);
+	sbuf_finish(&sb);
+	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+	sbuf_delete(&sb);
+	return (error);
+}
diff --git a/freebsd/sys/nfs/nfs_fha.h b/freebsd/sys/nfs/nfs_fha.h
new file mode 100644
index 0000000..230d13b
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_fha.h
@@ -0,0 +1,122 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/* $FreeBSD$ */
+
+#ifndef	_NFS_FHA_H
+#define	_NFS_FHA_H 1
+
+#ifdef	_KERNEL
+
+/* Sysctl defaults. */
+#define FHA_DEF_ENABLE			1
+#define FHA_DEF_READ			1
+#define FHA_DEF_WRITE			1
+#define FHA_DEF_BIN_SHIFT		22 /* 4MB */
+#define FHA_DEF_MAX_NFSDS_PER_FH	8
+#define FHA_DEF_MAX_REQS_PER_NFSD	0  /* Unlimited */
+
+#define FHA_HASH_SIZE	251
+
+struct fha_ctls {
+	int	 enable;
+	int	 read;
+	int	 write;
+	uint32_t bin_shift;
+	uint32_t max_nfsds_per_fh;
+	uint32_t max_reqs_per_nfsd;
+};
+
+/*
+ * These are the entries in the filehandle hash.  They talk about a specific
+ * file, requests against which are being handled by one or more nfsds.  We
+ * keep a chain of nfsds against the file. We only have more than one if reads
+ * are ongoing, and then only if the reads affect disparate regions of the
+ * file.
+ *
+ * In general, we want to assign a new request to an existing nfsd if it is
+ * going to contend with work happening already on that nfsd, or if the
+ * operation is a read and the nfsd is already handling a proximate read.  We
+ * do this to avoid jumping around in the read stream unnecessarily, and to
+ * avoid contention between threads over single files.
+ */
+struct fha_hash_entry {
+	struct mtx *mtx;
+	LIST_ENTRY(fha_hash_entry) link;
+	u_int64_t fh;
+	u_int32_t num_rw;
+	u_int32_t num_exclusive;
+	u_int8_t num_threads;
+	struct svcthread_list threads;
+};
+
+LIST_HEAD(fha_hash_entry_list, fha_hash_entry);
+
+struct fha_hash_slot {
+	struct fha_hash_entry_list list;
+	struct mtx mtx;
+};
+
+/* A structure used for passing around data internally. */
+struct fha_info {
+	u_int64_t fh;
+	off_t offset;
+	int locktype;
+	int read;
+	int write;
+};
+
+struct fha_callbacks {
+	rpcproc_t (*get_procnum)(rpcproc_t procnum);
+	int (*realign)(struct mbuf **mb, int malloc_flags);
+	int (*get_fh)(uint64_t *fh, int v3, struct mbuf **md, caddr_t *dpos);
+	int (*is_read)(rpcproc_t procnum);
+	int (*is_write)(rpcproc_t procnum);
+	int (*get_offset)(struct mbuf **md, caddr_t *dpos, int v3, struct
+			  fha_info *info);
+	int (*no_offset)(rpcproc_t procnum);
+	void (*set_locktype)(rpcproc_t procnum, struct fha_info *info);
+	int (*fhe_stats_sysctl)(SYSCTL_HANDLER_ARGS);
+};
+
+struct fha_params {
+	struct fha_hash_slot fha_hash[FHA_HASH_SIZE];
+	struct sysctl_ctx_list sysctl_ctx;
+	struct sysctl_oid *sysctl_tree;
+	struct fha_ctls ctls;
+	struct fha_callbacks callbacks;
+	char server_name[32];
+	SVCPOOL **pool;
+};
+
+void fha_nd_complete(SVCTHREAD *, struct svc_req *);
+SVCTHREAD *fha_assign(SVCTHREAD *, struct svc_req *, struct fha_params *);
+void fha_init(struct fha_params *softc);
+void fha_uninit(struct fha_params *softc);
+int fhe_stats_sysctl(SYSCTL_HANDLER_ARGS, struct fha_params *softc);
+
+#endif /* _KERNEL */
+#endif /* _NFS_FHA_H */
diff --git a/freebsd/sys/nfs/nfs_kdtrace.h b/freebsd/sys/nfs/nfs_kdtrace.h
new file mode 100644
index 0000000..13d68aa
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_kdtrace.h
@@ -0,0 +1,122 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * This software was developed at the University of Cambridge Computer
+ * Laboratory with support from a grant from Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFS_KDTRACE_H_
+#define	_NFSCLIENT_NFS_KDTRACE_H_
+
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+
+/*
+ * Definitions for NFS access cache probes.
+ */
+extern uint32_t nfsclient_accesscache_flush_done_id;
+extern uint32_t nfsclient_accesscache_get_hit_id;
+extern uint32_t nfsclient_accesscache_get_miss_id;
+extern uint32_t nfsclient_accesscache_load_done_id;
+
+#define	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp)	do {			\
+	if (dtrace_nfsclient_accesscache_flush_done_probe != NULL)	\
+		(dtrace_nfsclient_accesscache_flush_done_probe)(	\
+		    nfsclient_accesscache_flush_done_id, (vp));		\
+} while (0)
+
+#define	KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp, uid, mode)	do {		\
+	if (dtrace_nfsclient_accesscache_get_hit_probe != NULL)		\
+		(dtrace_nfsclient_accesscache_get_hit_probe)(		\
+		    nfsclient_accesscache_get_hit_id, (vp), (uid),	\
+		    (mode));						\
+} while (0)
+	
+#define	KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp, uid, mode)	do {		\
+	if (dtrace_nfsclient_accesscache_get_miss_probe != NULL)	\
+		(dtrace_nfsclient_accesscache_get_miss_probe)(		\
+		    nfsclient_accesscache_get_miss_id, (vp), (uid),	\
+		    (mode));						\
+} while (0)
+
+#define	KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, uid, rmode, error) do {	\
+	if (dtrace_nfsclient_accesscache_load_done_probe != NULL)	\
+		(dtrace_nfsclient_accesscache_load_done_probe)(		\
+		    nfsclient_accesscache_load_done_id, (vp), (uid),	\
+		    (rmode), (error));					\
+} while (0)
+
+/*
+ * Definitions for NFS attribute cache probes.
+ */
+extern uint32_t nfsclient_attrcache_flush_done_id;
+extern uint32_t nfsclient_attrcache_get_hit_id;
+extern uint32_t nfsclient_attrcache_get_miss_id;
+extern uint32_t nfsclient_attrcache_load_done_id;
+
+#define	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp)	do {			\
+	if (dtrace_nfsclient_attrcache_flush_done_probe != NULL)	\
+		(dtrace_nfsclient_attrcache_flush_done_probe)(		\
+		    nfsclient_attrcache_flush_done_id, (vp));		\
+} while (0)
+
+#define	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap)	do {			\
+	if (dtrace_nfsclient_attrcache_get_hit_probe != NULL)		\
+		(dtrace_nfsclient_attrcache_get_hit_probe)(		\
+		    nfsclient_attrcache_get_hit_id, (vp), (vap));	\
+} while (0)
+
+#define	KDTRACE_NFS_ATTRCACHE_GET_MISS(vp)	do {			\
+	if (dtrace_nfsclient_attrcache_get_miss_probe != NULL)		\
+		(dtrace_nfsclient_attrcache_get_miss_probe)(		\
+			    nfsclient_attrcache_get_miss_id, (vp));	\
+} while (0)
+
+#define	KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error)	do {		\
+	if (dtrace_nfsclient_attrcache_load_done_probe != NULL)		\
+		(dtrace_nfsclient_attrcache_load_done_probe)(		\
+		    nfsclient_attrcache_load_done_id, (vp), (vap),	\
+		    (error));						\
+} while (0)
+
+#else /* !KDTRACE_HOOKS */
+
+#define	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp)
+#define	KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp, uid, mode)
+#define	KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp, uid, mode)
+#define	KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, uid, rmode, error)
+
+#define	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp)
+#define	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap)
+#define	KDTRACE_NFS_ATTRCACHE_GET_MISS(vp)
+#define	KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error)
+
+#endif /* KDTRACE_HOOKS */
+
+#endif /* !_NFSCLIENT_NFS_KDTRACE_H_ */
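These macros expand to DTrace probe firings only when the kernel is built with KDTRACE_HOOKS and compile away to nothing otherwise, so call sites need no conditionals of their own. A hedged sketch of a call site; the function is hypothetical and it assumes the usual struct vattr argument used by the client:

    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <nfs/nfs_kdtrace.h>

    /* Hypothetical call site: fire hit/miss probes around a cache lookup. */
    static void
    example_attrcache_lookup(struct vnode *vp, struct vattr *vap, int hit)
    {
        if (hit)
            KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);
        else
            KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
    }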
diff --git a/freebsd/sys/nfs/nfs_lock.c b/freebsd/sys/nfs/nfs_lock.c
new file mode 100644
index 0000000..efa1e80
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_lock.c
@@ -0,0 +1,403 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ *    promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      from BSDI nfs_lock.c,v 2.4 1998/12/14 23:49:56 jch Exp
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>		/* for hz */
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/lockf.h>		/* Must come after sys/malloc.h */
+#include <sys/mbuf.h>
+#include <sys/mount.h>
+#include <sys/namei.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/resourcevar.h>
+#include <sys/socket.h>
+#include <sys/unistd.h>
+#include <sys/vnode.h>
+
+#include <net/if.h>
+
+#include <nfs/nfsproto.h>
+#include <nfs/nfs_lock.h>
+#include <nfsclient/nfs.h>
+#include <nfsclient/nfsmount.h>
+#include <nfsclient/nfsnode.h>
+#include <nfsclient/nlminfo.h>
+
+extern void (*nlminfo_release_p)(struct proc *p);
+
+vop_advlock_t	*nfs_advlock_p = nfs_dolock;
+vop_reclaim_t	*nfs_reclaim_p = NULL;
+
+static MALLOC_DEFINE(M_NFSLOCK, "nfsclient_lock", "NFS lock request");
+static MALLOC_DEFINE(M_NLMINFO, "nfsclient_nlminfo",
+    "NFS lock process structure");
+
+static int nfslockdans(struct thread *td, struct lockd_ans *ansp);
+static void nlminfo_release(struct proc *p);
+/*
+ * --------------------------------------------------------------------
+ * A miniature device driver which the userland uses to talk to us.
+ *
+ */
+
+static struct cdev *nfslock_dev;
+static struct mtx nfslock_mtx;
+static int nfslock_isopen;
+static TAILQ_HEAD(,__lock_msg)	nfslock_list;
+
+static int
+nfslock_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+	int error;
+
+	error = priv_check(td, PRIV_NFS_LOCKD);
+	if (error)
+		return (error);
+
+	mtx_lock(&nfslock_mtx);
+	if (!nfslock_isopen) {
+		error = 0;
+		nfslock_isopen = 1;
+	} else {
+		error = EOPNOTSUPP;
+	}
+	mtx_unlock(&nfslock_mtx);
+		
+	return (error);
+}
+
+static int
+nfslock_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
+{
+	struct __lock_msg *lm;
+
+	mtx_lock(&nfslock_mtx);
+	nfslock_isopen = 0;
+	while (!TAILQ_EMPTY(&nfslock_list)) {
+		lm = TAILQ_FIRST(&nfslock_list);
+		/* XXX: answer request */
+		TAILQ_REMOVE(&nfslock_list, lm, lm_link);
+		free(lm, M_NFSLOCK);
+	}
+	mtx_unlock(&nfslock_mtx);
+	return (0);
+}
+
+static int
+nfslock_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+	int error;
+	struct __lock_msg *lm;
+
+	if (uio->uio_resid != sizeof *lm)
+		return (EOPNOTSUPP);
+	lm = NULL;
+	error = 0;
+	mtx_lock(&nfslock_mtx);
+	while (TAILQ_EMPTY(&nfslock_list)) {
+		error = msleep(&nfslock_list, &nfslock_mtx, PSOCK | PCATCH,
+		    "nfslockd", 0);
+		if (error)
+			break;
+	}
+	if (!error) {
+		lm = TAILQ_FIRST(&nfslock_list);
+		TAILQ_REMOVE(&nfslock_list, lm, lm_link);
+	}
+	mtx_unlock(&nfslock_mtx);
+	if (!error) {
+		error = uiomove(lm, sizeof *lm, uio);
+		free(lm, M_NFSLOCK);
+	}
+	return (error);
+}
+
+static int
+nfslock_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+	struct lockd_ans la;
+	int error;
+
+	if (uio->uio_resid != sizeof la)
+		return (EOPNOTSUPP);
+	error = uiomove(&la, sizeof la, uio);
+	if (!error)
+		error = nfslockdans(curthread, &la);
+	return (error);
+}
+
+static int
+nfslock_send(struct __lock_msg *lm)
+{
+	struct __lock_msg *lm2;
+	int error;
+
+	error = 0;
+	lm2 = malloc(sizeof *lm2, M_NFSLOCK, M_WAITOK);
+	mtx_lock(&nfslock_mtx);
+	if (nfslock_isopen) {
+		memcpy(lm2, lm, sizeof *lm2);
+		TAILQ_INSERT_TAIL(&nfslock_list, lm2, lm_link);
+		wakeup(&nfslock_list);
+	} else {
+		error = EOPNOTSUPP;
+	}
+	mtx_unlock(&nfslock_mtx);
+	if (error)
+		free(lm2, M_NFSLOCK);
+	return (error);
+}
+
+static struct cdevsw nfslock_cdevsw = {
+	.d_version =	D_VERSION,
+	.d_open =	nfslock_open,
+	.d_close =	nfslock_close,
+	.d_read =	nfslock_read,
+	.d_write =	nfslock_write,
+	.d_name =	"nfslock"
+};
+
+static int
+nfslock_modevent(module_t mod __unused, int type, void *data __unused)
+{
+
+	switch (type) {
+	case MOD_LOAD:
+		if (bootverbose)
+			printf("nfslock: pseudo-device\n");
+		mtx_init(&nfslock_mtx, "nfslock", NULL, MTX_DEF);
+		TAILQ_INIT(&nfslock_list);
+		nlminfo_release_p = nlminfo_release;
+		nfslock_dev = make_dev(&nfslock_cdevsw, 0,
+		    UID_ROOT, GID_KMEM, 0600, _PATH_NFSLCKDEV);
+		return (0);
+	default:
+		return (EOPNOTSUPP);
+	}
+}
+
+DEV_MODULE(nfslock, nfslock_modevent, NULL);
+MODULE_VERSION(nfslock, 1);
+
+
+/*
+ * XXX
+ * We have to let the process know if the call succeeded.  I'm using an extra
+ * field in the p_nlminfo structure hung off the proc structure, as it is
+ * already used for lockd state.
+ */
+
+/*
+ * nfs_advlock --
+ *      NFS advisory byte-level locks.
+ *
+ * The vnode shall be (shared) locked on entry; it is
+ * unconditionally unlocked afterwards.
+ */
+int
+nfs_dolock(struct vop_advlock_args *ap)
+{
+	LOCKD_MSG msg;
+	struct thread *td;
+	struct vnode *vp;
+	int error;
+	struct flock *fl;
+	struct proc *p;
+	struct nfsmount *nmp;
+	struct timeval boottime;
+
+	td = curthread;
+	p = td->td_proc;
+
+	vp = ap->a_vp;
+	fl = ap->a_fl;
+	nmp = VFSTONFS(vp->v_mount);
+
+	ASSERT_VOP_LOCKED(vp, "nfs_dolock");
+
+	nmp->nm_getinfo(vp, msg.lm_fh, &msg.lm_fh_len, &msg.lm_addr,
+	    &msg.lm_nfsv3, NULL, NULL);
+	VOP_UNLOCK(vp, 0);
+
+	/*
+	 * The NLM protocol doesn't allow the server to return an error
+	 * on ranges, so we check them here.
+	 */
+	if (fl->l_whence != SEEK_END) {
+		if ((fl->l_whence != SEEK_CUR && fl->l_whence != SEEK_SET) ||
+		    fl->l_start < 0 ||
+		    (fl->l_len < 0 &&
+		     (fl->l_start == 0 || fl->l_start + fl->l_len < 0)))
+			return (EINVAL);
+		if (fl->l_len > 0 &&
+			 (fl->l_len - 1 > OFF_MAX - fl->l_start))
+			return (EOVERFLOW);
+	}
+
+	/*
+	 * Fill in the information structure.
+	 */
+	msg.lm_version = LOCKD_MSG_VERSION;
+	msg.lm_msg_ident.pid = p->p_pid;
+
+	mtx_lock(&Giant);
+	/*
+	 * If there is no nlminfo structure for this process yet, allocate one.
+	 */
+	if (p->p_nlminfo == NULL) {
+		p->p_nlminfo = malloc(sizeof(struct nlminfo),
+		    M_NLMINFO, M_WAITOK | M_ZERO);
+		p->p_nlminfo->pid_start = p->p_stats->p_start;
+		getboottime(&boottime);
+		timevaladd(&p->p_nlminfo->pid_start, &boottime);
+	}
+	msg.lm_msg_ident.pid_start = p->p_nlminfo->pid_start;
+	msg.lm_msg_ident.msg_seq = ++(p->p_nlminfo->msg_seq);
+
+	msg.lm_fl = *fl;
+	msg.lm_wait = ap->a_flags & F_WAIT;
+	msg.lm_getlk = ap->a_op == F_GETLK;
+	cru2x(td->td_ucred, &msg.lm_cred);
+
+	for (;;) {
+		error = nfslock_send(&msg);
+		if (error)
+			goto out;
+
+		/* Unlocks succeed immediately.  */
+		if (fl->l_type == F_UNLCK)
+			goto out;
+
+		/*
+		 * Retry after 20 seconds if we haven't gotten a response yet.
+		 * This number was picked out of thin air... but is longer
+		 * than even a reasonably loaded system should take (at least
+		 * on a local network).  XXX Probably should use a back-off
+		 * scheme.
+		 *
+		 * XXX: No PCATCH here since we currently have no useful
+		 * way to signal to the userland rpc.lockd that the request
+		 * has been aborted.  Once the rpc.lockd implementation
+		 * can handle aborts, and we report them properly,
+		 * PCATCH can be put back.  In the meantime, if we did
+		 * permit aborting, the lock attempt would "get lost"
+		 * and the lock would get stuck in the locked state.
+		 */
+		error = tsleep(p->p_nlminfo, PUSER, "lockd", 20*hz);
+		if (error != 0) {
+			if (error == EWOULDBLOCK) {
+				/*
+				 * We timed out, so we rewrite the request
+				 * to the fifo.
+				 */
+				continue;
+			}
+
+			break;
+		}
+
+		if (msg.lm_getlk && p->p_nlminfo->retcode == 0) {
+			if (p->p_nlminfo->set_getlk_pid) {
+				fl->l_sysid = 0; /* XXX */
+				fl->l_pid = p->p_nlminfo->getlk_pid;
+			} else {
+				fl->l_type = F_UNLCK;
+			}
+		}
+		error = p->p_nlminfo->retcode;
+		break;
+	}
+ out:
+	mtx_unlock(&Giant);
+	return (error);
+}
+
+/*
+ * nfslockdans --
+ *      NFS advisory byte-level locks answer from the lock daemon.
+ */
+static int
+nfslockdans(struct thread *td, struct lockd_ans *ansp)
+{
+	struct proc *targetp;
+
+	/* the version should match, or we're out of sync */
+	if (ansp->la_vers != LOCKD_ANS_VERSION)
+		return (EINVAL);
+
+	/* Find the process, set its return errno and wake it up. */
+	if ((targetp = pfind(ansp->la_msg_ident.pid)) == NULL)
+		return (ESRCH);
+
+	/*
+	 * Verify the pid hasn't been reused (if we can), and it isn't waiting
+	 * for an answer from a more recent request.  We return an EPIPE if
+	 * the match fails, because we've already used ESRCH above, and this
+	 * is sort of like writing on a pipe after the reader has closed it.
+	 */
+	if (targetp->p_nlminfo == NULL ||
+	    ((ansp->la_msg_ident.msg_seq != -1) &&
+	      (timevalcmp(&targetp->p_nlminfo->pid_start,
+			&ansp->la_msg_ident.pid_start, !=) ||
+	       targetp->p_nlminfo->msg_seq != ansp->la_msg_ident.msg_seq))) {
+		PROC_UNLOCK(targetp);
+		return (EPIPE);
+	}
+
+	targetp->p_nlminfo->retcode = ansp->la_errno;
+	targetp->p_nlminfo->set_getlk_pid = ansp->la_set_getlk_pid;
+	targetp->p_nlminfo->getlk_pid = ansp->la_getlk_pid;
+
+	wakeup(targetp->p_nlminfo);
+
+	PROC_UNLOCK(targetp);
+	return (0);
+}
+
+/*
+ * Free nlminfo attached to process.
+ */
+void        
+nlminfo_release(struct proc *p)
+{  
+	free(p->p_nlminfo, M_NLMINFO);
+	p->p_nlminfo = NULL;
+}
diff --git a/freebsd/sys/nfs/nfs_lock.h b/freebsd/sys/nfs/nfs_lock.h
new file mode 100644
index 0000000..63fdb20
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_lock.h
@@ -0,0 +1,94 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ *    promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      from nfs_lock.h,v 2.2 1998/04/28 19:38:41 don Exp
+ * $FreeBSD$
+ */
+
+/*
+ * lockd uses the nfssvc system call to get the unique kernel services it needs.
+ * It passes in a request structure with a version number at the start.
+ * This prevents libc from needing to change if the information passed
+ * between lockd and the kernel needs to change.
+ *
+ * If a structure changes, you must bump the version number.
+ */
+
+/*
+ * The pseudo-device where the kernel writes requests for locks on remote
+ * NFS files, and where lockd reads these requests.
+ */
+#define	_PATH_NFSLCKDEV	"nfslock"
+
+/*
+ * This structure is used to uniquely identify the process which originated
+ * a particular message to lockd.  A sequence number is used to differentiate
+ * multiple messages from the same process.  A process start time is used to
+ * detect the unlikely, but possible, event of the recycling of a pid.
+ */
+struct lockd_msg_ident {
+	pid_t		pid;            /* The process ID. */
+	struct timeval	pid_start;	/* Start time of process id */
+	int		msg_seq;	/* Sequence number of message */
+};
+
+#define LOCKD_MSG_VERSION	3
+
+/*
+ * The structure that the kernel hands us for each lock request.
+ */
+typedef struct __lock_msg {
+	TAILQ_ENTRY(__lock_msg)	lm_link;	/* internal linkage */
+	int			lm_version;	/* which version is this */
+	struct lockd_msg_ident	lm_msg_ident;	/* originator of the message */
+	struct flock		lm_fl;             /* The lock request. */
+	int			lm_wait;           /* The F_WAIT flag. */
+	int			lm_getlk;		/* is this a F_GETLK request */
+	struct sockaddr_storage lm_addr;		/* The address. */
+	int			lm_nfsv3;		/* If NFS version 3. */
+	size_t			lm_fh_len;		/* The file handle length. */
+	struct xucred		lm_cred;		/* user cred for lock req */
+	u_int8_t		lm_fh[NFSX_V3FHMAX];/* The file handle. */
+} LOCKD_MSG;
+
+#define LOCKD_ANS_VERSION	1
+
+struct lockd_ans {
+	int		la_vers;
+	struct lockd_msg_ident	la_msg_ident;	/* originator of the message */
+	int		la_errno;
+	int		la_set_getlk_pid;		/* use returned pid */
+	int		la_getlk_pid;		/* returned pid for F_GETLK */
+};
+
+#ifdef _KERNEL
+int	nfs_dolock(struct vop_advlock_args *ap);
+extern	vop_advlock_t *nfs_advlock_p;
+extern	vop_reclaim_t *nfs_reclaim_p;
+#endif
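The message formats above define the whole kernel/lockd protocol: the daemon reads one fixed-size LOCKD_MSG per read() from the nfslock device and answers with one fixed-size struct lockd_ans per write(); the driver in nfs_lock.c rejects any other transfer size. A userland sketch of that loop body (not the actual rpc.lockd source; the header paths and the grant-everything policy are illustrative only):

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/time.h>
    #include <sys/ucred.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    #include <nfs/nfsproto.h>	/* NFSX_V3FHMAX */
    #include <nfs/nfs_lock.h>	/* LOCKD_MSG, struct lockd_ans */

    static int
    answer_one_request(int devfd)
    {
        LOCKD_MSG msg;
        struct lockd_ans ans;

        if (read(devfd, &msg, sizeof(msg)) != sizeof(msg))
            return (-1);

        memset(&ans, 0, sizeof(ans));
        ans.la_vers = LOCKD_ANS_VERSION;
        ans.la_msg_ident = msg.lm_msg_ident;	/* answer the originator */
        ans.la_errno = 0;			/* grant the lock in this sketch */

        if (write(devfd, &ans, sizeof(ans)) != sizeof(ans))
            return (-1);
        return (0);
    }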
diff --git a/freebsd/sys/nfs/nfs_mountcommon.h b/freebsd/sys/nfs/nfs_mountcommon.h
new file mode 100644
index 0000000..d8762cf
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_mountcommon.h
@@ -0,0 +1,56 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 Rick Macklem, University of Guelph
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_MOUNTCOMMON_H_
+#define	_NFS_MOUNTCOMMON_H_
+
+/*
+ * The common fields of the nfsmount structure shared by the two NFS
+ * clients and used by the nlm. It includes a function pointer that
+ * provides a mechanism for getting the client-specific info for an nfs
+ * vnode.
+ */
+typedef void	nfs_getinfofromvp_ftype(struct vnode *, uint8_t *, size_t *,
+		    struct sockaddr_storage *, int *, off_t *,
+		    struct timeval *);
+typedef int	nfs_vinvalbuf_ftype(struct vnode *, int, struct thread *, int);
+
+struct	nfsmount_common {
+	struct mtx	nmcom_mtx;
+	int	nmcom_flag;		/* Flags for soft/hard... */
+	int	nmcom_state;		/* Internal state flags */
+	struct	mount *nmcom_mountp;	/* Vfs structure for this filesystem */
+	int	nmcom_timeo;		/* Init timer for NFSMNT_DUMBTIMR */
+	int	nmcom_retry;		/* Max retries */
+	char	nmcom_hostname[MNAMELEN];	/* server's name */
+	nfs_getinfofromvp_ftype	*nmcom_getinfo;	/* Get info from nfsnode */
+	nfs_vinvalbuf_ftype	*nmcom_vinvalbuf; /* Invalidate buffers */
+};
+
+#endif	/* _NFS_MOUNTCOMMON_H_ */
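The function pointer is what lets NLM-facing code such as nfs_dolock() above fetch the file handle, server address and NFS version without knowing which client owns the mount. A sketch of a caller written against the common structure (the function name is hypothetical; it assumes the same kernel headers nfs_lock.c already pulls in):

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/mount.h>
    #include <sys/socket.h>
    #include <sys/vnode.h>

    #include <nfs/nfsproto.h>
    #include <nfs/nfs_lock.h>
    #include <nfs/nfs_mountcommon.h>

    /* Hypothetical caller mirroring the nm_getinfo() call in nfs_dolock(). */
    static void
    example_fill_lock_msg(struct nfsmount_common *nmcom, struct vnode *vp,
        LOCKD_MSG *msg)
    {
        (*nmcom->nmcom_getinfo)(vp, msg->lm_fh, &msg->lm_fh_len,
            &msg->lm_addr, &msg->lm_nfsv3, NULL, NULL);
    }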
diff --git a/freebsd/sys/nfs/nfs_nfssvc.c b/freebsd/sys/nfs/nfs_nfssvc.c
new file mode 100644
index 0000000..19ac16a
--- /dev/null
+++ b/freebsd/sys/nfs/nfs_nfssvc.c
@@ -0,0 +1,154 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_nfs.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/module.h>
+#include <sys/sysent.h>
+#include <sys/syscall.h>
+
+#include <security/audit/audit.h>
+
+#include <nfs/nfssvc.h>
+
+static struct syscall_helper_data nfssvc_syscalls[] = {
+	SYSCALL_INIT_HELPER(nfssvc),
+	SYSCALL_INIT_LAST
+};
+
+/*
+ * This tiny module simply handles the nfssvc() system call. The other
+ * nfs modules that use the system call register themselves by setting
+ * the nfsd_call_xxx function pointers non-NULL.
+ */
+
+int (*nfsd_call_nfsserver)(struct thread *, struct nfssvc_args *) = NULL;
+int (*nfsd_call_nfscommon)(struct thread *, struct nfssvc_args *) = NULL;
+int (*nfsd_call_nfscl)(struct thread *, struct nfssvc_args *) = NULL;
+int (*nfsd_call_nfsd)(struct thread *, struct nfssvc_args *) = NULL;
+
+/*
+ * Nfs server pseudo system call for the nfsd's
+ */
+int
+sys_nfssvc(struct thread *td, struct nfssvc_args *uap)
+{
+	int error;
+
+	KASSERT(!mtx_owned(&Giant), ("nfssvc(): called with Giant"));
+
+	AUDIT_ARG_CMD(uap->flag);
+
+	/* Allow anyone to get the stats. */
+	if ((uap->flag & ~NFSSVC_GETSTATS) != 0) {
+		error = priv_check(td, PRIV_NFS_DAEMON);
+		if (error != 0)
+			return (error);
+	}
+	error = EINVAL;
+	if ((uap->flag & (NFSSVC_ADDSOCK | NFSSVC_OLDNFSD | NFSSVC_NFSD)) &&
+	    nfsd_call_nfsserver != NULL)
+		error = (*nfsd_call_nfsserver)(td, uap);
+	else if ((uap->flag & (NFSSVC_CBADDSOCK | NFSSVC_NFSCBD |
+	    NFSSVC_DUMPMNTOPTS | NFSSVC_FORCEDISM)) && nfsd_call_nfscl != NULL)
+		error = (*nfsd_call_nfscl)(td, uap);
+	else if ((uap->flag & (NFSSVC_IDNAME | NFSSVC_GETSTATS |
+	    NFSSVC_GSSDADDPORT | NFSSVC_GSSDADDFIRST | NFSSVC_GSSDDELETEALL |
+	    NFSSVC_NFSUSERDPORT | NFSSVC_NFSUSERDDELPORT)) &&
+	    nfsd_call_nfscommon != NULL)
+		error = (*nfsd_call_nfscommon)(td, uap);
+	else if ((uap->flag & (NFSSVC_NFSDNFSD | NFSSVC_NFSDADDSOCK |
+	    NFSSVC_PUBLICFH | NFSSVC_V4ROOTEXPORT | NFSSVC_NOPUBLICFH |
+	    NFSSVC_STABLERESTART | NFSSVC_ADMINREVOKE |
+	    NFSSVC_DUMPCLIENTS | NFSSVC_DUMPLOCKS | NFSSVC_BACKUPSTABLE |
+	    NFSSVC_SUSPENDNFSD | NFSSVC_RESUMENFSD | NFSSVC_PNFSDS)) &&
+	    nfsd_call_nfsd != NULL)
+		error = (*nfsd_call_nfsd)(td, uap);
+	if (error == EINTR || error == ERESTART)
+		error = 0;
+	return (error);
+}
+
+/*
+ * Called once to initialize data structures...
+ */
+static int
+nfssvc_modevent(module_t mod, int type, void *data)
+{
+	int error = 0;
+
+	switch (type) {
+	case MOD_LOAD:
+		error = syscall_helper_register(nfssvc_syscalls,
+		    SY_THR_STATIC_KLD);
+		break;
+
+	case MOD_UNLOAD:
+		if (nfsd_call_nfsserver != NULL || nfsd_call_nfscommon != NULL
+		    || nfsd_call_nfscl != NULL || nfsd_call_nfsd != NULL) {
+			error = EBUSY;
+			break;
+		}
+		syscall_helper_unregister(nfssvc_syscalls);
+		break;
+	default:
+		error = EOPNOTSUPP;
+		break;
+	}
+	return error;
+}
+static moduledata_t nfssvc_mod = {
+	"nfssvc",
+	nfssvc_modevent,
+	NULL,
+};
+DECLARE_MODULE(nfssvc, nfssvc_mod, SI_SUB_VFS, SI_ORDER_ANY);
+
+/* So that loader and kldload(2) can find us, wherever we are.. */
+MODULE_VERSION(nfssvc, 1);
+
diff --git a/freebsd/sys/nfs/nfsdiskless.h b/freebsd/sys/nfs/nfsdiskless.h
new file mode 100644
index 0000000..ff038d0
--- /dev/null
+++ b/freebsd/sys/nfs/nfsdiskless.h
@@ -0,0 +1,116 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfsdiskless.h	8.2 (Berkeley) 3/30/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSDISKLESS_H_
+#define _NFSCLIENT_NFSDISKLESS_H_
+
+/*
+ * Structure that must be initialized for a diskless nfs client.
+ * This structure is used by nfs_mountroot() to set up the root vnode,
+ * and to do a partial ifconfig(8) and route(8) so that the critical net
+ * interface can communicate with the server.
+ * The primary bootstrap is expected to fill in the appropriate fields before
+ * starting the kernel.
+ * Currently only works for AF_INET protocols.
+ * NB: All fields are stored in net byte order to avoid hassles with
+ * client/server byte ordering differences.
+ */
+
+/*
+ * I have defined a new structure that can handle an NFS Version 3 file handle
+ * but the kernel still expects the old Version 2 one to be provided. The
+ * changes required in nfs_vfsops.c for using the new are documented there in
+ * comments. (I felt that breaking network booting code by changing this
+ * structure would not be prudent at this time, since almost all servers are
+ * still Version 2 anyhow.)
+ */
+struct nfsv3_diskless {
+	struct ifaliasreq myif;			/* Default interface */
+	struct sockaddr_in mygateway;		/* Default gateway */
+	struct nfs_args	root_args;		/* Mount args for root fs */
+	int		root_fhsize;		/* Size of root file handle */
+	u_char		root_fh[NFSX_V3FHMAX];	/* File handle of root dir */
+	struct sockaddr_in root_saddr;		/* Address of root server */
+	char		root_hostnam[MNAMELEN];	/* Host name for mount pt */
+	long		root_time;		/* Timestamp of root fs */
+	char		my_hostnam[MAXHOSTNAMELEN]; /* Client host name */
+};
+
+/*
+ * Old arguments to mount NFS
+ */
+struct onfs_args {
+	struct sockaddr	*addr;		/* file server address */
+	int		addrlen;	/* length of address */
+	int		sotype;		/* Socket type */
+	int		proto;		/* and Protocol */
+	u_char		*fh;		/* File handle to be mounted */
+	int		fhsize;		/* Size, in bytes, of fh */
+	int		flags;		/* flags */
+	int		wsize;		/* write size in bytes */
+	int		rsize;		/* read size in bytes */
+	int		readdirsize;	/* readdir size in bytes */
+	int		timeo;		/* initial timeout in .1 secs */
+	int		retrans;	/* times to retry send */
+	int		maxgrouplist;	/* Max. size of group list */
+	int		readahead;	/* # of blocks to readahead */
+	int		leaseterm;	/* Term (sec) of lease */
+	int		deadthresh;	/* Retrans threshold */
+	char		*hostname;	/* server's name */
+};
+
+struct nfs_diskless {
+	struct ifaliasreq myif;			/* Default interface */
+	struct sockaddr_in mygateway;		/* Default gateway */
+	struct onfs_args root_args;		/* Mount args for root fs */
+	u_char		root_fh[NFSX_V2FH];	/* File handle of root dir */
+	struct sockaddr_in root_saddr;		/* Address of root server */
+	char		root_hostnam[MNAMELEN];	/* Host name for mount pt */
+	long		root_time;		/* Timestamp of root fs */
+	char		my_hostnam[MAXHOSTNAMELEN]; /* Client host name */
+};
+
+#ifdef _KERNEL
+extern struct nfsv3_diskless nfsv3_diskless;
+extern struct nfs_diskless nfs_diskless;
+extern int	nfs_diskless_valid;
+void bootpc_init(void);
+void nfs_setup_diskless(void);
+void nfs_parse_options(const char *, struct nfs_args *);
+#endif
+
+#endif
diff --git a/freebsd/sys/nfs/nfsproto.h b/freebsd/sys/nfs/nfsproto.h
new file mode 100644
index 0000000..dfaf768
--- /dev/null
+++ b/freebsd/sys/nfs/nfsproto.h
@@ -0,0 +1,701 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfsproto.h  8.2 (Berkeley) 3/30/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSPROTO_H_
+#define _NFS_NFSPROTO_H_
+
+/*
+ * nfs definitions as per the Version 2 and 3 specs
+ */
+
+/*
+ * Constants as defined in the Sun NFS Version 2 and 3 specs.
+ * "NFS: Network File System Protocol Specification" RFC1094
+ * and in the "NFS: Network File System Version 3 Protocol
+ * Specification"
+ */
+
+#define NFS_PORT	2049
+#define	NFS_PROG	100003
+#define NFS_VER2	2
+#define	NFS_VER3	3
+#define NFS_VER4	4
+
+#define NFS_V2MAXDATA	8192
+#define	NFS_MAXDGRAMDATA 16384
+#define	NFS_MAXDATA	32768
+#define	NFS_MAXPATHLEN	1024
+#define	NFS_MAXNAMLEN	255
+#define	NFS_MAXPKTHDR	404	/* XXXv4 this needs to be adjusted for v4 */
+#define NFS_MAXPACKET	(NFS_MAXPKTHDR + NFS_MAXDATA)
+#define	NFS_MINPACKET	20
+#define	NFS_FABLKSIZE	512	/* Size in bytes of a block wrt fa_blocks */
+
+/* Stat numbers for rpc returns (version 2, 3 and 4) */
+#define	NFS_OK			0
+#define	NFSERR_PERM		1
+#define	NFSERR_NOENT		2
+#define	NFSERR_IO		5
+#define	NFSERR_NXIO		6
+#define	NFSERR_ACCES		13
+#define	NFSERR_EXIST		17
+#define	NFSERR_XDEV		18	/* Version 3 only */
+#define	NFSERR_NODEV		19
+#define	NFSERR_NOTDIR		20
+#define	NFSERR_ISDIR		21
+#define	NFSERR_INVAL		22	/* Version 3 only */
+#define	NFSERR_FBIG		27
+#define	NFSERR_NOSPC		28
+#define	NFSERR_ROFS		30
+#define	NFSERR_MLINK		31	/* Version 3 only */
+#define	NFSERR_NAMETOL		63
+#define	NFSERR_NOTEMPTY		66
+#define	NFSERR_DQUOT		69
+#define	NFSERR_STALE		70
+#define	NFSERR_REMOTE		71	/* Version 3 only */
+#define	NFSERR_WFLUSH		99	/* Version 2 only */
+#define	NFSERR_BADHANDLE	10001	/* The rest Version 3, 4 only */
+#define	NFSERR_NOT_SYNC		10002
+#define	NFSERR_BAD_COOKIE	10003
+#define	NFSERR_NOTSUPP		10004
+#define	NFSERR_TOOSMALL		10005
+#define	NFSERR_SERVERFAULT	10006
+#define	NFSERR_BADTYPE		10007
+#define	NFSERR_JUKEBOX		10008
+#define NFSERR_TRYLATER		NFSERR_JUKEBOX
+#define	NFSERR_SAME		10009   /* The rest Version 4 only */
+#define	NFSERR_DENIED		10010
+#define	NFSERR_EXPIRED		10011
+#define	NFSERR_LOCKED		10012
+#define	NFSERR_GRACE		10013
+#define	NFSERR_FHEXPIRED	10014
+#define	NFSERR_SHARDE_DENIED	10015
+#define	NFSERR_WRONGSEC		10016
+#define	NFSERR_CLID_INUSE	10017
+#define	NFSERR_RESOURCE		10018
+#define	NFSERR_MOVED		10019
+#define	NFSERR_NOFILEHANDLE	10020
+#define	NFSERR_MINOR_VERS_MISMATCH 10021
+#define	NFSERR_STALE_CLIENTID	10022
+#define	NFSERR_STALE_STATEID	10023
+#define	NFSERR_OLD_STATEID	10024
+#define	NFSERR_BAD_STATEID	10025
+#define	NFSERR_BAD_SEQID	10026
+#define	NFSERR_NOT_SAME		10027
+#define	NFSERR_LOCK_RANGE	10028
+#define	NFSERR_SYMLINK		10029
+#define	NFSERR_READDIR_NOSPC	10030
+#define	NFSERR_LEASE_MOVED	10031
+#define	NFSERR_ATTRNOTSUPP	10032
+#define	NFSERR_NO_GRACE		10033
+#define	NFSERR_RECLAIM_BAD	10034
+#define	NFSERR_RECLAIM_CONFLICT	10035
+#define	NFSERR_BADXDR		10036
+#define	NFSERR_LOCKS_HELD	10037
+#define	NFSERR_OPENMODE		10038
+#define	NFSERR_BADOWNER		10039
+#define	NFSERR_BADCHAR		10040
+#define	NFSERR_BADNAME		10041
+#define	NFSERR_BAD_RANGE	10042
+#define	NFSERR_LOCK_NOTSUPP	10043
+#define	NFSERR_OP_ILLEGAL	10044
+#define	NFSERR_DEADLOCK		10045
+#define	NFSERR_FILE_OPEN	10046
+#define	NFSERR_STALEWRITEVERF	30001	/* Fake return for nfs_commit() */
+
+
+
+#define NFSERR_RETVOID		0x20000000 /* Return void, not error */
+#define NFSERR_AUTHERR		0x40000000 /* Mark an authentication error */
+#define NFSERR_RETERR		0x80000000 /* Mark an error return for V3 */
+
+/* Sizes in bytes of various nfs rpc components */
+#define	NFSX_UNSIGNED	4
+
+/* specific to NFS Version 2 */
+#define	NFSX_V2FH	32
+#define	NFSX_V2FATTR	68
+#define	NFSX_V2SATTR	32
+#define	NFSX_V2COOKIE	4
+#define NFSX_V2STATFS	20
+
+/* specific to NFS Version 3 */
+#define NFSX_V3FH		(sizeof (fhandle_t)) /* size this server uses */
+#define	NFSX_V3FHMAX		64	/* max. allowed by protocol */
+#define NFSX_V3FATTR		84
+#define NFSX_V3SATTR		60	/* max. all fields filled in */
+#define NFSX_V3SRVSATTR		(sizeof (struct nfsv3_sattr))
+#define NFSX_V3POSTOPATTR	(NFSX_V3FATTR + NFSX_UNSIGNED)
+#define NFSX_V3WCCDATA		(NFSX_V3POSTOPATTR + 8 * NFSX_UNSIGNED)
+#define NFSX_V3COOKIEVERF 	8
+#define NFSX_V3WRITEVERF 	8
+#define NFSX_V3CREATEVERF	8
+#define NFSX_V3STATFS		52
+#define NFSX_V3FSINFO		48
+#define NFSX_V3PATHCONF		24
+
+/* specific to NFS Version 4 */
+#define NFSX_V4VERF		8
+#define NFSX_V4FH		128
+#define NFSX_V4STATEID		16
+
+/* variants for both versions */
+#define NFSX_FH(v3)		((v3) ? (NFSX_V3FHMAX + NFSX_UNSIGNED) : \
+					NFSX_V2FH)
+#define NFSX_SRVFH(v3)		((v3) ? NFSX_V3FH : NFSX_V2FH)
+#define	NFSX_FATTR(v3)		((v3) ? NFSX_V3FATTR : NFSX_V2FATTR)
+#define NFSX_PREOPATTR(v3)	((v3) ? (7 * NFSX_UNSIGNED) : 0)
+#define NFSX_POSTOPATTR(v3)	((v3) ? (NFSX_V3FATTR + NFSX_UNSIGNED) : 0)
+#define NFSX_POSTOPORFATTR(v3)	((v3) ? (NFSX_V3FATTR + NFSX_UNSIGNED) : \
+					NFSX_V2FATTR)
+#define NFSX_WCCDATA(v3)	((v3) ? NFSX_V3WCCDATA : 0)
+#define NFSX_WCCORFATTR(v3)	((v3) ? NFSX_V3WCCDATA : NFSX_V2FATTR)
+#define	NFSX_SATTR(v3)		((v3) ? NFSX_V3SATTR : NFSX_V2SATTR)
+#define	NFSX_COOKIEVERF(v3)	((v3) ? NFSX_V3COOKIEVERF : 0)
+#define	NFSX_WRITEVERF(v3)	((v3) ? NFSX_V3WRITEVERF : 0)
+#define NFSX_READDIR(v3)	((v3) ? (5 * NFSX_UNSIGNED) : \
+					(2 * NFSX_UNSIGNED))
+#define	NFSX_STATFS(v3)		((v3) ? NFSX_V3STATFS : NFSX_V2STATFS)
+
+/* nfs rpc procedure numbers (before version mapping) */
+#define	NFSPROC_NULL		0
+#define	NFSPROC_GETATTR		1
+#define	NFSPROC_SETATTR		2
+#define	NFSPROC_LOOKUP		3
+#define	NFSPROC_ACCESS		4
+#define	NFSPROC_READLINK	5
+#define	NFSPROC_READ		6
+#define	NFSPROC_WRITE		7
+#define	NFSPROC_CREATE		8
+#define	NFSPROC_MKDIR		9
+#define	NFSPROC_SYMLINK		10
+#define	NFSPROC_MKNOD		11
+#define	NFSPROC_REMOVE		12
+#define	NFSPROC_RMDIR		13
+#define	NFSPROC_RENAME		14
+#define	NFSPROC_LINK		15
+#define	NFSPROC_READDIR		16
+#define	NFSPROC_READDIRPLUS	17
+#define	NFSPROC_FSSTAT		18
+#define	NFSPROC_FSINFO		19
+#define	NFSPROC_PATHCONF	20
+#define	NFSPROC_COMMIT		21
+#define NFSPROC_NOOP		22
+#define	NFS_NPROCS		23
+
+/* Actual Version 2 procedure numbers */
+#define	NFSV2PROC_NULL		0
+#define	NFSV2PROC_GETATTR	1
+#define	NFSV2PROC_SETATTR	2
+#define	NFSV2PROC_NOOP		3
+#define	NFSV2PROC_ROOT		NFSV2PROC_NOOP	/* Obsolete */
+#define	NFSV2PROC_LOOKUP	4
+#define	NFSV2PROC_READLINK	5
+#define	NFSV2PROC_READ		6
+#define	NFSV2PROC_WRITECACHE	NFSV2PROC_NOOP	/* Obsolete */
+#define	NFSV2PROC_WRITE		8
+#define	NFSV2PROC_CREATE	9
+#define	NFSV2PROC_REMOVE	10
+#define	NFSV2PROC_RENAME	11
+#define	NFSV2PROC_LINK		12
+#define	NFSV2PROC_SYMLINK	13
+#define	NFSV2PROC_MKDIR		14
+#define	NFSV2PROC_RMDIR		15
+#define	NFSV2PROC_READDIR	16
+#define	NFSV2PROC_STATFS	17
+
+/* Version 4 procedure numbers */
+#define NFSV4PROC_NULL         0
+#define NFSV4PROC_COMPOUND     1
+
+/* Version 4 operation numbers */
+#define NFSV4OP_ACCESS		3
+#define NFSV4OP_CLOSE		4
+#define NFSV4OP_COMMIT		5
+#define NFSV4OP_CREATE		6
+#define NFSV4OP_DELEGPURGE	7
+#define NFSV4OP_DELEGRETURN	8
+#define NFSV4OP_GETATTR		9
+#define NFSV4OP_GETFH		10
+#define NFSV4OP_LINK		11
+#define NFSV4OP_LOCK		12
+#define NFSV4OP_LOCKT		13
+#define NFSV4OP_LOCKU		14
+#define NFSV4OP_LOOKUP		15
+#define NFSV4OP_LOOKUPP		16
+#define NFSV4OP_NVERIFY		17
+#define NFSV4OP_OPEN		18
+#define NFSV4OP_OPENATTR	19
+#define NFSV4OP_OPEN_CONFIRM	20
+#define NFSV4OP_OPEN_DOWNGRADE	21
+#define NFSV4OP_PUTFH		22
+#define NFSV4OP_PUTPUBFH	23
+#define NFSV4OP_PUTROOTFH	24
+#define NFSV4OP_READ		25
+#define NFSV4OP_READDIR		26
+#define NFSV4OP_READLINK	27
+#define NFSV4OP_REMOVE		28
+#define NFSV4OP_RENAME		29
+#define NFSV4OP_RENEW		30
+#define NFSV4OP_RESTOREFH	31
+#define NFSV4OP_SAVEFH		32
+#define NFSV4OP_SECINFO		33
+#define NFSV4OP_SETATTR		34
+#define NFSV4OP_SETCLIENTID	35
+#define NFSV4OP_SETCLIENTID_CONFIRM 36
+#define NFSV4OP_VERIFY		37
+#define NFSV4OP_WRITE		38
+
+/*
+ * Constants used by the Version 3 protocol for various RPCs
+ */
+#define NFSV3SATTRTIME_DONTCHANGE	0
+#define NFSV3SATTRTIME_TOSERVER		1
+#define NFSV3SATTRTIME_TOCLIENT		2
+
+#define NFSV3ACCESS_READ		0x01
+#define NFSV3ACCESS_LOOKUP		0x02
+#define NFSV3ACCESS_MODIFY		0x04
+#define NFSV3ACCESS_EXTEND		0x08
+#define NFSV3ACCESS_DELETE		0x10
+#define NFSV3ACCESS_EXECUTE		0x20
+
+#define NFSV3WRITE_UNSTABLE		0
+#define NFSV3WRITE_DATASYNC		1
+#define NFSV3WRITE_FILESYNC		2
+
+#define NFSV3CREATE_UNCHECKED		0
+#define NFSV3CREATE_GUARDED		1
+#define NFSV3CREATE_EXCLUSIVE		2
+
+#define NFSV3FSINFO_LINK		0x01
+#define NFSV3FSINFO_SYMLINK		0x02
+#define NFSV3FSINFO_HOMOGENEOUS		0x08
+#define NFSV3FSINFO_CANSETTIME		0x10
+
+/*
+ * Constants used by the Version 4 protocol for various RPCs
+ */
+
+#define NFSV4ACCESS_READ		0x01
+#define NFSV4ACCESS_LOOKUP		0x02
+#define NFSV4ACCESS_MODIFY		0x04
+#define NFSV4ACCESS_EXTEND		0x08
+#define NFSV4ACCESS_DELETE		0x10
+#define NFSV4ACCESS_EXECUTE		0x20
+
+#define NFSV4OPENRES_MLOCK		0x01
+#define NFSV4OPENRES_CONFIRM		0x02
+
+#define NFSV4OPENSHARE_ACCESS_READ	0x01
+#define NFSV4OPENSHARE_ACCESS_WRITE	0x02
+#define NFSV4OPENSHARE_ACCESS_BOTH	0x03
+#define NFSV4OPENSHARE_DENY_NONE	0x00
+#define NFSV4OPENSHARE_DENY_READ	0x01
+#define NFSV4OPENSHARE_DENY_WRITE	0x02
+#define NFSV4OPENSHARE_DENY_BOTH	0x03
+
+/* File types */
+typedef enum {
+	NFNON=0,
+	NFREG=1,
+	NFDIR=2,
+	NFBLK=3,
+	NFCHR=4,
+	NFLNK=5,
+	NFSOCK=6,
+	NFFIFO=7,
+	NFATTRDIR = 8,
+	NFNAMEDATTR = 9,
+	NFBAD = 10,
+} nfstype;	
+
+/* NFSv4 claim type */
+typedef enum {
+	NCLNULL = 0,
+	NCLPREV = 1,
+	NCLDELEGCUR = 2,
+	NCLDELEGPREV = 3,
+} nfsv4cltype;
+
+/* Other NFSv4 types */
+typedef enum {
+	NSHUNSTABLE = 0,
+	NSHDATASYNC = 1,
+	NSHFILESYNC = 2,
+} nfsv4stablehow;
+
+typedef enum { OTNOCREATE = 0, OTCREATE = 1 } nfsv4opentype;
+typedef enum { CMUNCHECKED = 0, CMGUARDED = 1, CMEXCLUSIVE = 2 } nfsv4createmode;
+typedef enum { THSERVERTIME = 0, THCLIENTTIME = 1 } nfsv4timehow;
+typedef enum { ODNONE = 0, ODREAD = 1, ODWRITE = 2 } nfsv4opendelegtype;
+
+/* Structs for common parts of the rpc's */
+
+/*
+ * File Handle (32 bytes for version 2), variable up to 64 for version 3.
+ * File Handles of up to NFS_SMALLFH in size are stored directly in the
+ * nfs node, whereas larger ones are malloc'd. (With NFS_SMALLFH set to
+ * 128, the NFSv4 maximum, this never happens.)
+ * NFS_SMALLFH should be at least 32 and be divisible by 4.
+ */
+#ifndef NFS_SMALLFH
+#define NFS_SMALLFH	128
+#endif
+union nfsfh {
+	fhandle_t	fh_generic;
+	u_char		fh_bytes[NFS_SMALLFH];
+};
+typedef union nfsfh nfsfh_t;
+
+struct nfsv2_time {
+	u_int32_t	nfsv2_sec;
+	u_int32_t	nfsv2_usec;
+};
+typedef struct nfsv2_time	nfstime2;
+
+struct nfsv3_time {
+	u_int32_t	nfsv3_sec;
+	u_int32_t	nfsv3_nsec;
+};
+typedef struct nfsv3_time	nfstime3;
+
+/*
+ * Quads are defined as arrays of 2 longs to ensure dense packing for the
+ * protocol and to facilitate xdr conversion.
+ */
+struct nfs_uquad {
+	u_int32_t	nfsuquad[2];
+};
+typedef	struct nfs_uquad	nfsuint64;
+
+/*
+ * Used to convert between two u_longs and a u_quad_t.
+ */
+union nfs_quadconvert {
+	u_int32_t	lval[2];
+	u_quad_t	qval;
+};
+typedef union nfs_quadconvert	nfsquad_t;
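Because qval and lval share storage, the union converts between the two 32-bit words and a 64-bit quantity without any shifting. A tiny userland illustration (the union is re-declared here so the example builds outside the kernel tree; which element holds the high word depends on host byte order):

    #include <sys/types.h>
    #include <stdio.h>

    union nfs_quadconvert {
        u_int32_t	lval[2];
        u_quad_t	qval;
    };

    int
    main(void)
    {
        union nfs_quadconvert q;

        q.qval = 0x0000000100000002ULL;
        /* On little-endian hosts lval[0] is the low word. */
        printf("lval[0]=%u lval[1]=%u\n", q.lval[0], q.lval[1]);
        return (0);
    }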
+
+/*
+ * NFS Version 3 special file number.
+ */
+struct nfsv3_spec {
+	u_int32_t	specdata1;
+	u_int32_t	specdata2;
+};
+typedef	struct nfsv3_spec	nfsv3spec;
+
+/*
+ * NFS Version 4 bitmap.
+ */
+struct nfsv4_bitmap {
+	uint32_t	bmlen;
+	uint32_t	*bmval;
+};
+typedef struct nfsv4_bitmap nfsv4bitmap;
+
+struct nfsv4_changeinfo {
+	u_int		ciatomic;
+	uint64_t	cibefore;
+	uint64_t	ciafter;
+};
+typedef struct nfsv4_changeinfo nfsv4changeinfo;
+
+/*
+ * File attributes and settable attributes. These structures cover both
+ * the NFS version 2 and version 3 protocols. Note that the union is only
+ * used so that one pointer can refer to both variants. These structures
+ * go out on the wire and must be densely packed, so no quad data types
+ * are used. (All fields are longs, u_longs, or structures of the same.)
+ * NB: You can't do sizeof(struct nfs_fattr); you must use the
+ *     NFSX_FATTR(v3) macro.
+ */
+struct nfs_fattr {
+	u_int32_t	fa_type;
+	u_int32_t	fa_mode;
+	u_int32_t	fa_nlink;
+	u_int32_t	fa_uid;
+	u_int32_t	fa_gid;
+	union {
+		struct {
+			u_int32_t	nfsv2fa_size;
+			u_int32_t	nfsv2fa_blocksize;
+			u_int32_t	nfsv2fa_rdev;
+			u_int32_t	nfsv2fa_blocks;
+			u_int32_t	nfsv2fa_fsid;
+			u_int32_t	nfsv2fa_fileid;
+			nfstime2	nfsv2fa_atime;
+			nfstime2	nfsv2fa_mtime;
+			nfstime2	nfsv2fa_ctime;
+		} fa_nfsv2;
+		struct {
+			nfsuint64	nfsv3fa_size;
+			nfsuint64	nfsv3fa_used;
+			nfsv3spec	nfsv3fa_rdev;
+			nfsuint64	nfsv3fa_fsid;
+			nfsuint64	nfsv3fa_fileid;
+			nfstime3	nfsv3fa_atime;
+			nfstime3	nfsv3fa_mtime;
+			nfstime3	nfsv3fa_ctime;
+		} fa_nfsv3;
+	} fa_un;
+};
+
+/* and some ugly defines for accessing union components */
+#define	fa2_size		fa_un.fa_nfsv2.nfsv2fa_size
+#define	fa2_blocksize		fa_un.fa_nfsv2.nfsv2fa_blocksize
+#define	fa2_rdev		fa_un.fa_nfsv2.nfsv2fa_rdev
+#define	fa2_blocks		fa_un.fa_nfsv2.nfsv2fa_blocks
+#define	fa2_fsid		fa_un.fa_nfsv2.nfsv2fa_fsid
+#define	fa2_fileid		fa_un.fa_nfsv2.nfsv2fa_fileid
+#define	fa2_atime		fa_un.fa_nfsv2.nfsv2fa_atime
+#define	fa2_mtime		fa_un.fa_nfsv2.nfsv2fa_mtime
+#define	fa2_ctime		fa_un.fa_nfsv2.nfsv2fa_ctime
+#define	fa3_size		fa_un.fa_nfsv3.nfsv3fa_size
+#define	fa3_used		fa_un.fa_nfsv3.nfsv3fa_used
+#define	fa3_rdev		fa_un.fa_nfsv3.nfsv3fa_rdev
+#define	fa3_fsid		fa_un.fa_nfsv3.nfsv3fa_fsid
+#define	fa3_fileid		fa_un.fa_nfsv3.nfsv3fa_fileid
+#define	fa3_atime		fa_un.fa_nfsv3.nfsv3fa_atime
+#define	fa3_mtime		fa_un.fa_nfsv3.nfsv3fa_mtime
+#define	fa3_ctime		fa_un.fa_nfsv3.nfsv3fa_ctime
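
For illustration, a sketch of the accessor defines in use, picking the union
arm by protocol version; the fxdr_* time helpers come from the xdr_subs.h
imported later in this change, and the function name is hypothetical:

	static void
	nfs_fattr_mtime(struct nfs_fattr *fap, int v3, struct timespec *ts)
	{
		if (v3)
			fxdr_nfsv3time(&fap->fa3_mtime, ts);
		else
			fxdr_nfsv2time(&fap->fa2_mtime, ts);
	}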
+
+struct nfsv4_fattr {
+	u_int		fa4_valid;
+	nfstype		fa4_type;
+	off_t		fa4_size;
+	uint64_t	fa4_fsid_major;
+	uint64_t	fa4_fsid_minor;
+	uint64_t	fa4_fileid;
+	mode_t		fa4_mode;
+	nlink_t		fa4_nlink;
+	uid_t		fa4_uid;
+	gid_t		fa4_gid;
+	uint32_t	fa4_rdev_major;
+	uint32_t	fa4_rdev_minor;
+	struct timespec	fa4_atime;
+	struct timespec	fa4_btime;
+	struct timespec	fa4_ctime;
+	struct timespec	fa4_mtime;
+	uint64_t	fa4_maxread;
+	uint64_t	fa4_maxwrite;
+	uint64_t	fa4_ffree;
+	uint64_t	fa4_ftotal;
+	uint32_t	fa4_maxname;
+	uint64_t	fa4_savail;
+	uint64_t	fa4_sfree;
+	uint64_t	fa4_stotal;
+	uint64_t	fa4_changeid;
+	uint32_t	fa4_lease_time;
+	uint64_t	fa4_maxfilesize;
+};
+
+/* Flags for fa4_valid */
+#define FA4V_SIZE	0x00000001
+#define FA4V_FSID	0x00000002
+#define FA4V_FILEID	0x00000004
+#define FA4V_MODE	0x00000008
+#define FA4V_NLINK	0x00000010
+#define FA4V_UID	0x00000020
+#define FA4V_GID	0x00000040
+#define FA4V_RDEV	0x00000080
+#define FA4V_ATIME	0x00000100
+#define FA4V_BTIME	0x00000200
+#define FA4V_CTIME	0x00000400
+#define FA4V_MTIME	0x00000800
+#define FA4V_MAXREAD	0x00001000
+#define FA4V_MAXWRITE	0x00002000
+#define FA4V_TYPE	0x00004000
+#define FA4V_FFREE	0x00008000
+#define FA4V_FTOTAL	0x00010000
+#define FA4V_MAXNAME	0x00020000
+#define FA4V_SAVAIL	0x00040000
+#define FA4V_SFREE	0x00080000
+#define FA4V_STOTAL	0x00100000
+#define FA4V_CHANGEID	0x00200000
+#define FA4V_LEASE_TIME	0x00400000
+#define FA4V_MAXFILESIZE 0x00800000
+#define FA4V_ACL	0x01000000
+
+/* Offsets into bitmask */
+#define FA4_SUPPORTED_ATTRS	0
+#define FA4_TYPE		1
+#define FA4_FH_EXPIRE_TYPE	2
+#define FA4_CHANGE		3
+#define FA4_SIZE		4
+#define FA4_LINK_SUPPORT	5
+#define FA4_SYMLINK_SUPPORT	6
+#define FA4_NAMED_ATTR		7
+#define FA4_FSID		8
+#define FA4_UNIQUE_HANDLES	9
+#define FA4_LEASE_TIME		10
+#define FA4_RDATTR_ERROR	11
+#define FA4_ACL			12
+#define FA4_ACLSUPPORT		13
+#define FA4_ARCHIVE		14
+#define FA4_CANSETTIME		15
+#define FA4_CASE_INSENSITIVE	16
+#define FA4_CASE_PRESERVING	17 
+#define FA4_CHOWN_RESTRICTED	18
+#define FA4_FILEHANDLE		19
+#define FA4_FILEID		20
+#define FA4_FILES_AVAIL		21
+#define FA4_FILES_FREE		22
+#define FA4_FILES_TOTAL		23
+#define FA4_FS_LOCATIONS	24
+#define FA4_HIDDEN		25
+#define FA4_HOMOGENEOUS		26
+#define FA4_MAXFILESIZE		27
+#define FA4_MAXLINK		28
+#define FA4_MAXNAME		29
+#define FA4_MAXREAD		30
+#define FA4_MAXWRITE		31
+#define FA4_MIMETYPE		32
+#define FA4_MODE		33
+#define FA4_NO_TRUNC		34
+#define FA4_NUMLINKS		35
+#define FA4_OWNER		36
+#define FA4_OWNER_GROUP		37
+#define FA4_QUOTA_HARD		38
+#define FA4_QUOTA_SOFT		39
+#define FA4_QUOTA_USED		40
+#define FA4_RAWDEV		41
+#define FA4_SPACE_AVAIL		42
+#define FA4_SPACE_FREE		43
+#define FA4_SPACE_TOTAL		44
+#define FA4_SPACE_USED		45
+#define FA4_SYSTEM		46
+#define FA4_TIME_ACCESS		47
+#define FA4_TIME_ACCESS_SET	48
+#define FA4_TIME_BACKUP		49
+#define FA4_TIME_CREATE		50
+#define FA4_TIME_DELTA		51
+#define FA4_TIME_METADATA	52
+#define FA4_TIME_MODIFY		53
+#define FA4_TIME_MODIFY_SET	54
+#define FA4_ATTR_MAX		55
+
+/* Macros for v4 fattr manipulation */
+#define FA4_SET(n, p)	((p)[(n)/32] |= (1 << ((n) % 32)))
+#define FA4_CLR(n, p)	((p)[(n)/32] &= ~(1 << ((n) % 32)))
+#define FA4_ISSET(n, p)	((p)[(n)/32] & (1 << ((n) % 32)))
+#define FA4_ZERO(p)	bzero((p), 8)
+#define FA4_SKIP(p)	((p) += 2)
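
A usage sketch for the bitmap macros above: the attribute numbers stay below
64, so a two-word request bitmap suffices (function name hypothetical):

	static void
	nfs4_build_getattr_bitmap(u_int32_t *bm)	/* bm points at two words */
	{
		FA4_ZERO(bm);			/* clears both words */
		FA4_SET(FA4_TYPE, bm);
		FA4_SET(FA4_SIZE, bm);
		FA4_SET(FA4_TIME_MODIFY, bm);	/* bit 53 lands in bm[1] */
	}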
+
+struct nfsv2_sattr {
+	u_int32_t	sa_mode;
+	u_int32_t	sa_uid;
+	u_int32_t	sa_gid;
+	u_int32_t	sa_size;
+	nfstime2	sa_atime;
+	nfstime2	sa_mtime;
+};
+
+/*
+ * NFS Version 3 sattr structure for the new node creation case.
+ */
+struct nfsv3_sattr {
+	u_int32_t	sa_modetrue;
+	u_int32_t	sa_mode;
+	u_int32_t	sa_uidfalse;
+	u_int32_t	sa_gidfalse;
+	u_int32_t	sa_sizefalse;
+	u_int32_t	sa_atimetype;
+	nfstime3	sa_atime;
+	u_int32_t	sa_mtimetype;
+	nfstime3	sa_mtime;
+};
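
A sketch of filling the discriminator fields for a create that sets only the
mode and leaves the times to the server. Illustrative only: the fixed-size
struct is a convenience, the client encodes the variable-length wire form via
nfsm_v3attrbuild() from nfsm_subs.h, txdr_unsigned() comes from xdr_subs.h,
and the function name is hypothetical:

	static void
	nfs3_sattr_mode_only(struct nfsv3_sattr *sp, mode_t mode)
	{
		sp->sa_modetrue = txdr_unsigned(1);	/* "mode follows" */
		sp->sa_mode = txdr_unsigned(mode);
		sp->sa_uidfalse = txdr_unsigned(0);	/* no uid */
		sp->sa_gidfalse = txdr_unsigned(0);	/* no gid */
		sp->sa_sizefalse = txdr_unsigned(0);	/* no size */
		sp->sa_atimetype = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
		sp->sa_mtimetype = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
		/* sa_atime/sa_mtime are not sent when the type is DONTCHANGE. */
	}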
+
+struct nfs_statfs {
+	union {
+		struct {
+			u_int32_t	nfsv2sf_tsize;
+			u_int32_t	nfsv2sf_bsize;
+			u_int32_t	nfsv2sf_blocks;
+			u_int32_t	nfsv2sf_bfree;
+			u_int32_t	nfsv2sf_bavail;
+		} sf_nfsv2;
+		struct {
+			nfsuint64	nfsv3sf_tbytes;
+			nfsuint64	nfsv3sf_fbytes;
+			nfsuint64	nfsv3sf_abytes;
+			nfsuint64	nfsv3sf_tfiles;
+			nfsuint64	nfsv3sf_ffiles;
+			nfsuint64	nfsv3sf_afiles;
+			u_int32_t	nfsv3sf_invarsec;
+		} sf_nfsv3;
+	} sf_un;
+};
+
+#define sf_tsize	sf_un.sf_nfsv2.nfsv2sf_tsize
+#define sf_bsize	sf_un.sf_nfsv2.nfsv2sf_bsize
+#define sf_blocks	sf_un.sf_nfsv2.nfsv2sf_blocks
+#define sf_bfree	sf_un.sf_nfsv2.nfsv2sf_bfree
+#define sf_bavail	sf_un.sf_nfsv2.nfsv2sf_bavail
+#define sf_tbytes	sf_un.sf_nfsv3.nfsv3sf_tbytes
+#define sf_fbytes	sf_un.sf_nfsv3.nfsv3sf_fbytes
+#define sf_abytes	sf_un.sf_nfsv3.nfsv3sf_abytes
+#define sf_tfiles	sf_un.sf_nfsv3.nfsv3sf_tfiles
+#define sf_ffiles	sf_un.sf_nfsv3.nfsv3sf_ffiles
+#define sf_afiles	sf_un.sf_nfsv3.nfsv3sf_afiles
+#define sf_invarsec	sf_un.sf_nfsv3.nfsv3sf_invarsec
+
+struct nfsv3_fsinfo {
+	u_int32_t	fs_rtmax;
+	u_int32_t	fs_rtpref;
+	u_int32_t	fs_rtmult;
+	u_int32_t	fs_wtmax;
+	u_int32_t	fs_wtpref;
+	u_int32_t	fs_wtmult;
+	u_int32_t	fs_dtpref;
+	nfsuint64	fs_maxfilesize;
+	nfstime3	fs_timedelta;
+	u_int32_t	fs_properties;
+};
+
+struct nfsv3_pathconf {
+	u_int32_t	pc_linkmax;
+	u_int32_t	pc_namemax;
+	u_int32_t	pc_notrunc;
+	u_int32_t	pc_chownrestricted;
+	u_int32_t	pc_caseinsensitive;
+	u_int32_t	pc_casepreserving;
+};
+
+#endif
diff --git a/freebsd/sys/nfs/nfssvc.h b/freebsd/sys/nfs/nfssvc.h
new file mode 100644
index 0000000..4d0ef56
--- /dev/null
+++ b/freebsd/sys/nfs/nfssvc.h
@@ -0,0 +1,85 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993, 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NFS_NFSSVC_H_
+#define _NFS_NFSSVC_H_
+
+/*
+ * Flags for nfssvc() system call.
+ */
+#define	NFSSVC_OLDNFSD	0x004
+#define	NFSSVC_ADDSOCK	0x008
+#define	NFSSVC_NFSD	0x010
+
+/*
+ * Additional nfssvc() flags used by the NFSv4 subsystem.
+ */
+#define	NFSSVC_NOPUBLICFH	0x00000020
+#define	NFSSVC_STABLERESTART	0x00000040
+#define	NFSSVC_NFSDNFSD		0x00000080
+#define	NFSSVC_NFSDADDSOCK	0x00000100
+#define	NFSSVC_IDNAME		0x00000200
+#define	NFSSVC_GSSDDELETEALL	0x00000400
+#define	NFSSVC_GSSDADDPORT	0x00000800
+#define	NFSSVC_NFSUSERDPORT	0x00001000
+#define	NFSSVC_NFSUSERDDELPORT	0x00002000
+#define	NFSSVC_V4ROOTEXPORT	0x00004000
+#define	NFSSVC_ADMINREVOKE	0x00008000
+#define	NFSSVC_DUMPCLIENTS	0x00010000
+#define	NFSSVC_DUMPLOCKS	0x00020000
+#define	NFSSVC_GSSDADDFIRST	0x00040000
+#define	NFSSVC_PUBLICFH		0x00080000
+#define	NFSSVC_NFSCBD		0x00100000
+#define	NFSSVC_CBADDSOCK	0x00200000
+#define	NFSSVC_GETSTATS		0x00400000
+#define	NFSSVC_BACKUPSTABLE	0x00800000
+#define	NFSSVC_ZEROCLTSTATS	0x01000000	/* modifier for GETSTATS */
+#define	NFSSVC_ZEROSRVSTATS	0x02000000	/* modifier for GETSTATS */
+#define	NFSSVC_SUSPENDNFSD	0x04000000
+#define	NFSSVC_RESUMENFSD	0x08000000
+#define	NFSSVC_DUMPMNTOPTS	0x10000000
+#define	NFSSVC_NEWSTRUCT	0x20000000
+#define	NFSSVC_FORCEDISM	0x40000000
+#define	NFSSVC_PNFSDS		0x80000000
+
+/* Argument structure for NFSSVC_DUMPMNTOPTS. */
+struct nfscl_dumpmntopts {
+	char	*ndmnt_fname;		/* File Name */
+	size_t	ndmnt_blen;		/* Size of buffer */
+	void	*ndmnt_buf;		/* and the buffer */
+};
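
A userland sketch of how this argument structure pairs with the
NFSSVC_DUMPMNTOPTS flag above; the buffer size, header choices and error
handling are illustrative, not taken from this import:

	#include <sys/param.h>
	#include <nfs/nfssvc.h>
	#include <stdio.h>
	#include <string.h>

	int nfssvc(int flags, void *argstructp);	/* nfssvc(2) */

	static void
	dump_mount_opts(char *mntpoint)
	{
		struct nfscl_dumpmntopts dmo;
		char buf[1024];

		memset(buf, 0, sizeof(buf));
		dmo.ndmnt_fname = mntpoint;	/* which NFS mount to report on */
		dmo.ndmnt_blen = sizeof(buf);
		dmo.ndmnt_buf = buf;
		if (nfssvc(NFSSVC_DUMPMNTOPTS, &dmo) >= 0)
			printf("%s\n", buf);
	}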
+
+#endif /* _NFS_NFSSVC_H_ */
diff --git a/freebsd/sys/nfs/xdr_subs.h b/freebsd/sys/nfs/xdr_subs.h
new file mode 100644
index 0000000..829711e
--- /dev/null
+++ b/freebsd/sys/nfs/xdr_subs.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)xdr_subs.h	8.3 (Berkeley) 3/30/95
+ * $FreeBSD$
+ */
+
+
+#ifndef _NFS_XDR_SUBS_H_
+#define _NFS_XDR_SUBS_H_
+
+/*
+ * Macros used for conversion to/from xdr representation by nfs...
+ * These use the MACHINE DEPENDENT routines ntohl, htonl
+ * As defined by "XDR: External Data Representation Standard" RFC1014
+ *
+ * To simplify the implementation, we use ntohl/htonl even on big-endian
+ * machines, and count on them being `#define'd away.  Some of these
+ * might be slightly more efficient as quad_t copies on a big-endian,
+ * but we cannot count on their alignment anyway.
+ */
+
+#define	fxdr_unsigned(t, v)	((t)ntohl((int32_t)(v)))
+#define	txdr_unsigned(v)	(htonl((int32_t)(v)))
+
+#define	fxdr_nfsv2time(f, t) \
+do { \
+	(t)->tv_sec = ntohl(((struct nfsv2_time *)(f))->nfsv2_sec); \
+	if (((struct nfsv2_time *)(f))->nfsv2_usec != 0xffffffff) \
+		(t)->tv_nsec = 1000 * ntohl(((struct nfsv2_time *)(f))->nfsv2_usec); \
+	else \
+		(t)->tv_nsec = 0; \
+} while (0)
+#define	txdr_nfsv2time(f, t) \
+do { \
+	((struct nfsv2_time *)(t))->nfsv2_sec = htonl((f)->tv_sec); \
+	if ((f)->tv_nsec != -1) \
+		((struct nfsv2_time *)(t))->nfsv2_usec = htonl((f)->tv_nsec / 1000); \
+	else \
+		((struct nfsv2_time *)(t))->nfsv2_usec = 0xffffffff; \
+} while (0)
+
+#define	fxdr_nfsv3time(f, t) \
+do { \
+	(t)->tv_sec = ntohl(((struct nfsv3_time *)(f))->nfsv3_sec); \
+	(t)->tv_nsec = ntohl(((struct nfsv3_time *)(f))->nfsv3_nsec); \
+} while (0)
+#define	txdr_nfsv3time(f, t) \
+do { \
+	((struct nfsv3_time *)(t))->nfsv3_sec = htonl((f)->tv_sec); \
+	((struct nfsv3_time *)(t))->nfsv3_nsec = htonl((f)->tv_nsec); \
+} while (0)
+
+#define	fxdr_hyper(f) \
+	((((u_quad_t)ntohl(((u_int32_t *)(f))[0])) << 32) | \
+	 (u_quad_t)(ntohl(((u_int32_t *)(f))[1])))
+#define	txdr_hyper(f, t) \
+do { \
+	((u_int32_t *)(t))[0] = htonl((u_int32_t)((f) >> 32)); \
+	((u_int32_t *)(t))[1] = htonl((u_int32_t)((f) & 0xffffffff)); \
+} while (0)
+
+#endif
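
A decode sketch using the macros above: pulling a 64-bit hyper and an NFSv3
timestamp out of a word-aligned reply buffer (pointer handling and the
function name are illustrative):

	static void
	nfs_decode_size_mtime(u_int32_t *p, u_quad_t *sizep, struct timespec *mtime)
	{
		*sizep = fxdr_hyper(p);		/* consumes two 32-bit words */
		p += 2;
		fxdr_nfsv3time(p, mtime);	/* consumes two more */
	}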
diff --git a/freebsd/sys/nfsclient/nfs.h b/freebsd/sys/nfsclient/nfs.h
new file mode 100644
index 0000000..ab50d89
--- /dev/null
+++ b/freebsd/sys/nfsclient/nfs.h
@@ -0,0 +1,295 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993, 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfs.h	8.4 (Berkeley) 5/1/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFS_H_
+#define _NFSCLIENT_NFS_H_
+
+#ifdef _KERNEL
+#include "opt_nfs.h"
+#endif
+
+#include <nfsclient/nfsargs.h>
+
+/*
+ * Tunable constants for nfs
+ */
+
+#define NFS_TICKINTVL	10		/* Desired time for a tick (msec) */
+#define NFS_HZ		(hz / nfs_ticks) /* Ticks/sec */
+#define	NFS_TIMEO	(1 * NFS_HZ)	/* Default timeout = 1 second */
+#define	NFS_MINTIMEO	(1 * NFS_HZ)	/* Min timeout to use */
+#define	NFS_MAXTIMEO	(60 * NFS_HZ)	/* Max timeout to backoff to */
+#define	NFS_MINIDEMTIMEO (5 * NFS_HZ)	/* Min timeout for non-idempotent ops*/
+#define	NFS_MAXREXMIT	100		/* Stop counting after this many */
+#define	NFS_RETRANS	10		/* Num of retrans for UDP soft mounts */
+#define	NFS_RETRANS_TCP	2		/* Num of retrans for TCP soft mounts */
+#define	NFS_MAXGRPS	16		/* Max. size of groups list */
+#ifndef NFS_MINATTRTIMO
+#define	NFS_MINATTRTIMO 3		/* VREG attrib cache timeout in sec */
+#endif
+#ifndef NFS_MAXATTRTIMO
+#define	NFS_MAXATTRTIMO 60
+#endif
+#ifndef NFS_MINDIRATTRTIMO
+#define	NFS_MINDIRATTRTIMO 3		/* VDIR attrib cache timeout in sec */
+#endif
+#ifndef NFS_MAXDIRATTRTIMO
+#define	NFS_MAXDIRATTRTIMO 60
+#endif
+#ifndef	NFS_ACCESSCACHESIZE
+#define	NFS_ACCESSCACHESIZE 8		/* Per-node access cache entries */
+#endif
+#define	NFS_WSIZE	8192		/* Def. write data size <= 8192 */
+#define	NFS_RSIZE	8192		/* Def. read data size <= 8192 */
+#define NFS_READDIRSIZE	8192		/* Def. readdir size */
+#define	NFS_DEFRAHEAD	1		/* Def. read ahead # blocks */
+#define	NFS_MAXRAHEAD	4		/* Max. read ahead # blocks */
+#define	NFS_MAXASYNCDAEMON 	64	/* Max. number async_daemons runnable */
+#define	NFS_DIRBLKSIZ	4096		/* Must be a multiple of DIRBLKSIZ */
+#ifdef _KERNEL
+#define	DIRBLKSIZ	512		/* XXX we used to use ufs's DIRBLKSIZ */
+#endif
+#define NFS_MAXDEADTHRESH	9	/* How long till we say 'server not responding' */
+
+/*
+ * Oddballs
+ */
+#define NFS_CMPFH(n, f, s) \
+	((n)->n_fhsize == (s) && !bcmp((caddr_t)(n)->n_fhp, (caddr_t)(f), (s)))
+#define NFS_ISV3(v)	(VFSTONFS((v)->v_mount)->nm_flag & NFSMNT_NFSV3)
+#define NFS_ISV4(v)	(VFSTONFS((v)->v_mount)->nm_flag & NFSMNT_NFSV4)
+
+#define NFSSTA_HASWRITEVERF	0x00040000  /* Has write verifier for V3 */
+#define NFSSTA_GOTFSINFO	0x00100000  /* Got the V3 fsinfo */
+#define	NFSSTA_SNDLOCK		0x01000000  /* Send socket lock */
+#define	NFSSTA_WANTSND		0x02000000  /* Want above */
+#define	NFSSTA_TIMEO		0x10000000  /* Experiencing a timeout */
+#define	NFSSTA_LOCKTIMEO	0x20000000  /* Experiencing a lockd timeout */
+
+
+/*
+ * XXX to allow amd to include nfs.h without nfsproto.h
+ */
+#ifdef NFS_NPROCS
+#include <nfsclient/nfsstats.h>
+#endif
+
+/*
+ * vfs.oldnfs sysctl(3) identifiers
+ */
+#define NFS_NFSSTATS	1		/* struct: struct nfsstats */
+
+#ifdef _KERNEL
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_NFSDIROFF);
+MALLOC_DECLARE(M_NFSDIRECTIO);
+#endif
+
+extern struct uma_zone *nfsmount_zone;
+
+extern struct nfsstats nfsstats;
+extern struct mtx nfs_iod_mtx;
+extern struct task nfs_nfsiodnew_task;
+
+extern int nfs_numasync;
+extern unsigned int nfs_iodmax;
+extern int nfs_pbuf_freecnt;
+extern int nfs_ticks;
+
+/* Data constants in XDR form */
+extern u_int32_t nfs_true, nfs_false, nfs_xdrneg1;
+extern u_int32_t rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers;
+extern u_int32_t rpc_auth_unix, rpc_msgaccepted, rpc_call, rpc_autherr;
+
+extern int nfsv3_procid[NFS_NPROCS];
+
+/*
+ * Socket errors ignored for connectionless sockets??
+ * For now, ignore them all
+ */
+#define	NFSIGNORE_SOERROR(s, e) \
+		((e) != EINTR && (e) != EIO && \
+		(e) != ERESTART && (e) != EWOULDBLOCK && \
+		((s) & PR_CONNREQUIRED) == 0)
+
+struct nfsmount;
+
+struct buf;
+struct socket;
+struct uio;
+struct vattr;
+
+/*
+ * Pointers to ops that differ from v3 to v4
+ */
+struct nfs_rpcops {
+	int	(*nr_readrpc)(struct vnode *vp, struct uio *uiop,
+		    struct ucred *cred);
+	int	(*nr_writerpc)(struct vnode *vp, struct uio *uiop,
+		    struct ucred *cred, int *iomode, int *must_commit);
+	int	(*nr_writebp)(struct buf *bp, int force, struct thread *td);
+	int	(*nr_readlinkrpc)(struct vnode *vp, struct uio *uiop,
+		    struct ucred *cred);
+	void	(*nr_invaldir)(struct vnode *vp);
+	int	(*nr_commit)(struct vnode *vp, u_quad_t offset, int cnt,
+		    struct ucred *cred, struct thread *td);
+};
+
+/*
+ * Defines for WebNFS
+ */
+
+#define WEBNFS_ESC_CHAR		'%'
+#define WEBNFS_SPECCHAR_START	0x80
+
+#define WEBNFS_NATIVE_CHAR	0x80
+/*
+ * ..
+ * Possibly more here in the future.
+ */
+
+/*
+ * Macro for converting escape characters in WebNFS pathnames.
+ * Should really be in libkern.
+ */
+
+#define HEXTOC(c) \
+	((c) >= 'a' ? ((c) - ('a' - 10)) : \
+	    ((c) >= 'A' ? ((c) - ('A' - 10)) : ((c) - '0')))
+#define HEXSTRTOI(p) \
+	((HEXTOC(p[0]) << 4) + HEXTOC(p[1]))
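
A sketch of the escape decoding these macros exist for: given the two hex
digits that follow a WEBNFS_ESC_CHAR, recover the byte (helper name is
hypothetical):

	static u_char
	webnfs_unescape(const char *p)		/* p points at the two hex digits */
	{
		return ((u_char)HEXSTRTOI(p));	/* "3a" decodes to 0x3a, i.e. ':' */
	}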
+
+/* nfs_sigintr() helper, when 'rep' has all we need */
+#define NFS_SIGREP(rep)		nfs_sigintr((rep)->r_nmp, (rep), (rep)->r_td)
+
+#ifdef NFS_DEBUG
+
+extern int nfs_debug;
+#define NFS_DEBUG_ASYNCIO	1 /* asynchronous i/o */
+#define NFS_DEBUG_WG		2 /* server write gathering */
+#define NFS_DEBUG_RC		4 /* server request caching */
+
+#define NFS_DPF(cat, args)					\
+	do {							\
+		if (nfs_debug & NFS_DEBUG_##cat) printf args;	\
+	} while (0)
+
+#else
+
+#define NFS_DPF(cat, args)
+
+#endif
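
A sketch of the debug macro in use; the category must be one of the
NFS_DEBUG_* suffixes above, and the message itself is illustrative:

	static void
	nfs_report_iods(void)
	{
		NFS_DPF(ASYNCIO, ("nfs: %d async daemons running\n", nfs_numasync));
	}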
+
+/*
+ * On fast networks, the estimator will try to reduce the
+ * timeout lower than the latency of the server's disks,
+ * which results in too many timeouts, so cap the lower
+ * bound.
+ */
+#define NFS_MINRTO	(NFS_HZ >> 2)
+
+/*
+ * Keep the RTO from increasing to unreasonably large values
+ * when a server is not responding.
+ */
+#define NFS_MAXRTO	(20 * NFS_HZ)
+
+enum nfs_rto_timer_t {
+	NFS_DEFAULT_TIMER,
+	NFS_GETATTR_TIMER,
+	NFS_LOOKUP_TIMER,
+	NFS_READ_TIMER,
+	NFS_WRITE_TIMER,
+};
+#define NFS_MAX_TIMER	(NFS_WRITE_TIMER)
+
+#define NFS_INITRTT	(NFS_HZ << 3)
+
+vfs_init_t nfs_init;
+vfs_uninit_t nfs_uninit;
+int	nfs_mountroot(struct mount *mp);
+
+void	nfs_purgecache(struct vnode *);
+int	nfs_vinvalbuf(struct vnode *, int, struct thread *, int);
+int	nfs_readrpc(struct vnode *, struct uio *, struct ucred *);
+int	nfs_writerpc(struct vnode *, struct uio *, struct ucred *, int *,
+	    int *);
+int	nfs_commit(struct vnode *vp, u_quad_t offset, int cnt,
+	    struct ucred *cred, struct thread *td);
+int	nfs_readdirrpc(struct vnode *, struct uio *, struct ucred *);
+void	nfs_nfsiodnew(void);
+void	nfs_nfsiodnew_tq(__unused void *, int);
+int	nfs_asyncio(struct nfsmount *, struct buf *, struct ucred *, struct thread *);
+int	nfs_doio(struct vnode *, struct buf *, struct ucred *, struct thread *);
+void	nfs_doio_directwrite (struct buf *);
+int	nfs_readlinkrpc(struct vnode *, struct uio *, struct ucred *);
+int	nfs_sigintr(struct nfsmount *, struct thread *);
+int	nfs_readdirplusrpc(struct vnode *, struct uio *, struct ucred *);
+int	nfs_request(struct vnode *, struct mbuf *, int, struct thread *,
+	    struct ucred *, struct mbuf **, struct mbuf **, caddr_t *);
+int	nfs_loadattrcache(struct vnode **, struct mbuf **, caddr_t *,
+	    struct vattr *, int);
+int	nfsm_mbuftouio(struct mbuf **, struct uio *, int, caddr_t *);
+void	nfs_nhinit(void);
+void	nfs_nhuninit(void);
+int	nfs_nmcancelreqs(struct nfsmount *);
+void	nfs_timer(void*);
+
+int	nfs_connect(struct nfsmount *);
+void	nfs_disconnect(struct nfsmount *);
+void	nfs_safedisconnect(struct nfsmount *);
+int	nfs_getattrcache(struct vnode *, struct vattr *);
+int	nfs_iosize(struct nfsmount *nmp);
+int	nfsm_strtmbuf(struct mbuf **, char **, const char *, long);
+int	nfs_bioread(struct vnode *, struct uio *, int, struct ucred *);
+int	nfsm_uiotombuf(struct uio *, struct mbuf **, int, caddr_t *);
+void	nfs_clearcommit(struct mount *);
+int	nfs_writebp(struct buf *, int, struct thread *);
+int	nfs_fsinfo(struct nfsmount *, struct vnode *, struct ucred *,
+	    struct thread *);
+int	nfs_meta_setsize (struct vnode *, struct ucred *,
+	    struct thread *, u_quad_t);
+
+void	nfs_set_sigmask(struct thread *td, sigset_t *oldset);
+void	nfs_restore_sigmask(struct thread *td, sigset_t *set);
+int	nfs_msleep(struct thread *td, void *ident, struct mtx *mtx,
+	    int priority, char *wmesg, int timo);
+
+#endif	/* _KERNEL */
+
+#endif
diff --git a/freebsd/sys/nfsclient/nfsargs.h b/freebsd/sys/nfsclient/nfsargs.h
new file mode 100644
index 0000000..4635694
--- /dev/null
+++ b/freebsd/sys/nfsclient/nfsargs.h
@@ -0,0 +1,106 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993, 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfs.h	8.4 (Berkeley) 5/1/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSARGS_H_
+#define _NFSCLIENT_NFSARGS_H_
+
+/*
+ * Arguments to mount NFS
+ */
+#define NFS_ARGSVERSION	3		/* change when nfs_args changes */
+struct nfs_args {
+	int		version;	/* args structure version number */
+	struct sockaddr	*addr;		/* file server address */
+	int		addrlen;	/* length of address */
+	int		sotype;		/* Socket type */
+	int		proto;		/* and Protocol */
+	u_char		*fh;		/* File handle to be mounted */
+	int		fhsize;		/* Size, in bytes, of fh */
+	int		flags;		/* flags */
+	int		wsize;		/* write size in bytes */
+	int		rsize;		/* read size in bytes */
+	int		readdirsize;	/* readdir size in bytes */
+	int		timeo;		/* initial timeout in .1 secs */
+	int		retrans;	/* times to retry send */
+	int		maxgrouplist;	/* Max. size of group list */
+	int		readahead;	/* # of blocks to readahead */
+	int		wcommitsize;	/* Max. write commit size in bytes */
+	int		deadthresh;	/* Retrans threshold */
+	char		*hostname;	/* server's name */
+	int		acregmin;	/* cache attrs for reg files min time */
+	int		acregmax;	/* cache attrs for reg files max time */
+	int		acdirmin;	/* cache attrs for dirs min time */
+	int		acdirmax;	/* cache attrs for dirs max time */
+};
+
+/*
+ * NFS mount option flags
+ */
+#define	NFSMNT_SOFT		0x00000001  /* soft mount (hard is default) */
+#define	NFSMNT_WSIZE		0x00000002  /* set write size */
+#define	NFSMNT_RSIZE		0x00000004  /* set read size */
+#define	NFSMNT_TIMEO		0x00000008  /* set initial timeout */
+#define	NFSMNT_RETRANS		0x00000010  /* set number of request retries */
+#define	NFSMNT_MAXGRPS		0x00000020  /* set maximum grouplist size */
+#define	NFSMNT_INT		0x00000040  /* allow interrupts on hard mount */
+#define	NFSMNT_NOCONN		0x00000080  /* Don't Connect the socket */
+#define	NFSMNT_ONEOPENOWN	0x00000100  /* Use one OpenOwner for NFSv4.1 */
+#define	NFSMNT_NFSV3		0x00000200  /* Use NFS Version 3 protocol */
+#define	NFSMNT_KERB		0x00000400  /* Use RPCSEC_GSS/Krb5 */
+#define	NFSMNT_DUMBTIMR		0x00000800  /* Don't estimate rtt dynamically */
+#define	NFSMNT_WCOMMITSIZE	0x00001000  /* set max write commit size */
+#define	NFSMNT_READAHEAD	0x00002000  /* set read ahead */
+#define	NFSMNT_DEADTHRESH	0x00004000  /* set dead server retry thresh */
+#define	NFSMNT_RESVPORT		0x00008000  /* Allocate a reserved port */
+#define	NFSMNT_RDIRPLUS		0x00010000  /* Use Readdirplus for V3 */
+#define	NFSMNT_READDIRSIZE	0x00020000  /* Set readdir size */
+#define	NFSMNT_ACREGMIN		0x00040000
+#define	NFSMNT_ACREGMAX		0x00080000
+#define	NFSMNT_ACDIRMIN		0x00100000
+#define	NFSMNT_ACDIRMAX		0x00200000
+#define	NFSMNT_NOLOCKD		0x00400000 /* Locks are local */
+#define	NFSMNT_NFSV4		0x00800000 /* Use NFS Version 4 protocol */
+#define	NFSMNT_HASWRITEVERF	0x01000000 /* NFSv4 Write verifier */
+#define	NFSMNT_INTEGRITY	0x02000000 /* Use integrity with RPCSEC_GSS */
+#define	NFSMNT_PRIVACY		0x04000000 /* Use privacy with RPCSEC_GSS */
+#define	NFSMNT_ALLGSSNAME	0x08000000 /* Use principal for all accesses */
+#define	NFSMNT_STRICT3530	0x10000000 /* Adhere strictly to RFC3530 */
+#define	NFSMNT_NOCTO		0x20000000 /* Don't flush attrcache on open */
+#define	NFSMNT_PNFS		0x40000000 /* Enable pNFS support */
+#define	NFSMNT_NONCONTIGWR	0x80000000 /* Enable non-contiguous writes */
+
+#endif
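
A sketch of the fields a caller fills before handing nfs_args to the mount
code; the transport and flag choices are illustrative, and NFS_RSIZE/NFS_WSIZE
come from the client nfs.h imported above:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <string.h>

	static void
	nfs_fill_args(struct nfs_args *na, struct sockaddr *sa, int salen,
	    u_char *fh, int fhsize)
	{
		memset(na, 0, sizeof(*na));
		na->version = NFS_ARGSVERSION;
		na->addr = sa;
		na->addrlen = salen;
		na->sotype = SOCK_STREAM;
		na->proto = IPPROTO_TCP;
		na->fh = fh;
		na->fhsize = fhsize;
		na->flags = NFSMNT_NFSV3 | NFSMNT_RSIZE | NFSMNT_WSIZE;
		na->rsize = NFS_RSIZE;
		na->wsize = NFS_WSIZE;
	}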
diff --git a/freebsd/sys/nfsclient/nfsm_subs.h b/freebsd/sys/nfsclient/nfsm_subs.h
new file mode 100644
index 0000000..b918e78
--- /dev/null
+++ b/freebsd/sys/nfsclient/nfsm_subs.h
@@ -0,0 +1,180 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfsm_subs.h	8.2 (Berkeley) 3/30/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSM_SUBS_H_
+#define _NFSCLIENT_NFSM_SUBS_H_
+
+#include <nfs/nfs_common.h>
+
+#define	nfsv2tov_type(a)	nv2tov_type[fxdr_unsigned(u_int32_t,(a))&0x7]
+
+struct ucred;
+struct vnode;
+
+/*
+ * These macros do strange and peculiar things to mbuf chains for
+ * the assistance of the nfs code. To attempt to use them for any
+ * other purpose will be dangerous. (they make weird assumptions)
+ */
+
+/*
+ * First define what the actual subs. return
+ */
+u_int32_t nfs_xid_gen(void);
+
+/* *********************************** */
+/* Request generation phase macros */
+
+int	nfsm_fhtom_xx(struct vnode *v, int v3, struct mbuf **mb,
+	    caddr_t *bpos);
+void	nfsm_v3attrbuild_xx(struct vattr *va, int full, struct mbuf **mb,
+	    caddr_t *bpos);
+int	nfsm_strtom_xx(const char *a, int s, int m, struct mbuf **mb,
+	    caddr_t *bpos);
+
+#define nfsm_bcheck(t1, mreq) \
+do { \
+	if (t1) { \
+		error = t1; \
+		m_freem(mreq); \
+		goto nfsmout; \
+	} \
+} while (0)
+
+#define nfsm_fhtom(v, v3) \
+do { \
+	int32_t t1; \
+	t1 = nfsm_fhtom_xx((v), (v3), &mb, &bpos); \
+	nfsm_bcheck(t1, mreq); \
+} while (0)
+
+/* If full is true, set all fields, otherwise just set mode and time fields */
+#define nfsm_v3attrbuild(a, full) \
+	nfsm_v3attrbuild_xx(a, full, &mb, &bpos)
+
+#define nfsm_uiotom(p, s) \
+do { \
+	int t1; \
+	t1 = nfsm_uiotombuf((p), &mb, (s), &bpos); \
+	nfsm_bcheck(t1, mreq); \
+} while (0)
+
+#define	nfsm_strtom(a, s, m) \
+do { \
+	int t1; \
+	t1 = nfsm_strtom_xx((a), (s), (m), &mb, &bpos); \
+	nfsm_bcheck(t1, mreq); \
+} while (0)
+
+/* *********************************** */
+/* Send the request */
+
+#define	nfsm_request(v, t, p, c) \
+do { \
+	sigset_t oldset; \
+	nfs_set_sigmask(p, &oldset); \
+	error = nfs_request((v), mreq, (t), (p), (c), &mrep, &md, &dpos); \
+	nfs_restore_sigmask(p, &oldset); \
+	if (error != 0) { \
+		if (error & NFSERR_RETERR) \
+			error &= ~NFSERR_RETERR; \
+		else \
+			goto nfsmout; \
+	} \
+} while (0)
+
+/* *********************************** */
+/* Reply interpretation phase macros */
+
+int	nfsm_mtofh_xx(struct vnode *d, struct vnode **v, int v3, int *f,
+	    struct mbuf **md, caddr_t *dpos);
+int	nfsm_getfh_xx(nfsfh_t **f, int *s, int v3, struct mbuf **md,
+	    caddr_t *dpos);
+int	nfsm_loadattr_xx(struct vnode **v, struct vattr *va, struct mbuf **md,
+	    caddr_t *dpos);
+int	nfsm_postop_attr_xx(struct vnode **v, int *f, struct vattr *va,
+	    struct mbuf **md, caddr_t *dpos);
+int	nfsm_wcc_data_xx(struct vnode **v, int *f, struct mbuf **md,
+	    caddr_t *dpos);
+
+#define nfsm_mtofh(d, v, v3, f) \
+do { \
+	int32_t t1; \
+	t1 = nfsm_mtofh_xx((d), &(v), (v3), &(f), &md, &dpos); \
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+#define nfsm_getfh(f, s, v3) \
+do { \
+	int32_t t1; \
+	t1 = nfsm_getfh_xx(&(f), &(s), (v3), &md, &dpos); \
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+#define	nfsm_loadattr(v, a) \
+do { \
+	int32_t t1; \
+	t1 = nfsm_loadattr_xx(&v, a, &md, &dpos); \
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+#define	nfsm_postop_attr(v, f) \
+do { \
+	int32_t t1; \
+	t1 = nfsm_postop_attr_xx(&v, &f, NULL, &md, &dpos);	\
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+#define	nfsm_postop_attr_va(v, f, va)		\
+do { \
+	int32_t t1; \
+	t1 = nfsm_postop_attr_xx(&v, &f, va, &md, &dpos);	\
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+/* Used as (f) for nfsm_wcc_data() */
+#define NFSV3_WCCRATTR	0
+#define NFSV3_WCCCHK	1
+
+#define	nfsm_wcc_data(v, f) \
+do { \
+	int32_t t1; \
+	t1 = nfsm_wcc_data_xx(&v, &f, &md, &dpos); \
+	nfsm_dcheck(t1, mrep); \
+} while (0)
+
+#endif
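
A sketch of the calling convention the request/reply macros above assume: the
locals mreq, mb, bpos, mrep, md, dpos and error, plus the nfsmout label, must
all exist in the enclosing function. The mbuf setup is schematic (the real
client sizes and chains the request properly) and the function itself is
hypothetical, not part of the import:

	static int
	nfs_getattr_sketch(struct vnode *vp, struct vattr *vap,
	    struct ucred *cred, struct thread *td)
	{
		struct mbuf *mreq, *mrep, *md, *mb;
		caddr_t bpos, dpos;
		int error = 0, v3 = NFS_ISV3(vp);

		MGET(mreq, M_WAITOK, MT_DATA);	/* schematic request mbuf */
		mb = mreq;
		bpos = mtod(mb, caddr_t);

		nfsm_fhtom(vp, v3);		/* encode the file handle */
		nfsm_request(vp, NFSPROC_GETATTR, td, cred);	/* run the RPC */
		if (error == 0)
			nfsm_loadattr(vp, vap);	/* decode the attributes */
		m_freem(mrep);
	nfsmout:
		return (error);
	}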
diff --git a/freebsd/sys/nfsclient/nfsmount.h b/freebsd/sys/nfsclient/nfsmount.h
new file mode 100644
index 0000000..b048c10
--- /dev/null
+++ b/freebsd/sys/nfsclient/nfsmount.h
@@ -0,0 +1,132 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfsmount.h	8.3 (Berkeley) 3/30/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSMOUNT_H_
+#define _NFSCLIENT_NFSMOUNT_H_
+
+#include <sys/socket.h>
+
+#include <nfs/nfs_mountcommon.h>
+
+#include <rpc/types.h>
+#include <rpc/auth.h>
+#include <rpc/clnt.h>
+#include <rpc/rpcsec_gss.h>
+
+/*
+ * Mount structure.
+ * One allocated on every NFS mount.
+ * Holds NFS specific information for mount.
+ */
+struct	nfsmount {
+	struct	nfsmount_common nm_com;	/* Common fields for nlm */
+	int	nm_numgrps;		/* Max. size of groupslist */
+	u_char	nm_fh[NFSX_V4FH];	/* File handle of root dir */
+	int	nm_fhsize;		/* Size of root file handle */
+	int	nm_sotype;		/* Type of socket */
+	int	nm_soproto;		/* and protocol */
+	int	nm_soflags;		/* pr_flags for socket protocol */
+	struct	sockaddr *nm_nam;	/* Addr of server */
+	int	nm_deadthresh;		/* Threshold of timeouts-->dead server*/
+	int	nm_rsize;		/* Max size of read rpc */
+	int	nm_wsize;		/* Max size of write rpc */
+	int	nm_readdirsize;		/* Size of a readdir rpc */
+	int	nm_readahead;		/* Num. of blocks to readahead */
+	int	nm_wcommitsize;		/* Max size of commit for write */
+	int	nm_acdirmin;		/* Directory attr cache min lifetime */
+	int	nm_acdirmax;		/* Directory attr cache max lifetime */
+	int	nm_acregmin;		/* Reg file attr cache min lifetime */
+	int	nm_acregmax;		/* Reg file attr cache max lifetime */
+	u_char	nm_verf[NFSX_V3WRITEVERF]; /* V3 write verifier */
+	TAILQ_HEAD(, buf) nm_bufq;	/* async io buffer queue */
+	short	nm_bufqlen;		/* number of buffers in queue */
+	short	nm_bufqwant;		/* process wants to add to the queue */
+	int	nm_bufqiods;		/* number of iods processing queue */
+	u_int64_t nm_maxfilesize;	/* maximum file size */
+	struct nfs_rpcops *nm_rpcops;
+	int	nm_tprintf_initial_delay;	/* initial delay */
+	int	nm_tprintf_delay;		/* interval for messages */
+	int	nm_secflavor;		 /* auth flavor to use for rpc */
+	struct __rpc_client *nm_client;
+	struct rpc_timers nm_timers[NFS_MAX_TIMER]; /* RTT Timers for rpcs */
+	char	nm_principal[MNAMELEN];	/* GSS-API principal of server */
+	gss_OID	nm_mech_oid;		/* OID of selected GSS-API mechanism */
+	int	nm_nametimeo;		/* timeout for +ve entries (sec) */
+	int	nm_negnametimeo;	/* timeout for -ve entries (sec) */
+
+	/* NFSv4 */
+	uint64_t nm_clientid;
+	fsid_t	nm_fsid;
+	u_int	nm_lease_time;
+	time_t	nm_last_renewal;
+};
+
+#define	nm_mtx		nm_com.nmcom_mtx
+#define	nm_flag		nm_com.nmcom_flag
+#define	nm_state	nm_com.nmcom_state
+#define	nm_mountp	nm_com.nmcom_mountp
+#define	nm_timeo	nm_com.nmcom_timeo
+#define	nm_retry	nm_com.nmcom_retry
+#define	nm_hostname	nm_com.nmcom_hostname
+#define	nm_getinfo	nm_com.nmcom_getinfo
+#define	nm_vinvalbuf	nm_com.nmcom_vinvalbuf
+
+#if defined(_KERNEL)
+/*
+ * Convert mount ptr to nfsmount ptr.
+ */
+#define VFSTONFS(mp)	((struct nfsmount *)((mp)->mnt_data))
+
+#ifndef NFS_TPRINTF_INITIAL_DELAY
+#define NFS_TPRINTF_INITIAL_DELAY       12
+#endif
+
+#ifndef NFS_TPRINTF_DELAY
+#define NFS_TPRINTF_DELAY               30
+#endif
+
+#ifndef NFS_DEFAULT_NAMETIMEO
+#define NFS_DEFAULT_NAMETIMEO		60
+#endif
+
+#ifndef NFS_DEFAULT_NEGNAMETIMEO
+#define NFS_DEFAULT_NEGNAMETIMEO	60
+#endif
+
+#endif
+
+#endif
diff --git a/freebsd/sys/nfsclient/nfsnode.h b/freebsd/sys/nfsclient/nfsnode.h
new file mode 100644
index 0000000..3c6856d
--- /dev/null
+++ b/freebsd/sys/nfsclient/nfsnode.h
@@ -0,0 +1,215 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfsnode.h	8.9 (Berkeley) 5/14/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSNODE_H_
+#define _NFSCLIENT_NFSNODE_H_
+
+#include <sys/_task.h>
+#if !defined(_NFSCLIENT_NFS_H_) && !defined(_KERNEL)
+#include <nfs/nfs.h>
+#endif
+
+/*
+ * Silly rename structure that hangs off the nfsnode until the name
+ * can be removed by nfs_inactive()
+ */
+struct sillyrename {
+	struct	task s_task;
+	struct	ucred *s_cred;
+	struct	vnode *s_dvp;
+	int	(*s_removeit)(struct sillyrename *sp);
+	long	s_namlen;
+	char	s_name[32];
+};
+
+/*
+ * This structure is used to save the logical directory offset to
+ * NFS cookie mappings.
+ * The mappings are stored in a list headed
+ * by n_cookies, as required.
+ * There is one mapping for each NFS_DIRBLKSIZ bytes of directory information
+ * stored in increasing logical offset byte order.
+ */
+#define NFSNUMCOOKIES		31
+
+struct nfsdmap {
+	LIST_ENTRY(nfsdmap)	ndm_list;
+	int			ndm_eocookie;
+	union {
+		nfsuint64	ndmu3_cookies[NFSNUMCOOKIES];
+		uint64_t	ndmu4_cookies[NFSNUMCOOKIES];
+	} ndm_un1;
+};
+
+#define ndm_cookies	ndm_un1.ndmu3_cookies
+#define ndm4_cookies	ndm_un1.ndmu4_cookies
+
+struct nfs_accesscache {
+	u_int32_t		mode;		/* ACCESS mode cache */
+	uid_t			uid;		/* credentials having mode */
+	time_t			stamp;		/* mode cache timestamp */
+};
+	
+/*
+ * The nfsnode is the nfs equivalent to ufs's inode. Any similarity
+ * is purely coincidental.
+ * There is a unique nfsnode allocated for each active file,
+ * each current directory, each mounted-on file, text file, and the root.
+ * An nfsnode is 'named' by its file handle. (nget/nfs_node.c)
+ * If this structure exceeds 256 bytes (it is currently 256 using 4.4BSD-Lite
+ * type definitions), file handles of > 32 bytes should probably be split out
+ * into a separate malloc()'d data structure. (Reduce the size of nfsfh_t by
+ * changing the definition in nfsproto.h of NFS_SMALLFH.)
+ * NB: Hopefully the current order of the fields is such that everything will
+ *     be well aligned and, therefore, tightly packed.
+ */
+struct nfsnode {
+	struct mtx 		n_mtx;		/* Protects all of these members */
+	u_quad_t		n_size;		/* Current size of file */
+	u_quad_t		n_brev;		/* Modify rev when cached */
+	u_quad_t		n_lrev;		/* Modify rev for lease */
+	struct vattr		n_vattr;	/* Vnode attribute cache */
+	time_t			n_attrstamp;	/* Attr. cache timestamp */
+	struct nfs_accesscache	n_accesscache[NFS_ACCESSCACHESIZE];
+	struct timespec		n_mtime;	/* Prev modify time. */
+	nfsfh_t			*n_fhp;		/* NFS File Handle */
+	struct vnode		*n_vnode;	/* associated vnode */
+	struct vnode		*n_dvp;		/* parent vnode */
+	int			n_error;	/* Save write error value */
+	union {
+		struct timespec	nf_atim;	/* Special file times */
+		nfsuint64	nd_cookieverf;	/* Cookie verifier (dir only) */
+		u_char		nd4_cookieverf[NFSX_V4VERF];
+	} n_un1;
+	union {
+		struct timespec	nf_mtim;
+		off_t		nd_direof;	/* Dir. EOF offset cache */
+	} n_un2;
+	union {
+		struct sillyrename *nf_silly;	/* Ptr to silly rename struct */
+		LIST_HEAD(, nfsdmap) nd_cook;	/* cookies */
+	} n_un3;
+	short			n_fhsize;	/* size in bytes, of fh */
+	short			n_flag;		/* Flag for locking.. */
+	nfsfh_t			n_fh;		/* Small File Handle */
+	u_char			*n_name;	/* leaf name, for v4 OPEN op */
+	uint32_t		n_namelen;
+	int			n_directio_opens;
+	int                     n_directio_asyncwr;
+	struct ucred		*n_writecred;	/* Cred. for putpages */
+};
+
+#define n_atim		n_un1.nf_atim
+#define n_mtim		n_un2.nf_mtim
+#define n_sillyrename	n_un3.nf_silly
+#define n_cookieverf	n_un1.nd_cookieverf
+#define n4_cookieverf	n_un1.nd4_cookieverf
+#define n_direofoffset	n_un2.nd_direof
+#define n_cookies	n_un3.nd_cook
+
+/*
+ * Flags for n_flag
+ */
+#define NFSYNCWAIT      0x0002  /* fsync waiting for all directio async writes
+				  to drain */
+#define	NMODIFIED	0x0004	/* Might have a modified buffer in bio */
+#define	NWRITEERR	0x0008	/* Flag write errors so close will know */
+/* 0x20, 0x40, 0x80 free */
+#define	NACC		0x0100	/* Special file accessed */
+#define	NUPD		0x0200	/* Special file updated */
+#define	NCHG		0x0400	/* Special file times changed */
+#define	NCREATED	0x0800	/* Opened by nfs_create() */
+#define	NTRUNCATE	0x1000	/* Opened by nfs_setattr() */
+#define	NSIZECHANGED	0x2000  /* File size has changed: need cache inval */
+#define NNONCACHE	0x4000  /* Node marked as noncacheable */
+#define NDIRCOOKIELK	0x8000	/* Lock to serialize access to directory cookies */
+
+/*
+ * Convert between nfsnode pointers and vnode pointers
+ */
+#define VTONFS(vp)	((struct nfsnode *)(vp)->v_data)
+#define NFSTOV(np)	((struct vnode *)(np)->n_vnode)
+
+#define NFS_TIMESPEC_COMPARE(T1, T2)	(((T1)->tv_sec != (T2)->tv_sec) || ((T1)->tv_nsec != (T2)->tv_nsec))
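
A sketch of the conversion macros in use (the helper and the check it performs
are illustrative):

	static int
	nfs_attrcache_filled(struct vnode *vp)
	{
		struct nfsnode *np = VTONFS(vp);

		return (np->n_attrstamp != 0);	/* 0 until attributes are cached */
	}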
+
+/*
+ * NFS iod threads can be in one of these two states once spawned.
+ * NFSIOD_NOT_AVAILABLE - Cannot be assigned an I/O operation at this time.
+ * NFSIOD_AVAILABLE - Available to be assigned an I/O operation.
+ */
+enum nfsiod_state {
+	NFSIOD_NOT_AVAILABLE = 0,
+	NFSIOD_AVAILABLE = 1,
+};
+
+/*
+ * Queue head for nfsiod's
+ */
+extern TAILQ_HEAD(nfs_bufq, buf) nfs_bufq;
+extern enum nfsiod_state nfs_iodwant[NFS_MAXASYNCDAEMON];
+extern struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
+
+#if defined(_KERNEL)
+
+extern	struct vop_vector	nfs_fifoops;
+extern	struct vop_vector	nfs_vnodeops;
+extern struct buf_ops buf_ops_nfs;
+
+/*
+ * Prototypes for NFS vnode operations
+ */
+int	nfs_getpages(struct vop_getpages_args *);
+int	nfs_putpages(struct vop_putpages_args *);
+int	nfs_write(struct vop_write_args *);
+int	nfs_inactive(struct vop_inactive_args *);
+int	nfs_reclaim(struct vop_reclaim_args *);
+
+/* other stuff */
+int	nfs_removeit(struct sillyrename *);
+int	nfs_nget(struct mount *, nfsfh_t *, int, struct nfsnode **, int flags);
+nfsuint64 *nfs_getcookie(struct nfsnode *, off_t, int);
+void	nfs_invaldir(struct vnode *);
+int	nfs_upgrade_vnlock(struct vnode *vp);
+void	nfs_downgrade_vnlock(struct vnode *vp, int old_lock);
+void	nfs_printf(const char *fmt, ...);
+
+void nfs_dircookie_lock(struct nfsnode *np);
+void nfs_dircookie_unlock(struct nfsnode *np);
+
+#endif /* _KERNEL */
+
+#endif
diff --git a/freebsd/sys/nfsclient/nfsstats.h b/freebsd/sys/nfsclient/nfsstats.h
new file mode 100644
index 0000000..c65ae6d
--- /dev/null
+++ b/freebsd/sys/nfsclient/nfsstats.h
@@ -0,0 +1,71 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993, 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)nfs.h	8.4 (Berkeley) 5/1/95
+ * $FreeBSD$
+ */
+
+#ifndef _NFSCLIENT_NFSSTATS_H_
+#define _NFSCLIENT_NFSSTATS_H_
+
+/*
+ * Stats structure
+ */
+struct nfsstats {
+	int	attrcache_hits;
+	int	attrcache_misses;
+	int	lookupcache_hits;
+	int	lookupcache_misses;
+	int	direofcache_hits;
+	int	direofcache_misses;
+	int	accesscache_hits;
+	int	accesscache_misses;
+	int	biocache_reads;
+	int	read_bios;
+	int	read_physios;
+	int	biocache_writes;
+	int	write_bios;
+	int	write_physios;
+	int	biocache_readlinks;
+	int	readlink_bios;
+	int	biocache_readdirs;
+	int	readdir_bios;
+	int	rpcretries;
+	int	rpcrequests;
+	int	rpctimeouts;
+	int	rpcunexpected;
+	int	rpcinvalid;
+	int	rpccnt[NFS_NPROCS];
+};
+
+#endif
diff --git a/freebsd/sys/nfsclient/nlminfo.h b/freebsd/sys/nfsclient/nlminfo.h
new file mode 100644
index 0000000..340bdbe
--- /dev/null
+++ b/freebsd/sys/nfsclient/nlminfo.h
@@ -0,0 +1,44 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ *    promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      from BSDI nlminfo.h,v 2.1 1998/03/18 01:30:38 don Exp
+ * $FreeBSD$
+ */
+
+/*
+ * Misc NLM information, some needed for the master lockd process, and some
+ * needed by every process doing NLM-based locking.
+ */
+struct  nlminfo {
+	/* these are used by any process doing nlm locking */
+        int             msg_seq;        /* sequence counter for lock requests */
+        int             retcode;        /* return code for lock requests */
+	int		set_getlk_pid;
+	int		getlk_pid;
+        struct  timeval pid_start;      /* process starting time */
+};
+};