NFS Client Updates for Linux 5.4


Merge tag 'nfs-for-5.4-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
 "Stable bugfixes:
   - Dequeue the request from the receive queue while we're re-encoding
     # v4.20+
   - Fix buffer handling of GSS MIC without slack # 5.1

  Features:
   - Increase xprtrdma maximum transport header and slot table sizes
   - Add support for nfs4_call_sync() calls using a custom
     rpc_task_struct
   - Optimize the default readahead size
   - Enable pNFS filelayout LAYOUTGET on OPEN

  Other bugfixes and cleanups:
   - Fix possible null-pointer dereferences and memory leaks
   - Various NFS over RDMA cleanups
   - Various NFS over RDMA comment updates
   - Don't receive TCP data into a reset request buffer
   - Don't try to parse incomplete RPC messages
   - Fix congestion window race with disconnect
   - Clean up pNFS return-on-close error handling
   - Fixes for NFS4ERR_OLD_STATEID handling"

* tag 'nfs-for-5.4-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (53 commits)
  pNFS/filelayout: enable LAYOUTGET on OPEN
  NFS: Optimise the default readahead size
  NFSv4: Handle NFS4ERR_OLD_STATEID in LOCKU
  NFSv4: Handle NFS4ERR_OLD_STATEID in CLOSE/OPEN_DOWNGRADE
  NFSv4: Fix OPEN_DOWNGRADE error handling
  pNFS: Handle NFS4ERR_OLD_STATEID on layoutreturn by bumping the state seqid
  NFSv4: Add a helper to increment stateid seqids
  NFSv4: Handle RPC level errors in LAYOUTRETURN
  NFSv4: Handle NFS4ERR_DELAY correctly in return-on-close
  NFSv4: Clean up pNFS return-on-close error handling
  pNFS: Ensure we do clear the return-on-close layout stateid on fatal errors
  NFS: remove unused check for negative dentry
  NFSv3: use nfs_add_or_obtain() to create and reference inodes
  NFS: Refactor nfs_instantiate() for dentry referencing callers
  SUNRPC: Fix congestion window race with disconnect
  SUNRPC: Don't try to parse incomplete RPC messages
  SUNRPC: Rename xdr_buf_read_netobj to xdr_buf_read_mic
  SUNRPC: Fix buffer handling of GSS MIC without slack
  SUNRPC: RPC level errors should always set task->tk_rpc_status
  SUNRPC: Don't receive TCP data into a request buffer that has been reset
  ...
Merged by Linus Torvalds on 2019-09-26 12:20:14 -07:00 as commit 972a2bf7df.
29 changed files with 834 additions and 579 deletions

fs/nfs/dir.c

@ -1669,10 +1669,8 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
#endif /* CONFIG_NFSV4 */
/*
* Code common to create, mkdir, and mknod.
*/
int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct dentry *
nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@ -1680,13 +1678,10 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct inode *dir = d_inode(parent);
struct inode *inode;
struct dentry *d;
int error = -EACCES;
int error;
d_drop(dentry);
/* We may have been initialized further down */
if (d_really_is_positive(dentry))
goto out;
if (fhandle->size == 0) {
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, NULL);
if (error)
@ -1702,18 +1697,32 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
d = d_splice_alias(inode, dentry);
if (IS_ERR(d)) {
error = PTR_ERR(d);
goto out_error;
}
dput(d);
out:
dput(parent);
return 0;
return d;
out_error:
nfs_mark_for_revalidate(dir);
dput(parent);
return error;
d = ERR_PTR(error);
goto out;
}
EXPORT_SYMBOL_GPL(nfs_add_or_obtain);
/*
* Code common to create, mkdir, and mknod.
*/
int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct nfs_fattr *fattr,
struct nfs4_label *label)
{
struct dentry *d;
d = nfs_add_or_obtain(dentry, fhandle, fattr, label);
if (IS_ERR(d))
return PTR_ERR(d);
/* Callers don't care */
dput(d);
return 0;
}
EXPORT_SYMBOL_GPL(nfs_instantiate);
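
As an aside (not part of this diff), a hypothetical caller of the new helper could look like the sketch below. nfs_add_or_obtain() hands back whatever dentry d_splice_alias() settled on, or NULL when the passed-in dentry was used, so callers that care about aliasing take over the reference and drop it themselves. The function example_create_cb() is a made-up name for illustration.

/* Hypothetical caller sketch (illustrative only): prefer the aliased
 * dentry when one exists. */
static int example_create_cb(struct dentry *dentry, struct nfs_fh *fh,
			     struct nfs_fattr *fattr)
{
	struct dentry *d = nfs_add_or_obtain(dentry, fh, fattr, NULL);

	if (IS_ERR(d))
		return PTR_ERR(d);
	if (d)			/* d_splice_alias() found an existing alias */
		dentry = d;
	/* ... operate on d_inode(dentry) ... */
	dput(d);		/* dput(NULL) is a no-op */
	return 0;
}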

fs/nfs/filelayout/filelayout.c

@ -1164,6 +1164,7 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
.owner = THIS_MODULE,
.flags = PNFS_LAYOUTGET_ON_OPEN,
.max_layoutget_response = 4096, /* 1 page or so... */
.alloc_layout_hdr = filelayout_alloc_layout_hdr,
.free_layout_hdr = filelayout_free_layout_hdr,

fs/nfs/internal.h

@ -16,14 +16,6 @@ extern const struct export_operations nfs_export_ops;
struct nfs_string;
/* Maximum number of readahead requests
* FIXME: this should really be a sysctl so that users may tune it to suit
* their needs. People that do NFS over a slow network, might for
* instance want to reduce it to something closer to 1 for improved
* interactive response.
*/
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
{
if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))

fs/nfs/nfs3proc.c

@ -279,15 +279,17 @@ static struct nfs3_createdata *nfs3_alloc_createdata(void)
return data;
}
static int nfs3_do_create(struct inode *dir, struct dentry *dentry, struct nfs3_createdata *data)
static struct dentry *
nfs3_do_create(struct inode *dir, struct dentry *dentry, struct nfs3_createdata *data)
{
int status;
status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0);
nfs_post_op_update_inode(dir, data->res.dir_attr);
if (status == 0)
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, NULL);
return status;
if (status != 0)
return ERR_PTR(status);
return nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr, NULL);
}
static void nfs3_free_createdata(struct nfs3_createdata *data)
@ -304,6 +306,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call create %pd\n", dentry);
@ -330,7 +333,8 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
goto out;
for (;;) {
status = nfs3_do_create(dir, dentry, data);
d_alias = nfs3_do_create(dir, dentry, data);
status = PTR_ERR_OR_ZERO(d_alias);
if (status != -ENOTSUPP)
break;
@ -355,6 +359,9 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
if (status != 0)
goto out_release_acls;
if (d_alias)
dentry = d_alias;
/* When we created the file with exclusive semantics, make
* sure we set the attributes afterwards. */
if (data->arg.create.createmode == NFS3_CREATE_EXCLUSIVE) {
@ -372,11 +379,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
nfs_post_op_update_inode(d_inode(dentry), data->res.fattr);
dprintk("NFS reply setattr (post-create): %d\n", status);
if (status != 0)
goto out_release_acls;
goto out_dput;
}
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
out_dput:
dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
@ -504,6 +513,7 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
unsigned int len, struct iattr *sattr)
{
struct nfs3_createdata *data;
struct dentry *d_alias;
int status = -ENOMEM;
if (len > NFS3_MAXPATHLEN)
@ -522,7 +532,11 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
data->arg.symlink.pathlen = len;
data->arg.symlink.sattr = sattr;
status = nfs3_do_create(dir, dentry, data);
d_alias = nfs3_do_create(dir, dentry, data);
status = PTR_ERR_OR_ZERO(d_alias);
if (status == 0)
dput(d_alias);
nfs3_free_createdata(data);
out:
@ -535,6 +549,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call mkdir %pd\n", dentry);
@ -553,12 +568,18 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
data->arg.mkdir.len = dentry->d_name.len;
data->arg.mkdir.sattr = sattr;
status = nfs3_do_create(dir, dentry, data);
d_alias = nfs3_do_create(dir, dentry, data);
status = PTR_ERR_OR_ZERO(d_alias);
if (status != 0)
goto out_release_acls;
if (d_alias)
dentry = d_alias;
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
@ -660,6 +681,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call mknod %pd %u:%u\n", dentry,
@ -698,12 +720,17 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
goto out;
}
status = nfs3_do_create(dir, dentry, data);
d_alias = nfs3_do_create(dir, dentry, data);
status = PTR_ERR_OR_ZERO(d_alias);
if (status != 0)
goto out_release_acls;
if (d_alias)
dentry = d_alias;
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);

fs/nfs/nfs4_fs.h

@ -491,8 +491,6 @@ extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
const struct nfs_lock_context *, nfs4_stateid *,
const struct cred **);
extern bool nfs4_refresh_open_stateid(nfs4_stateid *dst,
struct nfs4_state *state);
extern bool nfs4_copy_open_stateid(nfs4_stateid *dst,
struct nfs4_state *state);
@ -574,6 +572,15 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat
return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
}
static inline void nfs4_stateid_seqid_inc(nfs4_stateid *s1)
{
u32 seqid = be32_to_cpu(s1->seqid);
if (++seqid == 0)
++seqid;
s1->seqid = cpu_to_be32(seqid);
}
static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
{
return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
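
Two details of the helpers above are easy to miss (an aside, not part of the diff): stateid seqids are 32-bit big-endian serial numbers, a seqid of zero is treated specially in NFSv4.x (it denotes the current stateid), so the increment skips zero on wraparound, and nfs4_stateid_is_newer() relies on signed serial-number arithmetic. A userspace-style sketch, with htonl()/ntohl() standing in for cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <arpa/inet.h>

static uint32_t seqid_inc(uint32_t be_seqid)
{
	uint32_t seqid = ntohl(be_seqid);

	if (++seqid == 0)	/* zero is reserved; skip it on wrap */
		++seqid;
	return htonl(seqid);
}

static int seqid_is_newer(uint32_t be_a, uint32_t be_b)
{
	/* Correct across 2^32 wraparound as long as the two values
	 * stay within 2^31 of each other. */
	return (int32_t)(ntohl(be_a) - ntohl(be_b)) > 0;
}

/* seqid_inc(htonl(0xffffffff)) yields htonl(1), skipping 0, and
 * seqid_is_newer(htonl(1), htonl(0xffffffff)) is true. */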

fs/nfs/nfs4proc.c

@ -1073,14 +1073,26 @@ static const struct rpc_call_ops nfs40_call_sync_ops = {
.rpc_call_done = nfs40_call_sync_done,
};
static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
int ret;
struct rpc_task *task;
task = rpc_run_task(task_setup);
if (IS_ERR(task))
return PTR_ERR(task);
ret = task->tk_status;
rpc_put_task(task);
return ret;
}
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res)
{
int ret;
struct rpc_task *task;
struct nfs_client *clp = server->nfs_client;
struct nfs4_call_sync_data data = {
.seq_server = server,
@ -1094,14 +1106,7 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
.callback_data = &data
};
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
ret = PTR_ERR(task);
else {
ret = task->tk_status;
rpc_put_task(task);
}
return ret;
return nfs4_call_sync_custom(&task_setup);
}
int nfs4_call_sync(struct rpc_clnt *clnt,
@ -3308,6 +3313,75 @@ nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
return pnfs_wait_on_layoutreturn(inode, task);
}
/*
* Update the seqid of an open stateid
*/
static void nfs4_sync_open_stateid(nfs4_stateid *dst,
struct nfs4_state *state)
{
__be32 seqid_open;
u32 dst_seqid;
int seq;
for (;;) {
if (!nfs4_valid_open_stateid(state))
break;
seq = read_seqbegin(&state->seqlock);
if (!nfs4_state_match_open_stateid_other(state, dst)) {
nfs4_stateid_copy(dst, &state->open_stateid);
if (read_seqretry(&state->seqlock, seq))
continue;
break;
}
seqid_open = state->open_stateid.seqid;
if (read_seqretry(&state->seqlock, seq))
continue;
dst_seqid = be32_to_cpu(dst->seqid);
if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
dst->seqid = seqid_open;
break;
}
}
/*
* Update the seqid of an open stateid after receiving
* NFS4ERR_OLD_STATEID
*/
static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
struct nfs4_state *state)
{
__be32 seqid_open;
u32 dst_seqid;
bool ret;
int seq;
for (;;) {
ret = false;
if (!nfs4_valid_open_stateid(state))
break;
seq = read_seqbegin(&state->seqlock);
if (!nfs4_state_match_open_stateid_other(state, dst)) {
if (read_seqretry(&state->seqlock, seq))
continue;
break;
}
seqid_open = state->open_stateid.seqid;
if (read_seqretry(&state->seqlock, seq))
continue;
dst_seqid = be32_to_cpu(dst->seqid);
if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0)
dst->seqid = cpu_to_be32(dst_seqid + 1);
else
dst->seqid = seqid_open;
ret = true;
break;
}
return ret;
}
struct nfs4_closedata {
struct inode *inode;
struct nfs4_state *state;
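
A worked example of the retry arithmetic in nfs4_refresh_open_old_stateid() above (illustrative, not part of the diff), assuming the client's cached open_stateid carries seqid 5:

/* CLOSE sent with a stale seqid 4, server replies OLD_STATEID:
 *   dst_seqid(4) < seqid_open(5), so retry with the cached seqid 5.
 *
 * CLOSE sent with seqid 5, but a racing OPEN already bumped the
 * server to 6 and the client missed that reply:
 *   dst_seqid(5) >= seqid_open(5), so optimistically retry with
 *   dst_seqid + 1 == 6.
 */
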
@ -3358,32 +3432,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
/* Handle Layoutreturn errors */
if (calldata->arg.lr_args && task->tk_status != 0) {
switch (calldata->res.lr_ret) {
default:
calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
break;
case 0:
calldata->arg.lr_args = NULL;
calldata->res.lr_res = NULL;
break;
case -NFS4ERR_OLD_STATEID:
if (nfs4_layoutreturn_refresh_stateid(&calldata->arg.lr_args->stateid,
&calldata->arg.lr_args->range,
calldata->inode))
goto lr_restart;
/* Fallthrough */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
case -NFS4ERR_WRONG_CRED:
calldata->arg.lr_args = NULL;
calldata->res.lr_res = NULL;
goto lr_restart;
}
}
if (pnfs_roc_done(task, calldata->inode,
&calldata->arg.lr_args,
&calldata->res.lr_res,
&calldata->res.lr_ret) == -EAGAIN)
goto out_restart;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
@ -3403,7 +3456,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
break;
case -NFS4ERR_OLD_STATEID:
/* Did we race with OPEN? */
if (nfs4_refresh_open_stateid(&calldata->arg.stateid,
if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
state))
goto out_restart;
goto out_release;
@ -3415,7 +3468,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
task->tk_msg.rpc_cred);
/* Fallthrough */
case -NFS4ERR_BAD_STATEID:
if (calldata->arg.fmode == 0)
break;
/* Fallthrough */
default:
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
@ -3430,8 +3485,6 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
nfs_refresh_inode(calldata->inode, &calldata->fattr);
dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
return;
lr_restart:
calldata->res.lr_ret = 0;
out_restart:
task->tk_status = 0;
rpc_restart_call_prepare(task);
@ -3472,8 +3525,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
if (!nfs4_valid_open_stateid(state) ||
!nfs4_refresh_open_stateid(&calldata->arg.stateid, state))
nfs4_sync_open_stateid(&calldata->arg.stateid, state);
if (!nfs4_valid_open_stateid(state))
call_close = 0;
spin_unlock(&state->owner->so_lock);
@ -6018,7 +6071,6 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
.rpc_resp = res,
.rpc_cred = cred,
};
struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
@ -6051,17 +6103,12 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
dprintk("NFS call setclientid auth=%s, '%s'\n",
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_owner_id);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task)) {
status = PTR_ERR(task);
goto out;
}
status = task->tk_status;
status = nfs4_call_sync_custom(&task_setup_data);
if (setclientid.sc_cred) {
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}
rpc_put_task(task);
out:
trace_nfs4_setclientid(clp, status);
dprintk("NFS reply setclientid: %d\n", status);
@ -6129,32 +6176,11 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
/* Handle Layoutreturn errors */
if (data->args.lr_args && task->tk_status != 0) {
switch(data->res.lr_ret) {
default:
data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
break;
case 0:
data->args.lr_args = NULL;
data->res.lr_res = NULL;
break;
case -NFS4ERR_OLD_STATEID:
if (nfs4_layoutreturn_refresh_stateid(&data->args.lr_args->stateid,
&data->args.lr_args->range,
data->inode))
goto lr_restart;
/* Fallthrough */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
case -NFS4ERR_WRONG_CRED:
data->args.lr_args = NULL;
data->res.lr_res = NULL;
goto lr_restart;
}
}
if (pnfs_roc_done(task, data->inode,
&data->args.lr_args,
&data->res.lr_res,
&data->res.lr_ret) == -EAGAIN)
goto out_restart;
switch (task->tk_status) {
case 0:
@ -6192,8 +6218,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
}
data->rpc_status = task->tk_status;
return;
lr_restart:
data->res.lr_ret = 0;
out_restart:
task->tk_status = 0;
rpc_restart_call_prepare(task);
@ -6386,6 +6410,42 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
return err;
}
/*
* Update the seqid of a lock stateid after receiving
* NFS4ERR_OLD_STATEID
*/
static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
struct nfs4_lock_state *lsp)
{
struct nfs4_state *state = lsp->ls_state;
bool ret = false;
spin_lock(&state->state_lock);
if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
goto out;
if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
nfs4_stateid_seqid_inc(dst);
else
dst->seqid = lsp->ls_stateid.seqid;
ret = true;
out:
spin_unlock(&state->state_lock);
return ret;
}
static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
struct nfs4_lock_state *lsp)
{
struct nfs4_state *state = lsp->ls_state;
bool ret;
spin_lock(&state->state_lock);
ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
nfs4_stateid_copy(dst, &lsp->ls_stateid);
spin_unlock(&state->state_lock);
return ret;
}
struct nfs4_unlockdata {
struct nfs_locku_args arg;
struct nfs_locku_res res;
@ -6403,7 +6463,8 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs4_state *state = lsp->ls_state;
struct inode *inode = state->inode;
p = kzalloc(sizeof(*p), GFP_NOFS);
if (p == NULL)
@ -6419,6 +6480,9 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
locks_init_lock(&p->fl);
locks_copy_lock(&p->fl, fl);
p->server = NFS_SERVER(inode);
spin_lock(&state->state_lock);
nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
spin_unlock(&state->state_lock);
return p;
}
@ -6457,10 +6521,14 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
task->tk_msg.rpc_cred);
/* Fall through */
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
if (!nfs4_stateid_match(&calldata->arg.stateid,
&calldata->lsp->ls_stateid))
if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
calldata->lsp))
rpc_restart_call_prepare(task);
break;
case -NFS4ERR_OLD_STATEID:
if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
calldata->lsp))
rpc_restart_call_prepare(task);
break;
default:
@ -6483,7 +6551,6 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
goto out_wait;
nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
goto out_no_action;
@ -7645,6 +7712,8 @@ int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
{
int status;
struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
struct nfs4_secinfo_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
@ -7657,26 +7726,37 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
.rpc_argp = &args,
.rpc_resp = &res,
};
struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
struct nfs4_call_sync_data data = {
.seq_server = NFS_SERVER(dir),
.seq_args = &args.seq_args,
.seq_res = &res.seq_res,
};
struct rpc_task_setup task_setup = {
.rpc_client = clnt,
.rpc_message = &msg,
.callback_ops = clp->cl_mvops->call_sync_ops,
.callback_data = &data,
.flags = RPC_TASK_NO_ROUND_ROBIN,
};
const struct cred *cred = NULL;
if (use_integrity) {
clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
clnt = clp->cl_rpcclient;
task_setup.rpc_client = clnt;
cred = nfs4_get_clid_cred(clp);
msg.rpc_cred = cred;
}
dprintk("NFS call secinfo %s\n", name->name);
nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
status = nfs4_call_sync_custom(&task_setup);
status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
&res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
dprintk("NFS reply secinfo: %d\n", status);
put_cred(cred);
return status;
}
@ -8344,7 +8424,6 @@ static const struct rpc_call_ops nfs4_get_lease_time_ops = {
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
struct rpc_task *task;
struct nfs4_get_lease_time_args args;
struct nfs4_get_lease_time_res res = {
.lr_fsinfo = fsinfo,
@ -8366,17 +8445,9 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
.callback_data = &data,
.flags = RPC_TASK_TIMEOUT,
};
int status;
nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
return PTR_ERR(task);
status = task->tk_status;
rpc_put_task(task);
return status;
return nfs4_call_sync_custom(&task_setup);
}
#ifdef CONFIG_NFS_V4_1
@ -8845,7 +8916,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
const struct cred *cred)
{
struct nfs4_reclaim_complete_data *calldata;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
.rpc_cred = cred,
@ -8854,7 +8924,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_reclaim_complete_call_ops,
.flags = RPC_TASK_ASYNC | RPC_TASK_NO_ROUND_ROBIN,
.flags = RPC_TASK_NO_ROUND_ROBIN,
};
int status = -ENOMEM;
@ -8869,15 +8939,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task)) {
status = PTR_ERR(task);
goto out;
}
status = rpc_wait_for_completion_task(task);
if (status == 0)
status = task->tk_status;
rpc_put_task(task);
status = nfs4_call_sync_custom(&task_setup_data);
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
@ -9103,10 +9165,19 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
if (!nfs41_sequence_process(task, &lrp->res.seq_res))
return;
/*
* Was there an RPC level error? Assume the call succeeded,
* and that we need to release the layout
*/
if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
lrp->res.lrs_present = 0;
return;
}
server = NFS_SERVER(lrp->args.inode);
switch (task->tk_status) {
case -NFS4ERR_OLD_STATEID:
if (nfs4_layoutreturn_refresh_stateid(&lrp->args.stateid,
if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
&lrp->args.range,
lrp->args.inode))
goto out_restart;
@ -9362,18 +9433,32 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
struct rpc_clnt *clnt = server->client;
struct nfs4_call_sync_data data = {
.seq_server = server,
.seq_args = &args.seq_args,
.seq_res = &res.seq_res,
};
struct rpc_task_setup task_setup = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
.callback_data = &data,
.flags = RPC_TASK_NO_ROUND_ROBIN,
};
const struct cred *cred = NULL;
int status;
if (use_integrity) {
clnt = server->nfs_client->cl_rpcclient;
task_setup.rpc_client = clnt;
cred = nfs4_get_clid_cred(server->nfs_client);
msg.rpc_cred = cred;
}
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
&res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
status = nfs4_call_sync_custom(&task_setup);
dprintk("<-- %s status=%d\n", __func__, status);
put_cred(cred);

fs/nfs/nfs4state.c

@ -1015,22 +1015,6 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
return ret;
}
bool nfs4_refresh_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
bool ret;
int seq;
do {
ret = false;
seq = read_seqbegin(&state->seqlock);
if (nfs4_state_match_open_stateid_other(state, dst)) {
dst->seqid = state->open_stateid.seqid;
ret = true;
}
} while (read_seqretry(&state->seqlock, seq));
return ret;
}
bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
bool ret;
@ -2095,8 +2079,10 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
}
status = nfs4_begin_drain_session(clp);
if (status != 0)
return status;
if (status != 0) {
result = status;
goto out;
}
status = nfs4_replace_transport(server, locations);
if (status != 0) {

fs/nfs/nfs4xdr.c

@ -1174,7 +1174,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
} else
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
*p++ = cpu_to_be32(label->lfs);
*p++ = cpu_to_be32(label->pi);
*p++ = cpu_to_be32(label->len);

fs/nfs/pnfs.c

@ -359,9 +359,10 @@ pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
}
/*
* Update the seqid of a layout stateid
* Update the seqid of a layout stateid after receiving
* NFS4ERR_OLD_STATEID
*/
bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode)
{
@ -377,7 +378,15 @@ bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout;
if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
if (lo && pnfs_layout_is_valid(lo) &&
nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
/* Is our call using the most recent seqid? If so, bump it */
if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
nfs4_stateid_seqid_inc(dst);
ret = true;
goto out;
}
/* Try to update the seqid to the most recent */
err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
if (err != -EBUSY) {
dst->seqid = lo->plh_stateid.seqid;
@ -385,6 +394,7 @@ bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
ret = true;
}
}
out:
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
return ret;
@ -1440,6 +1450,52 @@ bool pnfs_roc(struct inode *ino,
return false;
}
int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
struct nfs4_layoutreturn_args **argpp,
struct nfs4_layoutreturn_res **respp,
int *ret)
{
struct nfs4_layoutreturn_args *arg = *argpp;
int retval = -EAGAIN;
if (!arg)
return 0;
/* Handle Layoutreturn errors */
switch (*ret) {
case 0:
retval = 0;
break;
case -NFS4ERR_NOMATCHING_LAYOUT:
/* Was there an RPC level error? If not, retry */
if (task->tk_rpc_status == 0)
break;
/* If the call was not sent, let caller handle it */
if (!RPC_WAS_SENT(task))
return 0;
/*
* Otherwise, assume the call succeeded and
* that we need to release the layout
*/
*ret = 0;
(*respp)->lrs_present = 0;
retval = 0;
break;
case -NFS4ERR_DELAY:
/* Let the caller handle the retry */
*ret = -NFS4ERR_NOMATCHING_LAYOUT;
return 0;
case -NFS4ERR_OLD_STATEID:
if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
&arg->range, inode))
break;
*ret = -NFS4ERR_NOMATCHING_LAYOUT;
return -EAGAIN;
}
*argpp = NULL;
*respp = NULL;
return retval;
}
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret)
@ -1449,10 +1505,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
const nfs4_stateid *res_stateid = NULL;
struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
if (ret == 0) {
arg_stateid = &args->stateid;
switch (ret) {
case -NFS4ERR_NOMATCHING_LAYOUT:
break;
case 0:
if (res->lrs_present)
res_stateid = &res->stateid;
/* Fallthrough */
default:
arg_stateid = &args->stateid;
}
pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
res_stateid);

fs/nfs/pnfs.h

@ -261,7 +261,7 @@ int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
bool is_recall);
int pnfs_destroy_layouts_byclid(struct nfs_client *clp,
bool is_recall);
bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode);
void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
@ -282,6 +282,10 @@ bool pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
const struct cred *cred);
int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
struct nfs4_layoutreturn_args **argpp,
struct nfs4_layoutreturn_res **respp,
int *ret);
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret);
@ -701,6 +705,15 @@ pnfs_roc(struct inode *ino,
return false;
}
static inline int
pnfs_roc_done(struct rpc_task *task, struct inode *inode,
struct nfs4_layoutreturn_args **argpp,
struct nfs4_layoutreturn_res **respp,
int *ret)
{
return 0;
}
static inline void
pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
@ -785,7 +798,7 @@ static inline void nfs4_pnfs_v3_ds_connect_unload(void)
{
}
static inline bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
static inline bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode)
{

fs/nfs/super.c

@ -2645,6 +2645,13 @@ int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
}
EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
static void nfs_set_readahead(struct backing_dev_info *bdi,
unsigned long iomax_pages)
{
bdi->ra_pages = VM_READAHEAD_PAGES;
bdi->io_pages = iomax_pages;
}
struct dentry *nfs_fs_mount_common(struct nfs_server *server,
int flags, const char *dev_name,
struct nfs_mount_info *mount_info,
@ -2687,7 +2694,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
mntroot = ERR_PTR(error);
goto error_splat_super;
}
s->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
nfs_set_readahead(s->s_bdi, server->rpages);
server->super = s;
}
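
For scale (an aside, not from the diff; assumes 4 KiB pages, rsize = 1 MiB, and RPC_DEF_SLOT_TABLE = 16):

/* Old default: server->rpages * NFS_MAX_READAHEAD
 *            = (1 MiB / 4 KiB) * (16 - 1) = 3840 pages = 15 MiB
 * New default: VM_READAHEAD_PAGES = 128 KiB / 4 KiB = 32 pages,
 * the same readahead window the VM uses for local block devices. */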

include/linux/nfs_fs.h

@ -490,6 +490,9 @@ extern const struct file_operations nfs_dir_operations;
extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
struct nfs_fh *fh, struct nfs_fattr *fattr,
struct nfs4_label *label);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
struct nfs_fattr *fattr, struct nfs4_label *label);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);

include/linux/sunrpc/sched.h

@ -242,9 +242,6 @@ void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue,
void rpc_sleep_on_priority(struct rpc_wait_queue *,
struct rpc_task *,
int priority);
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_wait_queue *queue,
struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,

include/linux/sunrpc/xdr.h

@ -186,7 +186,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);

include/linux/sunrpc/xprt.h

@ -352,6 +352,7 @@ bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
void xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task);
void xprt_request_dequeue_xprt(struct rpc_task *task);
bool xprt_request_need_retransmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);

include/linux/sunrpc/xprtrdma.h

@ -49,9 +49,9 @@
* fully-chunked NFS message (read chunks are the largest). Note only
* a single chunk type per message is supported currently.
*/
#define RPCRDMA_MIN_SLOT_TABLE (2U)
#define RPCRDMA_MIN_SLOT_TABLE (4U)
#define RPCRDMA_DEF_SLOT_TABLE (128U)
#define RPCRDMA_MAX_SLOT_TABLE (256U)
#define RPCRDMA_MAX_SLOT_TABLE (16384U)
#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */
#define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */

include/trace/events/rpcrdma.h

@ -451,20 +451,81 @@ TRACE_EVENT(xprtrdma_createmrs,
TP_STRUCT__entry(
__field(const void *, r_xprt)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
__field(unsigned int, count)
),
TP_fast_assign(
__entry->r_xprt = r_xprt;
__entry->count = count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
TP_printk("r_xprt=%p: created %u MRs",
__entry->r_xprt, __entry->count
TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
__get_str(addr), __get_str(port), __entry->r_xprt,
__entry->count
)
);
DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
TRACE_EVENT(xprtrdma_mr_get,
TP_PROTO(
const struct rpcrdma_req *req
),
TP_ARGS(req),
TP_STRUCT__entry(
__field(const void *, req)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
__entry->req = req;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
),
TP_printk("task:%u@%u xid=0x%08x req=%p",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->req
)
);
TRACE_EVENT(xprtrdma_nomrs,
TP_PROTO(
const struct rpcrdma_req *req
),
TP_ARGS(req),
TP_STRUCT__entry(
__field(const void *, req)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
__entry->req = req;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
),
TP_printk("task:%u@%u xid=0x%08x req=%p",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->req
)
);
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
@ -623,21 +684,21 @@ TRACE_EVENT(xprtrdma_post_send,
TRACE_EVENT(xprtrdma_post_recv,
TP_PROTO(
const struct ib_cqe *cqe
const struct rpcrdma_rep *rep
),
TP_ARGS(cqe),
TP_ARGS(rep),
TP_STRUCT__entry(
__field(const void *, cqe)
__field(const void *, rep)
),
TP_fast_assign(
__entry->cqe = cqe;
__entry->rep = rep;
),
TP_printk("cqe=%p",
__entry->cqe
TP_printk("rep=%p",
__entry->rep
)
);
@ -715,14 +776,15 @@ TRACE_EVENT(xprtrdma_wc_receive,
TP_ARGS(wc),
TP_STRUCT__entry(
__field(const void *, cqe)
__field(const void *, rep)
__field(u32, byte_len)
__field(unsigned int, status)
__field(u32, vendor_err)
),
TP_fast_assign(
__entry->cqe = wc->wr_cqe;
__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
rr_cqe);
__entry->status = wc->status;
if (wc->status) {
__entry->byte_len = 0;
@ -733,8 +795,8 @@ TRACE_EVENT(xprtrdma_wc_receive,
}
),
TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
__entry->cqe, __entry->byte_len,
TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
__entry->rep, __entry->byte_len,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)

net/sunrpc/auth_gss/auth_gss.c

@ -1960,7 +1960,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len))
goto unwrap_failed;
if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
if (xdr_buf_read_mic(rcv_buf, &mic, mic_offset))
goto unwrap_failed;
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)

net/sunrpc/clnt.c

@ -1837,7 +1837,7 @@ call_allocate(struct rpc_task *task)
return;
}
rpc_exit(task, -ERESTARTSYS);
rpc_call_rpcerror(task, -ERESTARTSYS);
}
static int
@ -1862,6 +1862,7 @@ rpc_xdr_encode(struct rpc_task *task)
req->rq_rbuffer,
req->rq_rcvsize);
req->rq_reply_bytes_recvd = 0;
req->rq_snd_buf.head[0].iov_len = 0;
xdr_init_encode(&xdr, &req->rq_snd_buf,
req->rq_snd_buf.head[0].iov_base, req);
@ -1881,6 +1882,8 @@ call_encode(struct rpc_task *task)
if (!rpc_task_need_encode(task))
goto out;
dprint_status(task);
/* Dequeue task from the receive queue while we're encoding */
xprt_request_dequeue_xprt(task);
/* Encode here so that rpcsec_gss can use correct sequence number. */
rpc_xdr_encode(task);
/* Did the encode result in an error condition? */
@ -2479,6 +2482,7 @@ call_decode(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
struct xdr_stream xdr;
int err;
dprint_status(task);
@ -2501,6 +2505,15 @@ call_decode(struct rpc_task *task)
* before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
/*
* Did we ever call xprt_complete_rqst()? If not, we should assume
* the message is incomplete.
*/
err = -EAGAIN;
if (!req->rq_reply_bytes_recvd)
goto out;
req->rq_rcv_buf.len = req->rq_private_buf.len;
/* Check that the softirq receive buffer is valid */
@ -2509,7 +2522,9 @@ call_decode(struct rpc_task *task)
xdr_init_decode(&xdr, &req->rq_rcv_buf,
req->rq_rcv_buf.head[0].iov_base, req);
switch (rpc_decode_header(task, &xdr)) {
err = rpc_decode_header(task, &xdr);
out:
switch (err) {
case 0:
task->tk_action = rpc_exit_task;
task->tk_status = rpcauth_unwrap_resp(task, &xdr);
@ -2518,9 +2533,6 @@ call_decode(struct rpc_task *task)
return;
case -EAGAIN:
task->tk_status = 0;
xdr_free_bvec(&req->rq_rcv_buf);
req->rq_reply_bytes_recvd = 0;
req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
@ -2561,7 +2573,7 @@ rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
return 0;
out_fail:
trace_rpc_bad_callhdr(task);
rpc_exit(task, error);
rpc_call_rpcerror(task, error);
return error;
}
@ -2628,7 +2640,7 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
return -EAGAIN;
}
out_err:
rpc_exit(task, error);
rpc_call_rpcerror(task, error);
return error;
out_unparsable:
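
One consequence of switching these exit paths from rpc_exit() to rpc_call_rpcerror() (a sketch, not from the diff): tk_rpc_status now records RPC-level failures separately from tk_status, so a completion callback can tell a transport or encode/decode failure apart from a server-side result, as nfs4_layoutreturn_done() does above. The callback name below is hypothetical.

/* Sketch of a completion callback using the distinction: */
static void example_rpc_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_rpc_status != 0) {
		/* RPC-level error; the call may never have reached
		 * the server */
		return;
	}
	/* The RPC ran; task->tk_status carries the reply status. */
}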

net/sunrpc/sched.c

@ -541,33 +541,14 @@ rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
return NULL;
}
static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
struct rpc_wait_queue *queue, struct rpc_task *task)
{
rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}
/*
* Wake up a queued task while the queue lock is being held
*/
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}
/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_wait_queue *queue,
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
struct rpc_task *task)
{
if (!RPC_IS_QUEUED(task))
return;
spin_lock(&queue->lock);
rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
spin_unlock(&queue->lock);
rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
task, NULL, NULL);
}
/*
@ -930,8 +911,10 @@ static void __rpc_execute(struct rpc_task *task)
/*
* Signalled tasks should exit rather than sleep.
*/
if (RPC_SIGNALLED(task))
if (RPC_SIGNALLED(task)) {
task->tk_rpc_status = -ERESTARTSYS;
rpc_exit(task, -ERESTARTSYS);
}
/*
* The queue->lock protects against races with
@ -967,6 +950,7 @@ static void __rpc_execute(struct rpc_task *task)
*/
dprintk("RPC: %5u got signal\n", task->tk_pid);
set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
task->tk_rpc_status = -ERESTARTSYS;
rpc_exit(task, -ERESTARTSYS);
}
dprintk("RPC: %5u sync task resuming\n", task->tk_pid);

net/sunrpc/xdr.c

@ -560,7 +560,7 @@ EXPORT_SYMBOL_GPL(xdr_init_encode);
* required at the end of encoding, or any other time when the xdr_buf
* data might be read.
*/
void xdr_commit_encode(struct xdr_stream *xdr)
inline void xdr_commit_encode(struct xdr_stream *xdr)
{
int shift = xdr->scratch.iov_len;
void *page;
@ -1236,43 +1236,60 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
/* If the netobj starting offset bytes from the start of xdr_buf is contained
* entirely in the head or the tail, set object to point to it; otherwise
* try to find space for it at the end of the tail, copy it there, and
* set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
/**
* xdr_buf_read_mic() - obtain the address of the GSS mic from xdr buf
* @buf: pointer to buffer containing a mic
* @mic: on success, returns the address of the mic
* @offset: the offset in buf where mic may be found
*
* This function may modify the xdr buf if the mic is found to be straddling
* a boundary between head, pages, and tail. On success the mic can be read
* from the address returned. There is no need to free the mic.
*
* Return: Success returns 0, otherwise an integer error.
*/
int xdr_buf_read_mic(struct xdr_buf *buf, struct xdr_netobj *mic, unsigned int offset)
{
struct xdr_buf subbuf;
unsigned int boundary;
if (xdr_decode_word(buf, offset, &obj->len))
if (xdr_decode_word(buf, offset, &mic->len))
return -EFAULT;
if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
offset += 4;
/* Is the mic partially in the head? */
boundary = buf->head[0].iov_len;
if (offset < boundary && (offset + mic->len) > boundary)
xdr_shift_buf(buf, boundary - offset);
/* Is the mic partially in the pages? */
boundary += buf->page_len;
if (offset < boundary && (offset + mic->len) > boundary)
xdr_shrink_pagelen(buf, boundary - offset);
if (xdr_buf_subsegment(buf, &subbuf, offset, mic->len))
return -EFAULT;
/* Is the obj contained entirely in the head? */
obj->data = subbuf.head[0].iov_base;
if (subbuf.head[0].iov_len == obj->len)
/* Is the mic contained entirely in the head? */
mic->data = subbuf.head[0].iov_base;
if (subbuf.head[0].iov_len == mic->len)
return 0;
/* ..or is the obj contained entirely in the tail? */
obj->data = subbuf.tail[0].iov_base;
if (subbuf.tail[0].iov_len == obj->len)
/* ..or is the mic contained entirely in the tail? */
mic->data = subbuf.tail[0].iov_base;
if (subbuf.tail[0].iov_len == mic->len)
return 0;
/* use end of tail as storage for obj:
* (We don't copy to the beginning because then we'd have
* to worry about doing a potentially overlapping copy.
* This assumes the object is at most half the length of the
* tail.) */
if (obj->len > buf->buflen - buf->len)
/* Find a contiguous area in @buf to hold all of @mic */
if (mic->len > buf->buflen - buf->len)
return -ENOMEM;
if (buf->tail[0].iov_len != 0)
obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
mic->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
else
obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
mic->data = buf->head[0].iov_base + buf->head[0].iov_len;
__read_bytes_from_xdr_buf(&subbuf, mic->data, mic->len);
return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
EXPORT_SYMBOL_GPL(xdr_buf_read_mic);
/* Returns 0 on success, or else a negative error code. */
static int
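
A rough picture of the new boundary handling in xdr_buf_read_mic() (illustrative, not part of the diff):

/* An xdr_buf is three regions:
 *
 *   +---------+-----------------+---------+
 *   | head[0] |     pages[]     | tail[0] |
 *   +---------+-----------------+---------+
 *             ^ boundary 1      ^ boundary 2
 *
 * A MIC straddling boundary 1 triggers xdr_shift_buf(); one straddling
 * boundary 2 triggers xdr_shrink_pagelen().  Either way the MIC ends up
 * contiguous within a single region and can be returned by address,
 * with a copy into free tail/head space only as a last resort. */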

net/sunrpc/xprt.c

@ -456,6 +456,12 @@ void xprt_release_rqst_cong(struct rpc_task *task)
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
__xprt_lock_write_next_cong(xprt);
}
/*
* Clear the congestion window wait flag and wake up the next
* entry on xprt->sending
@ -671,6 +677,7 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
spin_lock(&xprt->transport_lock);
xprt_clear_connected(xprt);
xprt_clear_write_space_locked(xprt);
xprt_clear_congestion_window_wait_locked(xprt);
xprt_wake_pending_tasks(xprt, -ENOTCONN);
spin_unlock(&xprt->transport_lock);
}
@ -1323,6 +1330,36 @@ xprt_request_dequeue_transmit(struct rpc_task *task)
spin_unlock(&xprt->queue_lock);
}
/**
* xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
* @task: pointer to rpc_task
*
* Remove a task from the transmit and receive queues, and ensure that
* it is not pinned by the receive work item.
*/
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
xprt_is_pinned_rqst(req)) {
spin_lock(&xprt->queue_lock);
xprt_request_dequeue_transmit_locked(task);
xprt_request_dequeue_receive_locked(task);
while (xprt_is_pinned_rqst(req)) {
set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
spin_unlock(&xprt->queue_lock);
xprt_wait_on_pinned_rqst(req);
spin_lock(&xprt->queue_lock);
clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
}
spin_unlock(&xprt->queue_lock);
}
}
/**
* xprt_request_prepare - prepare an encoded request for transport
* @req: pointer to rpc_rqst
@ -1747,28 +1784,6 @@ void xprt_retry_reserve(struct rpc_task *task)
xprt_do_reserve(xprt, task);
}
static void
xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
{
struct rpc_xprt *xprt = req->rq_xprt;
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
xprt_is_pinned_rqst(req)) {
spin_lock(&xprt->queue_lock);
xprt_request_dequeue_transmit_locked(task);
xprt_request_dequeue_receive_locked(task);
while (xprt_is_pinned_rqst(req)) {
set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
spin_unlock(&xprt->queue_lock);
xprt_wait_on_pinned_rqst(req);
spin_lock(&xprt->queue_lock);
clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
}
spin_unlock(&xprt->queue_lock);
}
}
/**
* xprt_release - release an RPC request slot
* @task: task which is finished with the slot
@ -1788,7 +1803,7 @@ void xprt_release(struct rpc_task *task)
}
xprt = req->rq_xprt;
xprt_request_dequeue_all(task, req);
xprt_request_dequeue_xprt(task);
spin_lock(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
if (xprt->ops->release_request)

net/sunrpc/xprtrdma/backchannel.c

@ -54,9 +54,7 @@ size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
return r_xprt->rx_buf.rb_bc_srv_max_requests;
return RPCRDMA_BACKWARD_WRS >> 1;
}
static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)

net/sunrpc/xprtrdma/frwr_ops.c

@ -7,67 +7,37 @@
/* Lightweight memory registration using Fast Registration Work
* Requests (FRWR).
*
* FRWR features ordered asynchronous registration and deregistration
* of arbitrarily sized memory regions. This is the fastest and safest
* FRWR features ordered asynchronous registration and invalidation
* of arbitrarily-sized memory regions. This is the fastest and safest
* but most complex memory registration mode.
*/
/* Normal operation
*
* A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
* A Memory Region is prepared for RDMA Read or Write using a FAST_REG
* Work Request (frwr_map). When the RDMA operation is finished, this
* Memory Region is invalidated using a LOCAL_INV Work Request
* (frwr_unmap_sync).
* (frwr_unmap_async and frwr_unmap_sync).
*
* Typically these Work Requests are not signaled, and neither are RDMA
* SEND Work Requests (with the exception of signaling occasionally to
* prevent provider work queue overflows). This greatly reduces HCA
* Typically FAST_REG Work Requests are not signaled, and neither are
* RDMA Send Work Requests (with the exception of signaling occasionally
* to prevent provider work queue overflows). This greatly reduces HCA
* interrupt workload.
*
* As an optimization, frwr_unmap marks MRs INVALID before the
* LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
* rb_mrs immediately so that no work (like managing a linked list
* under a spinlock) is needed in the completion upcall.
*
* But this means that frwr_map() can occasionally encounter an MR
* that is INVALID but the LOCAL_INV WR has not completed. Work Queue
* ordering prevents a subsequent FAST_REG WR from executing against
* that MR while it is still being invalidated.
*/
/* Transport recovery
*
* ->op_map and the transport connect worker cannot run at the same
* time, but ->op_unmap can fire while the transport connect worker
* is running. Thus MR recovery is handled in ->op_map, to guarantee
* that recovered MRs are owned by a sending RPC, and not one where
* ->op_unmap could fire at the same time transport reconnect is
* being done.
* frwr_map and frwr_unmap_* cannot run at the same time the transport
* connect worker is running. The connect worker holds the transport
* send lock, just as ->send_request does. This prevents frwr_map and
* the connect worker from running concurrently. When a connection is
* closed, the Receive completion queue is drained before allowing
* the connect worker to get control. This prevents frwr_unmap and the
* connect worker from running concurrently.
*
* When the underlying transport disconnects, MRs are left in one of
* four states:
*
* INVALID: The MR was not in use before the QP entered ERROR state.
*
* VALID: The MR was registered before the QP entered ERROR state.
*
* FLUSHED_FR: The MR was being registered when the QP entered ERROR
* state, and the pending WR was flushed.
*
* FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
* state, and the pending WR was flushed.
*
* When frwr_map encounters FLUSHED and VALID MRs, they are recovered
* with ib_dereg_mr and then are re-initialized. Because MR recovery
* allocates fresh resources, it is deferred to a workqueue, and the
* recovered MRs are placed back on the rb_mrs list when recovery is
* complete. frwr_map allocates another MR for the current RPC while
* the broken MR is reset.
*
* To ensure that frwr_map doesn't encounter an MR that is marked
* INVALID but that is about to be flushed due to a previous transport
* disconnect, the transport connect worker attempts to drain all
* pending send queue WRs before the transport is reconnected.
* When the underlying transport disconnects, MRs that are in flight
* are flushed and are likely unusable. Thus all flushed MRs are
* destroyed. New MRs are created on demand.
*/
#include <linux/sunrpc/rpc_rdma.h>
@ -118,15 +88,8 @@ void frwr_release_mr(struct rpcrdma_mr *mr)
kfree(mr);
}
/* MRs are dynamically allocated, so simply clean up and release the MR.
* A replacement MR will subsequently be allocated on demand.
*/
static void
frwr_mr_recycle_worker(struct work_struct *work)
static void frwr_mr_recycle(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
trace_xprtrdma_mr_recycle(mr);
if (mr->mr_dir != DMA_NONE) {
@ -136,14 +99,40 @@ frwr_mr_recycle_worker(struct work_struct *work)
mr->mr_dir = DMA_NONE;
}
spin_lock(&r_xprt->rx_buf.rb_mrlock);
spin_lock(&r_xprt->rx_buf.rb_lock);
list_del(&mr->mr_all);
r_xprt->rx_stats.mrs_recycled++;
spin_unlock(&r_xprt->rx_buf.rb_mrlock);
spin_unlock(&r_xprt->rx_buf.rb_lock);
frwr_release_mr(mr);
}
/* MRs are dynamically allocated, so simply clean up and release the MR.
* A replacement MR will subsequently be allocated on demand.
*/
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr,
mr_recycle);
frwr_mr_recycle(mr->mr_xprt, mr);
}
/* frwr_recycle - Discard MRs
* @req: request to reset
*
* Used after a reconnect. These MRs could be in flight, we can't
* tell. Safe thing to do is release them.
*/
void frwr_recycle(struct rpcrdma_req *req)
{
struct rpcrdma_mr *mr;
while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
frwr_mr_recycle(mr->mr_xprt, mr);
}
/* frwr_reset - Place MRs back on the free list
* @req: request to reset
*
@ -156,12 +145,10 @@ frwr_mr_recycle_worker(struct work_struct *work)
*/
void frwr_reset(struct rpcrdma_req *req)
{
while (!list_empty(&req->rl_registered)) {
struct rpcrdma_mr *mr;
mr = rpcrdma_mr_pop(&req->rl_registered);
rpcrdma_mr_unmap_and_put(mr);
}
while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
rpcrdma_mr_put(mr);
}
/**
@ -179,11 +166,14 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
struct ib_mr *frmr;
int rc;
/* NB: ib_alloc_mr and device drivers typically allocate
* memory with GFP_KERNEL.
*/
frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
if (IS_ERR(frmr))
goto out_mr_err;
sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
if (!sg)
goto out_list_err;
@ -203,8 +193,6 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
return rc;
out_list_err:
dprintk("RPC: %s: sg allocation failure\n",
__func__);
ib_dereg_mr(frmr);
return -ENOMEM;
}
@ -290,8 +278,8 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
ia->ri_max_frwr_depth);
ia->ri_max_segs =
DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
/* Reply chunks require segments for head and tail buffers */
ia->ri_max_segs += 2;
if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
@ -323,31 +311,25 @@ size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
* @nsegs: number of segments remaining
* @writing: true when RDMA Write will be used
* @xid: XID of RPC using the registered memory
* @out: initialized MR
* @mr: MR to fill in
*
* Prepare a REG_MR Work Request to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*
* Returns the next segment or a negative errno pointer.
* On success, the prepared MR is planted in @out.
* On success, @mr is filled in.
*/
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr **out)
struct rpcrdma_mr *mr)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
struct rpcrdma_mr *mr;
struct ib_mr *ibmr;
struct ib_reg_wr *reg_wr;
struct ib_mr *ibmr;
int i, n;
u8 key;
mr = rpcrdma_mr_get(r_xprt);
if (!mr)
goto out_getmr_err;
if (nsegs > ia->ri_max_frwr_depth)
nsegs = ia->ri_max_frwr_depth;
for (i = 0; i < nsegs;) {
@ -362,7 +344,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
++seg;
++i;
if (holes_ok)
if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
continue;
if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
@@ -397,22 +379,15 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
mr->mr_offset = ibmr->iova;
trace_xprtrdma_mr_map(mr);
*out = mr;
return seg;
out_getmr_err:
xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
return ERR_PTR(-EAGAIN);
out_dmamap_err:
mr->mr_dir = DMA_NONE;
trace_xprtrdma_frwr_sgerr(mr, i);
rpcrdma_mr_put(mr);
return ERR_PTR(-EIO);
out_mapmr_err:
trace_xprtrdma_frwr_maperr(mr, n);
rpcrdma_mr_recycle(mr);
return ERR_PTR(-EIO);
}
@@ -485,7 +460,7 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
if (mr->mr_handle == rep->rr_inv_rkey) {
list_del_init(&mr->mr_list);
trace_xprtrdma_mr_remoteinv(mr);
rpcrdma_mr_unmap_and_put(mr);
rpcrdma_mr_put(mr);
break; /* only one invalidated MR per RPC */
}
}
@@ -495,7 +470,7 @@ static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
if (wc->status != IB_WC_SUCCESS)
rpcrdma_mr_recycle(mr);
else
rpcrdma_mr_unmap_and_put(mr);
rpcrdma_mr_put(mr);
}
/**
@@ -532,8 +507,8 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_li_wake(wc, frwr);
complete(&frwr->fr_linv_done);
__frwr_release_mr(wc, mr);
complete(&frwr->fr_linv_done);
}
/**
@@ -562,8 +537,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*/
frwr = NULL;
prev = &first;
while (!list_empty(&req->rl_registered)) {
mr = rpcrdma_mr_pop(&req->rl_registered);
while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++;
@@ -632,11 +606,15 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
struct rpcrdma_frwr *frwr =
container_of(cqe, struct rpcrdma_frwr, fr_cqe);
struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
struct rpcrdma_rep *rep = mr->mr_req->rl_reply;
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_li_done(wc, frwr);
rpcrdma_complete_rqst(frwr->fr_req->rl_reply);
__frwr_release_mr(wc, mr);
/* Ensure @rep is generated before __frwr_release_mr */
smp_rmb();
rpcrdma_complete_rqst(rep);
}
/**
@@ -662,15 +640,13 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*/
frwr = NULL;
prev = &first;
while (!list_empty(&req->rl_registered)) {
mr = rpcrdma_mr_pop(&req->rl_registered);
while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++;
frwr = &mr->frwr;
frwr->fr_cqe.done = frwr_wc_localinv;
frwr->fr_req = req;
last = &frwr->fr_invwr;
last->next = NULL;
last->wr_cqe = &frwr->fr_cqe;

diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c

@@ -342,6 +342,32 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
return 0;
}
static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing,
struct rpcrdma_mr **mr)
{
*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
if (!*mr) {
*mr = rpcrdma_mr_get(r_xprt);
if (!*mr)
goto out_getmr_err;
trace_xprtrdma_mr_get(req);
(*mr)->mr_req = req;
}
rpcrdma_mr_push(*mr, &req->rl_registered);
return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
out_getmr_err:
trace_xprtrdma_nomrs(req);
xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
if (r_xprt->rx_ep.rep_connected != -ENODEV)
schedule_work(&r_xprt->rx_buf.rb_refresh_worker);
return ERR_PTR(-EAGAIN);
}
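/* rpcrdma_mr_prepare() reports failure through the returned segment
 * pointer using the kernel's ERR_PTR() convention, so the chunk
 * encoders below need only a single IS_ERR() test instead of a
 * separate status variable. A freestanding sketch of that
 * convention; the helpers are simplified rewrites of <linux/err.h>,
 * and -EAGAIN is open-coded as -11.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Stand-in for rpcrdma_mr_prepare(): the next segment on success,
 * an encoded errno when no MR can be found.
 */
static int *mr_prepare(int *seg, int have_mr)
{
	return have_mr ? seg + 1 : ERR_PTR(-11);
}

int main(void)
{
	int segs[4] = { 0 };
	int *next = mr_prepare(segs, 0);

	if (IS_ERR(next))
		printf("encoder returns %ld\n", PTR_ERR(next));	/* -11 */
	return 0;
}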
/* Register and XDR encode the Read list. Supports encoding a list of read
* segments that belong to a single read chunk.
*
@@ -356,9 +382,10 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
*
* Only a single @pos value is currently supported.
*/
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req,
struct rpc_rqst *rqst,
enum rpcrdma_chunktype rtype)
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
@@ -379,10 +406,9 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
return nsegs;
do {
seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_read_segment(xdr, mr, pos) < 0)
return -EMSGSIZE;
@@ -411,9 +437,10 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
*
* Only a single Write chunk is currently supported.
*/
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req,
struct rpc_rqst *rqst,
enum rpcrdma_chunktype wtype)
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
@@ -440,10 +467,9 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
@@ -474,9 +500,10 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
* Returns zero on success, or a negative errno if a failure occurred.
* @xdr is advanced to the next position in the stream.
*/
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req,
struct rpc_rqst *rqst,
enum rpcrdma_chunktype wtype)
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
@@ -501,10 +528,9 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
@@ -841,12 +867,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
* chunks. Very likely the connection has been replaced,
* so these registrations are invalid and unusable.
*/
while (unlikely(!list_empty(&req->rl_registered))) {
struct rpcrdma_mr *mr;
mr = rpcrdma_mr_pop(&req->rl_registered);
rpcrdma_mr_recycle(mr);
}
frwr_recycle(req);
/* This implementation supports the following combinations
* of chunk lists in one RPC-over-RDMA Call message:
@@ -1240,8 +1261,6 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
struct rpc_rqst *rqst = rep->rr_rqst;
int status;
xprt->reestablish_timeout = 0;
switch (rep->rr_proc) {
case rdma_msg:
status = rpcrdma_decode_msg(r_xprt, rep, rqst);
@@ -1300,6 +1319,12 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
u32 credits;
__be32 *p;
/* Any data means we had a useful conversation, so
* then we don't need to delay the next reconnect.
*/
if (xprt->reestablish_timeout)
xprt->reestablish_timeout = 0;
/* Fixed transport header fields */
xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
rep->rr_hdrbuf.head[0].iov_base, NULL);

diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c

@@ -423,8 +423,6 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
if (ep->rep_connected == -ENODEV)
return;
if (ep->rep_connected > 0)
xprt->reestablish_timeout = 0;
rpcrdma_ep_disconnect(ep, ia);
/* Prepare @xprt for the next connection by reinitializing
@@ -434,6 +432,7 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
xprt->cwnd = RPC_CWNDSHIFT;
out:
xprt->reestablish_timeout = 0;
++xprt->connect_cookie;
xprt_disconnect_done(xprt);
}
@@ -494,7 +493,7 @@ xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
* @reconnect_timeout: reconnect timeout after server disconnects
*
*/
static void xprt_rdma_tcp_set_connect_timeout(struct rpc_xprt *xprt,
static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
unsigned long connect_timeout,
unsigned long reconnect_timeout)
{
@@ -571,6 +570,7 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
return;
out_sleep:
set_bit(XPRT_CONGESTED, &xprt->state);
rpc_sleep_on(&xprt->backlog, task, NULL);
task->tk_status = -EAGAIN;
}
@@ -589,7 +589,8 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
memset(rqst, 0, sizeof(*rqst));
rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
rpc_wake_up_next(&xprt->backlog);
if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
clear_bit(XPRT_CONGESTED, &xprt->state);
}
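/* The pairing above keeps XPRT_CONGESTED truthful: a task that finds
 * no slot sets the bit before sleeping on the backlog, and a freed
 * slot clears it only when rpc_wake_up_next() finds no sleeper to
 * hand the slot to. A single-threaded model of that protocol; every
 * name here is a local stand-in, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

static bool congested;		/* models XPRT_CONGESTED */
static unsigned int backlog;	/* models the backlog wait queue */

static void alloc_slot_fails(void)
{
	congested = true;	/* set before sleeping: no wakeup is lost */
	backlog++;		/* rpc_sleep_on(&xprt->backlog, ...) */
}

static void free_slot(void)
{
	if (backlog) {		/* a sleeper gets the slot directly */
		backlog--;
		return;		/* still contended: bit stays set */
	}
	congested = false;	/* queue empty: safe to clear */
}

int main(void)
{
	alloc_slot_fails();
	free_slot();		/* hands off; congested remains true */
	free_slot();		/* no waiters; congested cleared */
	printf("congested = %d\n", congested);
	return 0;
}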
static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
@@ -803,7 +804,7 @@ static const struct rpc_xprt_ops xprt_rdma_procs = {
.send_request = xprt_rdma_send_request,
.close = xprt_rdma_close,
.destroy = xprt_rdma_destroy,
.set_connect_timeout = xprt_rdma_tcp_set_connect_timeout,
.set_connect_timeout = xprt_rdma_set_connect_timeout,
.print_stats = xprt_rdma_print_stats,
.enable_swap = xprt_rdma_enable_swap,
.disable_swap = xprt_rdma_disable_swap,

diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c

@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>
#include <asm-generic/barrier.h>
#include <asm/bitops.h>
@@ -74,8 +75,10 @@
* internal functions
*/
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mr_free(struct rpcrdma_mr *mr);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
gfp_t flags);
@@ -405,9 +408,8 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
struct rpcrdma_ep *ep = &r_xprt->rx_ep;
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_req *req;
struct rpcrdma_rep *rep;
cancel_delayed_work_sync(&buf->rb_refresh_worker);
cancel_work_sync(&buf->rb_refresh_worker);
/* This is similar to rpcrdma_ep_destroy, but:
* - Don't cancel the connect worker.
@@ -429,8 +431,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
/* The ULP is responsible for ensuring all DMA
* mappings and MRs are gone.
*/
list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
rpcrdma_reps_destroy(buf);
list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
@@ -604,10 +605,10 @@ void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
* Unlike a normal reconnection, a fresh PD and a new set
* of MRs and buffers are needed.
*/
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
struct ib_qp_init_attr *qp_init_attr)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int rc, err;
trace_xprtrdma_reinsert(r_xprt);
@@ -624,7 +625,7 @@ rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
}
rc = -ENETUNREACH;
err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
if (err) {
pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
goto out3;
@@ -641,16 +642,16 @@ rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
return rc;
}
static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
struct rpcrdma_ia *ia)
static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
struct ib_qp_init_attr *qp_init_attr)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct rdma_cm_id *id, *old;
int err, rc;
trace_xprtrdma_reconnect(r_xprt);
rpcrdma_ep_disconnect(ep, ia);
rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);
rc = -EHOSTUNREACH;
id = rpcrdma_create_id(r_xprt, ia);
@@ -672,7 +673,7 @@ rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
goto out_destroy;
}
err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
if (err)
goto out_destroy;
@@ -697,25 +698,27 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
rx_ia);
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct ib_qp_init_attr qp_init_attr;
int rc;
retry:
memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
switch (ep->rep_connected) {
case 0:
dprintk("RPC: %s: connecting...\n", __func__);
rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
if (rc) {
rc = -ENETUNREACH;
goto out_noupdate;
}
break;
case -ENODEV:
rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr);
if (rc)
goto out_noupdate;
break;
default:
rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr);
if (rc)
goto out;
}
@@ -729,6 +732,8 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
if (rc)
goto out;
if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
if (ep->rep_connected <= 0) {
if (ep->rep_connected == -EAGAIN)
@@ -942,14 +947,12 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
unsigned int count;
LIST_HEAD(free);
LIST_HEAD(all);
for (count = 0; count < ia->ri_max_segs; count++) {
struct rpcrdma_mr *mr;
int rc;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
mr = kzalloc(sizeof(*mr), GFP_NOFS);
if (!mr)
break;
@@ -961,15 +964,13 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
mr->mr_xprt = r_xprt;
list_add(&mr->mr_list, &free);
list_add(&mr->mr_all, &all);
spin_lock(&buf->rb_lock);
list_add(&mr->mr_list, &buf->rb_mrs);
list_add(&mr->mr_all, &buf->rb_all_mrs);
spin_unlock(&buf->rb_lock);
}
spin_lock(&buf->rb_mrlock);
list_splice(&free, &buf->rb_mrs);
list_splice(&all, &buf->rb_all);
r_xprt->rx_stats.mrs_allocated += count;
spin_unlock(&buf->rb_mrlock);
trace_xprtrdma_createmrs(r_xprt, count);
}
@@ -977,7 +978,7 @@ static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
rb_refresh_worker.work);
rb_refresh_worker);
struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
rx_buf);
@@ -999,12 +1000,18 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
struct rpcrdma_regbuf *rb;
struct rpcrdma_req *req;
size_t maxhdrsize;
req = kzalloc(sizeof(*req), flags);
if (req == NULL)
goto out1;
rb = rpcrdma_regbuf_alloc(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags);
/* Compute maximum header buffer size in bytes */
maxhdrsize = rpcrdma_fixed_maxsz + 3 +
r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz;
maxhdrsize *= sizeof(__be32);
rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
DMA_TO_DEVICE, flags);
if (!rb)
goto out2;
req->rl_rdmabuf = rb;
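/* A quick check of the header-buffer arithmetic, assuming
 * rpcrdma_fixed_maxsz = 4 and rpcrdma_readchunk_maxsz = 6 XDR words
 * (the enum values defined earlier in rpc_rdma.c) together with the
 * 16-segment cap from xprt_rdma.h:
 */
#include <stdio.h>

int main(void)
{
	unsigned int fixed = 4;		/* xid, vers, credits, proc */
	unsigned int readchunk = 6;	/* discriminator + position + handle
					 * + length + 64-bit offset */
	unsigned int max_segs = 16;	/* RPCRDMA_MAX_HDR_SEGS */
	unsigned int words = fixed + 3 + max_segs * readchunk;
	unsigned int bytes = words * 4;	/* sizeof(__be32) */
	unsigned int buf = 1;

	while (buf < bytes)		/* __roundup_pow_of_two() */
		buf <<= 1;
	printf("%u words = %u bytes -> %u-byte regbuf\n", words, bytes, buf);
	return 0;			/* 103 words = 412 bytes -> 512 */
}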
@@ -1018,6 +1025,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
if (!req->rl_recvbuf)
goto out4;
INIT_LIST_HEAD(&req->rl_free_mrs);
INIT_LIST_HEAD(&req->rl_registered);
spin_lock(&buffer->rb_lock);
list_add(&req->rl_all, &buffer->rb_allreqs);
@@ -1065,6 +1073,40 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
return NULL;
}
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
rpcrdma_regbuf_free(rep->rr_rdmabuf);
kfree(rep);
}
static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
struct llist_node *node;
/* Calls to llist_del_first are required to be serialized */
node = llist_del_first(&buf->rb_free_reps);
if (!node)
return NULL;
return llist_entry(node, struct rpcrdma_rep, rr_node);
}
static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
struct rpcrdma_rep *rep)
{
if (!rep->rr_temp)
llist_add(&rep->rr_node, &buf->rb_free_reps);
else
rpcrdma_rep_destroy(rep);
}
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
struct rpcrdma_rep *rep;
while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
rpcrdma_rep_destroy(rep);
}
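/* The rep pool trades a spinlocked list for an llist: llist_add() is
 * lock-free upstream, so any context may return a rep, while
 * llist_del_first() callers must serialize among themselves (hence
 * the comment above). A single-threaded userspace model of that
 * split; the list here is an ordinary pointer chain, not the real
 * llist implementation.
 */
#include <stdio.h>

struct rep { struct rep *next; int id; };

static struct rep *free_reps;		/* models rb_free_reps */

/* models llist_add(): callable from any context upstream */
static void rep_put(struct rep *r)
{
	r->next = free_reps;
	free_reps = r;
}

/* models llist_del_first(): callers must not run concurrently */
static struct rep *rep_get(void)
{
	struct rep *r = free_reps;

	if (r)
		free_reps = r->next;
	return r;
}

int main(void)
{
	struct rep a = { .id = 1 }, b = { .id = 2 };
	struct rep *r;

	rep_put(&a);
	rep_put(&b);
	while ((r = rep_get()))
		printf("reusing rep %d\n", r->id);
	return 0;
}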
/**
* rpcrdma_buffer_create - Create initial set of req/rep objects
* @r_xprt: transport instance to (re)initialize
@@ -1078,12 +1120,10 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
buf->rb_bc_srv_max_requests = 0;
spin_lock_init(&buf->rb_mrlock);
spin_lock_init(&buf->rb_lock);
INIT_LIST_HEAD(&buf->rb_mrs);
INIT_LIST_HEAD(&buf->rb_all);
INIT_DELAYED_WORK(&buf->rb_refresh_worker,
rpcrdma_mr_refresh_worker);
INIT_LIST_HEAD(&buf->rb_all_mrs);
INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);
rpcrdma_mrs_create(r_xprt);
@@ -1102,7 +1142,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
}
buf->rb_credits = 1;
INIT_LIST_HEAD(&buf->rb_recv_bufs);
init_llist_head(&buf->rb_free_reps);
rc = rpcrdma_sendctxs_create(r_xprt);
if (rc)
@@ -1114,12 +1154,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
return rc;
}
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
rpcrdma_regbuf_free(rep->rr_rdmabuf);
kfree(rep);
}
/**
* rpcrdma_req_destroy - Destroy an rpcrdma_req object
* @req: unused object to be destroyed
@@ -1127,11 +1161,13 @@ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
* This function assumes that the caller prevents concurrent device
* unload and transport tear-down.
*/
void
rpcrdma_req_destroy(struct rpcrdma_req *req)
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
list_del(&req->rl_all);
while (!list_empty(&req->rl_free_mrs))
rpcrdma_mr_free(rpcrdma_mr_pop(&req->rl_free_mrs));
rpcrdma_regbuf_free(req->rl_recvbuf);
rpcrdma_regbuf_free(req->rl_sendbuf);
rpcrdma_regbuf_free(req->rl_rdmabuf);
@@ -1147,25 +1183,19 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
unsigned int count;
count = 0;
spin_lock(&buf->rb_mrlock);
while (!list_empty(&buf->rb_all)) {
mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
spin_lock(&buf->rb_lock);
while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
struct rpcrdma_mr,
mr_all)) != NULL) {
list_del(&mr->mr_all);
spin_unlock(&buf->rb_mrlock);
/* Ensure MW is not on any rl_registered list */
if (!list_empty(&mr->mr_list))
list_del(&mr->mr_list);
spin_unlock(&buf->rb_lock);
frwr_release_mr(mr);
count++;
spin_lock(&buf->rb_mrlock);
spin_lock(&buf->rb_lock);
}
spin_unlock(&buf->rb_mrlock);
spin_unlock(&buf->rb_lock);
r_xprt->rx_stats.mrs_allocated = 0;
dprintk("RPC: %s: released %u MRs\n", __func__, count);
}
/**
@@ -1179,18 +1209,10 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
cancel_delayed_work_sync(&buf->rb_refresh_worker);
cancel_work_sync(&buf->rb_refresh_worker);
rpcrdma_sendctxs_destroy(buf);
while (!list_empty(&buf->rb_recv_bufs)) {
struct rpcrdma_rep *rep;
rep = list_first_entry(&buf->rb_recv_bufs,
struct rpcrdma_rep, rr_list);
list_del(&rep->rr_list);
rpcrdma_rep_destroy(rep);
}
rpcrdma_reps_destroy(buf);
while (!list_empty(&buf->rb_send_bufs)) {
struct rpcrdma_req *req;
@@ -1215,54 +1237,20 @@ struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_mr *mr = NULL;
struct rpcrdma_mr *mr;
spin_lock(&buf->rb_mrlock);
if (!list_empty(&buf->rb_mrs))
spin_lock(&buf->rb_lock);
mr = rpcrdma_mr_pop(&buf->rb_mrs);
spin_unlock(&buf->rb_mrlock);
if (!mr)
goto out_nomrs;
spin_unlock(&buf->rb_lock);
return mr;
out_nomrs:
trace_xprtrdma_nomrs(r_xprt);
if (r_xprt->rx_ep.rep_connected != -ENODEV)
schedule_delayed_work(&buf->rb_refresh_worker, 0);
/* Allow the reply handler and refresh worker to run */
cond_resched();
return NULL;
}
static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
spin_lock(&buf->rb_mrlock);
rpcrdma_mr_push(mr, &buf->rb_mrs);
spin_unlock(&buf->rb_mrlock);
}
/**
* rpcrdma_mr_put - Release an rpcrdma_mr object
* @mr: object to release
* rpcrdma_mr_put - DMA unmap an MR and release it
* @mr: MR to release
*
*/
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}
/**
* rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
* @mr: object to release
*
*/
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
void rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
@@ -1272,7 +1260,19 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
mr->mr_sg, mr->mr_nents, mr->mr_dir);
mr->mr_dir = DMA_NONE;
}
__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}
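/* Return an MR from its owning req's cache to the transport-wide
 * rb_mrs pool. In this series only the rl_free_mrs drain in
 * rpcrdma_req_destroy() takes this path; normal completion keeps
 * the MR cached on the req via rpcrdma_mr_put() above.
 */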
static void rpcrdma_mr_free(struct rpcrdma_mr *mr)
{
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
mr->mr_req = NULL;
spin_lock(&buf->rb_lock);
rpcrdma_mr_push(mr, &buf->rb_mrs);
spin_unlock(&buf->rb_lock);
}
/**
@@ -1303,39 +1303,24 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
*/
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
struct rpcrdma_rep *rep = req->rl_reply;
if (req->rl_reply)
rpcrdma_rep_put(buffers, req->rl_reply);
req->rl_reply = NULL;
spin_lock(&buffers->rb_lock);
list_add(&req->rl_list, &buffers->rb_send_bufs);
if (rep) {
if (!rep->rr_temp) {
list_add(&rep->rr_list, &buffers->rb_recv_bufs);
rep = NULL;
}
}
spin_unlock(&buffers->rb_lock);
if (rep)
rpcrdma_rep_destroy(rep);
}
/*
* Put reply buffers back into pool when not attached to
* request. This happens in error conditions.
/**
* rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
* @rep: rep to release
*
* Used after error conditions.
*/
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
if (!rep->rr_temp) {
spin_lock(&buffers->rb_lock);
list_add(&rep->rr_list, &buffers->rb_recv_bufs);
spin_unlock(&buffers->rb_lock);
} else {
rpcrdma_rep_destroy(rep);
}
rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}
/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
@@ -1483,7 +1468,7 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
count = 0;
needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
if (ep->rep_receive_count > needed)
if (likely(ep->rep_receive_count > needed))
goto out;
needed -= ep->rep_receive_count;
if (!temp)
@@ -1491,21 +1476,9 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
/* fast path: all needed reps can be found on the free list */
wr = NULL;
spin_lock(&buf->rb_lock);
while (needed) {
rep = list_first_entry_or_null(&buf->rb_recv_bufs,
struct rpcrdma_rep, rr_list);
rep = rpcrdma_rep_get_locked(buf);
if (!rep)
break;
list_del(&rep->rr_list);
rep->rr_recv_wr.next = wr;
wr = &rep->rr_recv_wr;
--needed;
}
spin_unlock(&buf->rb_lock);
while (needed) {
rep = rpcrdma_rep_create(r_xprt, temp);
if (!rep)
break;
@@ -1523,7 +1496,7 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
goto release_wrs;
trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
trace_xprtrdma_post_recv(rep);
++count;
}

diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h

@@ -47,6 +47,7 @@
#include <linux/atomic.h> /* atomic_t, etc */
#include <linux/kref.h> /* struct kref */
#include <linux/workqueue.h> /* struct work_struct */
#include <linux/llist.h>
#include <rdma/rdma_cm.h> /* RDMA connection api */
#include <rdma/ib_verbs.h> /* RDMA verbs api */
@@ -117,9 +118,6 @@ struct rpcrdma_ep {
#endif
/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
*
* The below structure appears at the front of a large region of kmalloc'd
* memory, which always starts on a good alignment boundary.
*/
struct rpcrdma_regbuf {
@@ -158,25 +156,22 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
/* To ensure a transport can always make forward progress,
* the number of RDMA segments allowed in header chunk lists
* is capped at 8. This prevents less-capable devices and
* memory registrations from overrunning the Send buffer
* while building chunk lists.
* is capped at 16. This prevents less-capable devices from
* overrunning the Send buffer while building chunk lists.
*
* Elements of the Read list take up more room than the
* Write list or Reply chunk. 8 read segments means the Read
* list (or Write list or Reply chunk) cannot consume more
* than
* Write list or Reply chunk. 16 read segments means the
* chunk lists cannot consume more than
*
* ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
* ((16 + 2) * read segment size) + 1 XDR words,
*
* And the fixed part of the header is another 24 bytes.
*
* The smallest inline threshold is 1024 bytes, ensuring that
* at least 750 bytes are available for RPC messages.
* or about 400 bytes. The fixed part of the header is
* another 24 bytes. Thus when the inline threshold is
* 1024 bytes, at least 600 bytes are available for RPC
* message bodies.
*/
enum {
RPCRDMA_MAX_HDR_SEGS = 8,
RPCRDMA_HDRBUF_SIZE = 256,
RPCRDMA_MAX_HDR_SEGS = 16,
};
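/* Checking those numbers, taking a read segment as 5 XDR words
 * (position, handle, length, 64-bit offset -- an assumed reading of
 * "read segment size" above):
 *
 *	(16 + 2) * 5 + 1 = 91 XDR words = 364 bytes of chunk lists
 *	1024 - 364 - 24  = 636 bytes left for the RPC message body
 *
 * which is consistent with the "about 400" and "at least 600"
 * figures in the comment.
 */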
/*
@@ -206,7 +201,7 @@ struct rpcrdma_rep {
struct rpc_rqst *rr_rqst;
struct xdr_buf rr_hdrbuf;
struct xdr_stream rr_stream;
struct list_head rr_list;
struct llist_node rr_node;
struct ib_recv_wr rr_recv_wr;
};
@@ -240,20 +235,20 @@ struct rpcrdma_sendctx {
* An external memory region is any buffer or page that is registered
* on the fly (i.e., not pre-registered).
*/
struct rpcrdma_req;
struct rpcrdma_frwr {
struct ib_mr *fr_mr;
struct ib_cqe fr_cqe;
struct completion fr_linv_done;
struct rpcrdma_req *fr_req;
union {
struct ib_reg_wr fr_regwr;
struct ib_send_wr fr_invwr;
};
};
struct rpcrdma_req;
struct rpcrdma_mr {
struct list_head mr_list;
struct rpcrdma_req *mr_req;
struct scatterlist *mr_sg;
int mr_nents;
enum dma_data_direction mr_dir;
@@ -331,7 +326,8 @@ struct rpcrdma_req {
struct list_head rl_all;
struct kref rl_kref;
struct list_head rl_registered; /* registered segments */
struct list_head rl_free_mrs;
struct list_head rl_registered;
struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};
@@ -344,7 +340,7 @@
static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
list_add_tail(&mr->mr_list, list);
list_add(&mr->mr_list, list);
}
static inline struct rpcrdma_mr *
@@ -352,7 +348,8 @@ rpcrdma_mr_pop(struct list_head *list)
{
struct rpcrdma_mr *mr;
mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
if (mr)
list_del_init(&mr->mr_list);
return mr;
}
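/* With list_add() in rpcrdma_mr_push() and list_first_entry_or_null()
 * in rpcrdma_mr_pop(), the free-MR lists behave as LIFO stacks: the
 * MR returned most recently is handed out first. A toy demonstration
 * of the ordering difference, with a plain pointer chain standing in
 * for struct list_head:
 */
#include <stdio.h>
#include <stddef.h>

struct mr { int id; struct mr *next; };

/* like list_add(): insert at the head -> LIFO pop order */
static void push_head(struct mr **list, struct mr *m)
{
	m->next = *list;
	*list = m;
}

/* like list_add_tail(): append at the tail -> FIFO pop order */
static void push_tail(struct mr **list, struct mr *m)
{
	struct mr **p = list;

	m->next = NULL;
	while (*p)
		p = &(*p)->next;
	*p = m;
}

int main(void)
{
	struct mr a = { .id = 1 }, b = { .id = 2 };
	struct mr c = { .id = 1 }, d = { .id = 2 };
	struct mr *lifo = NULL, *fifo = NULL;

	push_head(&lifo, &a);
	push_head(&lifo, &b);
	push_tail(&fifo, &c);
	push_tail(&fifo, &d);
	printf("LIFO pops %d first; FIFO pops %d first\n",
	       lifo->id, fifo->id);	/* 2 vs 1 */
	return 0;
}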
@@ -364,19 +361,19 @@ rpcrdma_mr_pop(struct list_head *list)
* One of these is associated with a transport instance
*/
struct rpcrdma_buffer {
spinlock_t rb_mrlock; /* protect rb_mrs list */
spinlock_t rb_lock;
struct list_head rb_send_bufs;
struct list_head rb_mrs;
struct list_head rb_all;
unsigned long rb_sc_head;
unsigned long rb_sc_tail;
unsigned long rb_sc_last;
struct rpcrdma_sendctx **rb_sc_ctxs;
spinlock_t rb_lock; /* protect buf lists */
struct list_head rb_send_bufs;
struct list_head rb_recv_bufs;
struct list_head rb_allreqs;
struct list_head rb_all_mrs;
struct llist_head rb_free_reps;
u32 rb_max_requests;
u32 rb_credits; /* most recent credit grant */
@@ -384,7 +381,7 @@ struct rpcrdma_buffer {
u32 rb_bc_srv_max_requests;
u32 rb_bc_max_requests;
struct delayed_work rb_refresh_worker;
struct work_struct rb_refresh_worker;
};
/*
@@ -490,7 +487,6 @@ struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);
struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mr_put(struct rpcrdma_mr *mr);
void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
static inline void
rpcrdma_mr_recycle(struct rpcrdma_mr *mr)
@@ -546,6 +542,7 @@ rpcrdma_data_dir(bool writing)
/* Memory registration calls xprtrdma/frwr_ops.c
*/
bool frwr_is_supported(struct ib_device *device);
void frwr_recycle(struct rpcrdma_req *req);
void frwr_reset(struct rpcrdma_req *req);
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
@@ -554,7 +551,7 @@ size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr **mr);
struct rpcrdma_mr *mr);
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);

diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c

@@ -562,10 +562,14 @@ xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
printk(KERN_WARNING "Callback slot table overflowed\n");
return -ESHUTDOWN;
}
if (transport->recv.copied && !req->rq_private_buf.len)
return -ESHUTDOWN;
ret = xs_read_stream_request(transport, msg, flags, req);
if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
xprt_complete_bc_request(req, transport->recv.copied);
else
req->rq_private_buf.len = transport->recv.copied;
return ret;
}
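/* Both stream readers now recognize one specific hazard: part of a
 * record has already been copied (transport->recv.copied != 0) but
 * the request's receive buffer length has since been reset to zero
 * underneath the reader, so receiving the remainder would land in a
 * buffer the RPC layer no longer owns. The data is truncated (or the
 * read shut down) instead. A compact mirror of that predicate; the
 * helper name is invented for illustration.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool buffer_was_reset(size_t copied, size_t buf_len)
{
	return copied && !buf_len;
}

int main(void)
{
	printf("%d\n", buffer_was_reset(0, 0));		/* 0: fresh record */
	printf("%d\n", buffer_was_reset(4096, 0));	/* 1: truncate */
	printf("%d\n", buffer_was_reset(4096, 8192));	/* 0: keep reading */
	return 0;
}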
@@ -587,7 +591,7 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
/* Look up and lock the request corresponding to the given XID */
spin_lock(&xprt->queue_lock);
req = xprt_lookup_rqst(xprt, transport->recv.xid);
if (!req) {
if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
msg->msg_flags |= MSG_TRUNC;
goto out;
}
@@ -599,6 +603,8 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
spin_lock(&xprt->queue_lock);
if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
xprt_complete_rqst(req->rq_task, transport->recv.copied);
else
req->rq_private_buf.len = transport->recv.copied;
xprt_unpin_rqst(req);
out:
spin_unlock(&xprt->queue_lock);