* [PATCH] nfs4: limit callback decoding to received bytes
@ 2015-11-20 14:55 Benjamin Coddington
  2015-11-20 15:18 ` Chuck Lever
  2015-12-07 19:52 ` Trond Myklebust
  0 siblings, 2 replies; 7+ messages in thread
From: Benjamin Coddington @ 2015-11-20 14:55 UTC
  To: Trond Myklebust, J. Bruce Fields, Anna Schumaker, Jeff Layton; +Cc: linux-nfs

A truncated cb_compound request will cause the client to decode null or
data from a previous callback for the nfs4.1 backchannel case, or uninitialized
data for the nfs4.0 case. This is because the path through
svc_process_common() advances the request's iov_base and decrements iov_len
without adjusting the overall xdr_buf's len field.  That causes
xdr_init_decode() to set up the xdr_stream with an incorrect length in
nfs4_callback_compound().

Fixing this for the nfs4.1 backchannel case first requires setting the
correct iov_len and page_len based on the length of received data in the
same manner as the nfs4.0 case.

Then the request's xdr_buf length can be adjusted for both cases based upon
the remaining iov_len and page_len.
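
To illustrate (hypothetical sizes, not part of the patch):

	/*
	 * Possible state on entry to nfs4_callback_compound(), after
	 * svc_process_common() has consumed the RPC headers:
	 *
	 *	rq_arg.head[0].iov_len == 0	(headers already decoded)
	 *	rq_arg.page_len        == 0	(nothing received into pages)
	 *	rq_arg.len             == 80	(still the full message size)
	 *
	 * The xdr_buf invariant
	 *	len == head[0].iov_len + page_len + tail[0].iov_len
	 * no longer holds, and xdr_init_decode() sizes the stream from
	 * the stale len field.
	 */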

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
---
 fs/nfs/callback_xdr.c         |    7 +++++--
 net/sunrpc/backchannel_rqst.c |    8 ++++++++
 net/sunrpc/svc.c              |    1 +
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 6b1697a..1c8213e 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -76,7 +76,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
 
 	p = xdr_inline_decode(xdr, nbytes);
 	if (unlikely(p == NULL))
-		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
+		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
+							"or truncated request.\n");
 	return p;
 }
 
@@ -892,6 +893,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 	struct cb_compound_hdr_arg hdr_arg = { 0 };
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
+	struct xdr_buf *rq_arg = &rqstp->rq_arg;
 	__be32 *p, status;
 	struct cb_process_state cps = {
 		.drc_status = 0,
@@ -903,7 +905,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 
 	dprintk("%s: start\n", __func__);
 
-	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
+	rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
+	xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
 
 	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
 	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 6255d14..d92cee1 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -333,12 +333,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 {
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct svc_serv *bc_serv = xprt->bc_serv;
+	struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
 
 	spin_lock(&xprt->bc_pa_lock);
 	list_del(&req->rq_bc_pa_list);
 	xprt_dec_alloc_count(xprt, 1);
 	spin_unlock(&xprt->bc_pa_lock);
 
+	if (copied <= rq_rcv_buf->head[0].iov_len) {
+		rq_rcv_buf->head[0].iov_len = copied;
+		rq_rcv_buf->page_len = 0;
+	} else {
+		rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
+	}
+
 	req->rq_private_buf.len = copied;
 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a8f579d..af9f987 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+	rqstp->rq_arg.len = req->rq_private_buf.len;
 
 	/* reset result send buffer "put" position */
 	resv->iov_len = 0;
-- 
1.7.1



* Re: [PATCH] nfs4: limit callback decoding to received bytes
  2015-11-20 14:55 [PATCH] nfs4: limit callback decoding to received bytes Benjamin Coddington
@ 2015-11-20 15:18 ` Chuck Lever
  2015-11-20 16:20   ` Benjamin Coddington
  2015-12-07 19:52 ` Trond Myklebust
  1 sibling, 1 reply; 7+ messages in thread
From: Chuck Lever @ 2015-11-20 15:18 UTC
  To: Benjamin Coddington
  Cc: Trond Myklebust, J. Bruce Fields, Anna Schumaker, Jeff Layton,
	Linux NFS Mailing List


> On Nov 20, 2015, at 9:55 AM, Benjamin Coddington <bcodding@redhat.com> wrote:
>
> A truncated cb_compound request will cause the client to decode null or
> data from a previous callback for the nfs4.1 backchannel case, or uninitialized
> data for the nfs4.0 case. This is because the path through
> svc_process_common() advances the request's iov_base and decrements iov_len
> without adjusting the overall xdr_buf's len field.  That causes
> xdr_init_decode() to set up the xdr_stream with an incorrect length in
> nfs4_callback_compound().
>
> Fixing this for the nfs4.1 backchannel case first requires setting the
> correct iov_len and page_len based on the length of received data in the
> same manner as the nfs4.0 case.
>
> Then the request's xdr_buf length can be adjusted for both cases based upon
> the remaining iov_len and page_len.
>
> Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
> ---
> fs/nfs/callback_xdr.c         |    7 +++++--
> net/sunrpc/backchannel_rqst.c |    8 ++++++++
> net/sunrpc/svc.c              |    1 +
> 3 files changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
> index 6b1697a..1c8213e 100644
> --- a/fs/nfs/callback_xdr.c
> +++ b/fs/nfs/callback_xdr.c
> @@ -76,7 +76,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
>
> 	p = xdr_inline_decode(xdr, nbytes);
> 	if (unlikely(p == NULL))
> -		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
> +		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
> +							"or truncated request.\n");
> 	return p;
> }
>
> @@ -892,6 +893,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
> 	struct cb_compound_hdr_arg hdr_arg = { 0 };
> 	struct cb_compound_hdr_res hdr_res = { NULL };
> 	struct xdr_stream xdr_in, xdr_out;
> +	struct xdr_buf *rq_arg = &rqstp->rq_arg;
> 	__be32 *p, status;
> 	struct cb_process_state cps = {
> 		.drc_status = 0,
> @@ -903,7 +905,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
>
> 	dprintk("%s: start\n", __func__);
>
> -	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
> +	rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
> +	xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
>
> 	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
> 	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
> diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
> index 6255d14..d92cee1 100644
> --- a/net/sunrpc/backchannel_rqst.c
> +++ b/net/sunrpc/backchannel_rqst.c
> @@ -333,12 +333,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
> {
> 	struct rpc_xprt *xprt = req->rq_xprt;
> 	struct svc_serv *bc_serv = xprt->bc_serv;
> +	struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
>
> 	spin_lock(&xprt->bc_pa_lock);
> 	list_del(&req->rq_bc_pa_list);
> 	xprt_dec_alloc_count(xprt, 1);
> 	spin_unlock(&xprt->bc_pa_lock);
>
> +	if (copied <= rq_rcv_buf->head[0].iov_len) {
> +		rq_rcv_buf->head[0].iov_len = copied;
> +		rq_rcv_buf->page_len = 0;
> +	} else {
> +		rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
> +	}
> +

Are similar changes needed in rpcrdma_bc_receive_call ?


> 	req->rq_private_buf.len = copied;
> 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
>
> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> index a8f579d..af9f987 100644
> --- a/net/sunrpc/svc.c
> +++ b/net/sunrpc/svc.c
> @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
> 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
> 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
> 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
> +	rqstp->rq_arg.len = req->rq_private_buf.len;
>
> 	/* reset result send buffer "put" position */
> 	resv->iov_len = 0;
> --
> 1.7.1

--
Chuck Lever


* Re: [PATCH] nfs4: limit callback decoding to received bytes
  2015-11-20 15:18 ` Chuck Lever
@ 2015-11-20 16:20   ` Benjamin Coddington
  2015-11-20 17:24     ` Chuck Lever
  0 siblings, 1 reply; 7+ messages in thread
From: Benjamin Coddington @ 2015-11-20 16:20 UTC
  To: Chuck Lever
  Cc: Trond Myklebust, J. Bruce Fields, Anna Schumaker, Jeff Layton,
	Linux NFS Mailing List

On Fri, 20 Nov 2015, Chuck Lever wrote:

>
> > On Nov 20, 2015, at 9:55 AM, Benjamin Coddington <bcodding@redhat.com> wrote:
> >
> > A truncated cb_compound request will cause the client to decode null or
> > data from a previous callback for the nfs4.1 backchannel case, or uninitialized
> > data for the nfs4.0 case. This is because the path through
> > svc_process_common() advances the request's iov_base and decrements iov_len
> > without adjusting the overall xdr_buf's len field.  That causes
> > xdr_init_decode() to set up the xdr_stream with an incorrect length in
> > nfs4_callback_compound().
> >
> > Fixing this for the nfs4.1 backchannel case first requires setting the
> > correct iov_len and page_len based on the length of received data in the
> > same manner as the nfs4.0 case.
> >
> > Then the request's xdr_buf length can be adjusted for both cases based upon
> > the remaining iov_len and page_len.
> >
> > Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
> > ---
> > fs/nfs/callback_xdr.c         |    7 +++++--
> > net/sunrpc/backchannel_rqst.c |    8 ++++++++
> > net/sunrpc/svc.c              |    1 +
> > 3 files changed, 14 insertions(+), 2 deletions(-)
> >
> > diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
> > index 6b1697a..1c8213e 100644
> > --- a/fs/nfs/callback_xdr.c
> > +++ b/fs/nfs/callback_xdr.c
> > @@ -76,7 +76,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
> >
> > 	p = xdr_inline_decode(xdr, nbytes);
> > 	if (unlikely(p == NULL))
> > -		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
> > +		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
> > +							"or truncated request.\n");
> > 	return p;
> > }
> >
> > @@ -892,6 +893,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
> > 	struct cb_compound_hdr_arg hdr_arg = { 0 };
> > 	struct cb_compound_hdr_res hdr_res = { NULL };
> > 	struct xdr_stream xdr_in, xdr_out;
> > +	struct xdr_buf *rq_arg = &rqstp->rq_arg;
> > 	__be32 *p, status;
> > 	struct cb_process_state cps = {
> > 		.drc_status = 0,
> > @@ -903,7 +905,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
> >
> > 	dprintk("%s: start\n", __func__);
> >
> > -	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
> > +	rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
> > +	xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
> >
> > 	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
> > 	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
> > diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
> > index 6255d14..d92cee1 100644
> > --- a/net/sunrpc/backchannel_rqst.c
> > +++ b/net/sunrpc/backchannel_rqst.c
> > @@ -333,12 +333,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
> > {
> > 	struct rpc_xprt *xprt = req->rq_xprt;
> > 	struct svc_serv *bc_serv = xprt->bc_serv;
> > +	struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
> >
> > 	spin_lock(&xprt->bc_pa_lock);
> > 	list_del(&req->rq_bc_pa_list);
> > 	xprt_dec_alloc_count(xprt, 1);
> > 	spin_unlock(&xprt->bc_pa_lock);
> >
> > +	if (copied <= rq_rcv_buf->head[0].iov_len) {
> > +		rq_rcv_buf->head[0].iov_len = copied;
> > +		rq_rcv_buf->page_len = 0;
> > +	} else {
> > +		rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
> > +	}
> > +
>
> Are similar changes needed in rpcrdma_bc_receive_call ?

Good question..  I was working on 4.3+ which was missing the rdma stuff, so
I overlooked this.

net/sunrpc/xprtrdma/backchannel.c:
324     size = rep->rr_len - RPCRDMA_HDRLEN_MIN;
...
347     memset(buf, 0, sizeof(*buf));
348     buf->head[0].iov_base = p;
349     buf->head[0].iov_len = size;

^^ that looks like it is going to set the iov_len to the size of the XDR
portion of the data, sans transport headers.. which means that it shouldn't
need the fixup just above.

In the case requiring the fixup iov_len was always set to 4096, and
decremented from there no matter the size of the request.
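
For contrast, the TCP backchannel preallocates its receive buffer with a
full page up front; paraphrasing net/sunrpc/backchannel_rqst.c from memory
(details may not match the tree exactly):

	/* the preallocated receive buffer always advertises a whole
	 * page, no matter how little data actually arrives
	 */
	xbufp = &req->rq_rcv_buf;
	xbufp->head[0].iov_base = page_address(page_rcv);
	xbufp->head[0].iov_len = PAGE_SIZE;	/* always 4096 */
	xbufp->page_len = 0;
	xbufp->len = PAGE_SIZE;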

Ben

> > 	req->rq_private_buf.len = copied;
> > 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
> >
> > diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> > index a8f579d..af9f987 100644
> > --- a/net/sunrpc/svc.c
> > +++ b/net/sunrpc/svc.c
> > @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
> > 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
> > 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
> > 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
> > +	rqstp->rq_arg.len = req->rq_private_buf.len;
> >
> > 	/* reset result send buffer "put" position */
> > 	resv->iov_len = 0;
> > --
> > 1.7.1
> >
>
> --
> Chuck Lever
>


* Re: [PATCH] nfs4: limit callback decoding to received bytes
  2015-11-20 16:20   ` Benjamin Coddington
@ 2015-11-20 17:24     ` Chuck Lever
  0 siblings, 0 replies; 7+ messages in thread
From: Chuck Lever @ 2015-11-20 17:24 UTC
  To: Benjamin Coddington
  Cc: Trond Myklebust, J. Bruce Fields, Anna Schumaker, Jeff Layton,
	Linux NFS Mailing List


> On Nov 20, 2015, at 11:20 AM, Benjamin Coddington <bcodding@redhat.com> wrote:
>
> On Fri, 20 Nov 2015, Chuck Lever wrote:
>
>>
>>> On Nov 20, 2015, at 9:55 AM, Benjamin Coddington <bcodding@redhat.com> wrote:
>>>
>>> A truncated cb_compound request will cause the client to decode null or
>>> data from a previous callback for the nfs4.1 backchannel case, or uninitialized
>>> data for the nfs4.0 case. This is because the path through
>>> svc_process_common() advances the request's iov_base and decrements iov_len
>>> without adjusting the overall xdr_buf's len field.  That causes
>>> xdr_init_decode() to set up the xdr_stream with an incorrect length in
>>> nfs4_callback_compound().
>>>
>>> Fixing this for the nfs4.1 backchannel case first requires setting the
>>> correct iov_len and page_len based on the length of received data in the
>>> same manner as the nfs4.0 case.
>>>
>>> Then the request's xdr_buf length can be adjusted for both cases based upon
>>> the remaining iov_len and page_len.
>>>
>>> Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
>>> ---
>>> fs/nfs/callback_xdr.c         |    7 +++++--
>>> net/sunrpc/backchannel_rqst.c |    8 ++++++++
>>> net/sunrpc/svc.c              |    1 +
>>> 3 files changed, 14 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
>>> index 6b1697a..1c8213e 100644
>>> --- a/fs/nfs/callback_xdr.c
>>> +++ b/fs/nfs/callback_xdr.c
>>> @@ -76,7 +76,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
>>>
>>> 	p = xdr_inline_decode(xdr, nbytes);
>>> 	if (unlikely(p == NULL))
>>> -		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
>>> +		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
>>> +							"or truncated request.\n");
>>> 	return p;
>>> }
>>>
>>> @@ -892,6 +893,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
>>> 	struct cb_compound_hdr_arg hdr_arg = { 0 };
>>> 	struct cb_compound_hdr_res hdr_res = { NULL };
>>> 	struct xdr_stream xdr_in, xdr_out;
>>> +	struct xdr_buf *rq_arg = &rqstp->rq_arg;
>>> 	__be32 *p, status;
>>> 	struct cb_process_state cps = {
>>> 		.drc_status = 0,
>>> @@ -903,7 +905,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
>>>
>>> 	dprintk("%s: start\n", __func__);
>>>
>>> -	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
>>> +	rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
>>> +	xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
>>>
>>> 	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
>>> 	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
>>> diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
>>> index 6255d14..d92cee1 100644
>>> --- a/net/sunrpc/backchannel_rqst.c
>>> +++ b/net/sunrpc/backchannel_rqst.c
>>> @@ -333,12 +333,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
>>> {
>>> 	struct rpc_xprt *xprt = req->rq_xprt;
>>> 	struct svc_serv *bc_serv = xprt->bc_serv;
>>> +	struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
>>>
>>> 	spin_lock(&xprt->bc_pa_lock);
>>> 	list_del(&req->rq_bc_pa_list);
>>> 	xprt_dec_alloc_count(xprt, 1);
>>> 	spin_unlock(&xprt->bc_pa_lock);
>>>
>>> +	if (copied <= rq_rcv_buf->head[0].iov_len) {
>>> +		rq_rcv_buf->head[0].iov_len = copied;
>>> +		rq_rcv_buf->page_len = 0;
>>> +	} else {
>>> +		rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
>>> +	}
>>> +
>>
>> Are similar changes needed in rpcrdma_bc_receive_call ?
>
> Good question..  I was working on 4.3+ which was missing the rdma stuff, so
> I overlooked this.
>
> net/sunrpc/xprtrdma/backchannel.c:
> 324     size = rep->rr_len - RPCRDMA_HDRLEN_MIN;
> ...
> 347     memset(buf, 0, sizeof(*buf));
> 348     buf->head[0].iov_base = p;
> 349     buf->head[0].iov_len = size;
>
> ^^ that looks like it is going to set the iov_len to the size of the XDR
> portion of the data, sans transport headers.. which means that it shouldn't
> need the fixup just above.
>
> In the case requiring the fixup iov_len was always set to 4096, and
> decremented from there no matter the size of the request.

Thanks for checking!


> Ben
>
>>> 	req->rq_private_buf.len = copied;
>>> 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
>>>
>>> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
>>> index a8f579d..af9f987 100644
>>> --- a/net/sunrpc/svc.c
>>> +++ b/net/sunrpc/svc.c
>>> @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
>>> 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
>>> 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
>>> 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
>>> +	rqstp->rq_arg.len = req->rq_private_buf.len;
>>>
>>> 	/* reset result send buffer "put" position */
>>> 	resv->iov_len = 0;
>>> --
>>> 1.7.1
>>
>> --
>> Chuck Lever

--
Chuck Lever


* Re: [PATCH] nfs4: limit callback decoding to received bytes
  2015-11-20 14:55 [PATCH] nfs4: limit callback decoding to received bytes Benjamin Coddington
  2015-11-20 15:18 ` Chuck Lever
@ 2015-12-07 19:52 ` Trond Myklebust
  2015-12-08 14:20   ` Benjamin Coddington
  1 sibling, 1 reply; 7+ messages in thread
From: Trond Myklebust @ 2015-12-07 19:52 UTC
  To: Benjamin Coddington
  Cc: J. Bruce Fields, Anna Schumaker, Jeff Layton,
	Linux NFS Mailing List

Hi Ben,

I've hit a few problems with this patch after it went upstream.

On Fri, Nov 20, 2015 at 6:55 AM, Benjamin Coddington
<bcodding@redhat.com> wrote:
> A truncated cb_compound request will cause the client to decode null or
> data from a previous callback for the nfs4.1 backchannel case, or uninitialized
> data for the nfs4.0 case. This is because the path through
> svc_process_common() advances the request's iov_base and decrements iov_len
> without adjusting the overall xdr_buf's len field.  That causes
> xdr_init_decode() to set up the xdr_stream with an incorrect length in
> nfs4_callback_compound().
>
> Fixing this for the nfs4.1 backchannel case first requires setting the
> correct iov_len and page_len based on the length of received data in the
> same manner as the nfs4.0 case.
>
> Then the request's xdr_buf length can be adjusted for both cases based upon
> the remaining iov_len and page_len.
>
> Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
> ---
>  fs/nfs/callback_xdr.c         |    7 +++++--
>  net/sunrpc/backchannel_rqst.c |    8 ++++++++
>  net/sunrpc/svc.c              |    1 +
>  3 files changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
> index 6b1697a..1c8213e 100644
> --- a/fs/nfs/callback_xdr.c
> +++ b/fs/nfs/callback_xdr.c
> @@ -76,7 +76,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
>
>         p = xdr_inline_decode(xdr, nbytes);
>         if (unlikely(p == NULL))
> -               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
> +               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
> +                                                       "or truncated request.\n");
>         return p;
>  }
>
> @@ -892,6 +893,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
>         struct cb_compound_hdr_arg hdr_arg = { 0 };
>         struct cb_compound_hdr_res hdr_res = { NULL };
>         struct xdr_stream xdr_in, xdr_out;
> +       struct xdr_buf *rq_arg = &rqstp->rq_arg;
>         __be32 *p, status;
>         struct cb_process_state cps = {
>                 .drc_status = 0,
> @@ -903,7 +905,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
>
>         dprintk("%s: start\n", __func__);
>
> -       xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
> +       rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;

This is redundant.

> +       xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
>
>         p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
>         xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
> diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
> index 6255d14..d92cee1 100644
> --- a/net/sunrpc/backchannel_rqst.c
> +++ b/net/sunrpc/backchannel_rqst.c
> @@ -333,12 +333,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
>  {
>         struct rpc_xprt *xprt = req->rq_xprt;
>         struct svc_serv *bc_serv = xprt->bc_serv;
> +       struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
>
>         spin_lock(&xprt->bc_pa_lock);
>         list_del(&req->rq_bc_pa_list);
>         xprt_dec_alloc_count(xprt, 1);
>         spin_unlock(&xprt->bc_pa_lock);
>
> +       if (copied <= rq_rcv_buf->head[0].iov_len) {
> +               rq_rcv_buf->head[0].iov_len = copied;
> +               rq_rcv_buf->page_len = 0;
> +       } else {
> +               rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
> +       }
> +

There is a problem with this approach: req->rq_rcv_buf is never reset,
and so NFSv4 callbacks are now broken.
The correct fix for this issue is to do the above adjustment in
bc_svc_process(), so that it only applies to the resulting
rqstp->rq_arg.

>         req->rq_private_buf.len = copied;
>         set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
>
> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> index a8f579d..af9f987 100644
> --- a/net/sunrpc/svc.c
> +++ b/net/sunrpc/svc.c
> @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
>         memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
>         memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
>         memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
> +       rqstp->rq_arg.len = req->rq_private_buf.len;

Cheers
  Trond


* Re: [PATCH] nfs4: limit callback decoding to received bytes
  2015-12-07 19:52 ` Trond Myklebust
@ 2015-12-08 14:20   ` Benjamin Coddington
  2015-12-09 15:56     ` [PATCH] SUNRPC: init xdr_stream for zero iov_len, page_len Benjamin Coddington
  0 siblings, 1 reply; 7+ messages in thread
From: Benjamin Coddington @ 2015-12-08 14:20 UTC
  To: Trond Myklebust
  Cc: J. Bruce Fields, Anna Schumaker, Jeff Layton,
	Linux NFS Mailing List

On Mon, 7 Dec 2015, Trond Myklebust wrote:

> Hi Ben,
>
> I've hit a few problems with this patch after it went upstream.
>
> On Fri, Nov 20, 2015 at 6:55 AM, Benjamin Coddington
> <bcodding@redhat.com> wrote:
> > A truncated cb_compound request will cause the client to decode null or
> > data from a previous callback for the nfs4.1 backchannel case, or uninitialized
> > data for the nfs4.0 case. This is because the path through
> > svc_process_common() advances the request's iov_base and decrements iov_len
> > without adjusting the overall xdr_buf's len field.  That causes
> > xdr_init_decode() to set up the xdr_stream with an incorrect length in
> > nfs4_callback_compound().
> >
> > Fixing this for the nfs4.1 backchannel case first requires setting the
> > correct iov_len and page_len based on the length of received data in the
> > same manner as the nfs4.0 case.
> >
> > Then the request's xdr_buf length can be adjusted for both cases based upon
> > the remaining iov_len and page_len.
> >
> > Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
> > ---
> >  fs/nfs/callback_xdr.c         |    7 +++++--
> >  net/sunrpc/backchannel_rqst.c |    8 ++++++++
> >  net/sunrpc/svc.c              |    1 +
> >  3 files changed, 14 insertions(+), 2 deletions(-)
> >
> > diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
> > index 6b1697a..1c8213e 100644
> > --- a/fs/nfs/callback_xdr.c
> > +++ b/fs/nfs/callback_xdr.c
> > @@ -76,7 +76,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
> >
> >         p = xdr_inline_decode(xdr, nbytes);
> >         if (unlikely(p == NULL))
> > -               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
> > +               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
> > +                                                       "or truncated request.\n");
> >         return p;
> >  }
> >
> > @@ -892,6 +893,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
> >         struct cb_compound_hdr_arg hdr_arg = { 0 };
> >         struct cb_compound_hdr_res hdr_res = { NULL };
> >         struct xdr_stream xdr_in, xdr_out;
> > +       struct xdr_buf *rq_arg = &rqstp->rq_arg;
> >         __be32 *p, status;
> >         struct cb_process_state cps = {
> >                 .drc_status = 0,
> > @@ -903,7 +905,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
> >
> >         dprintk("%s: start\n", __func__);
> >
> > -       xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
> > +       rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
>
> This is redundant.

The decoding routines in svc_process_common() and svc_authenticate() are
advancing the xdr_buf's iov_base and decrementing iov_len without adjusting
the overall xdr_buf.len field, so I believed this adjustment was necessary
after they had moved things around.

I'm simulating this problem by sending CB_COMPOUND instead of CB_NULL when
setting up the callback channel (since that is easy to do in pynfs).  In
that scenario, just before xdr_init_decode() my xdr_buf.len is 80 (the size
of the RPC headers), but the xdr_buf.kvec iov_len is 0 (all the headers have
been decoded).  xdr_init_decode() then skips xdr_set_iov(), so xdr->end is
never initialized, and xdr->nwords ends up at 20.
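
For reference, nwords comes from the xdr_buf's len field, not from the
remaining kvec; quoting XDR_QUADLEN from include/linux/sunrpc/xdr.h from
memory:

	#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

	/* so in xdr_init_decode(), with buf->len == 80:
	 *	xdr->nwords = XDR_QUADLEN(80) = 20
	 * even though head[0].iov_len == 0 and page_len == 0
	 */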

Then, as we start decoding, we end up in __xdr_inline_decode(), which
expects xdr->end to hold a valid value as long as we haven't run out of
nwords; otherwise we start decoding a previous request or uninitialized
data.  Usually this is harmless, as the string decode overflows those 20
words and NULL is returned in my test; however, I believe it is possible to
decode part of a previous request here.

With this adjustment, nwords ends up at 0, so __xdr_inline_decode() returns
before using xdr->end.

Maybe the right fix is to adjust xdr_init_decode() to make sure xdr->end is
always initialized based on iov_len.

Let me know if it would be helpful for me to provide exactly the steps to
reproduce my test.

> > +       xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
> >
> >         p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
> >         xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
> > diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
> > index 6255d14..d92cee1 100644
> > --- a/net/sunrpc/backchannel_rqst.c
> > +++ b/net/sunrpc/backchannel_rqst.c
> > @@ -333,12 +333,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
> >  {
> >         struct rpc_xprt *xprt = req->rq_xprt;
> >         struct svc_serv *bc_serv = xprt->bc_serv;
> > +       struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
> >
> >         spin_lock(&xprt->bc_pa_lock);
> >         list_del(&req->rq_bc_pa_list);
> >         xprt_dec_alloc_count(xprt, 1);
> >         spin_unlock(&xprt->bc_pa_lock);
> >
> > +       if (copied <= rq_rcv_buf->head[0].iov_len) {
> > +               rq_rcv_buf->head[0].iov_len = copied;
> > +               rq_rcv_buf->page_len = 0;
> > +       } else {
> > +               rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
> > +       }
> > +
>
> There is a problem with this approach: req->rq_rcv_buf is never reset,
> and so NFSv4 callbacks are now broken.
> The correct fix for this issue is to do the above adjustment in
> bc_svc_process(), so that it only applies to the resulting
> rqstp->rq_arg.

I assumed it was being reset, and my test didn't catch it.  Very sorry for
the trouble, Trond.  It makes much more sense to adjust the service's rqstp
after the copy.
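
Something like this in bc_svc_process(), applied to the per-request copy
(untested sketch):

	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
					 rqstp->rq_arg.head[0].iov_len;
	}

That way the persistent rq_rcv_buf is never modified, and only the copy
handed to the service gets trimmed.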

Ben

> >         req->rq_private_buf.len = copied;
> >         set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
> >
> > diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> > index a8f579d..af9f987 100644
> > --- a/net/sunrpc/svc.c
> > +++ b/net/sunrpc/svc.c
> > @@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
> >         memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
> >         memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
> >         memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
> > +       rqstp->rq_arg.len = req->rq_private_buf.len;
>
> Cheers
>   Trond
>


* [PATCH] SUNRPC: init xdr_stream for zero iov_len, page_len
  2015-12-08 14:20   ` Benjamin Coddington
@ 2015-12-09 15:56     ` Benjamin Coddington
  0 siblings, 0 replies; 7+ messages in thread
From: Benjamin Coddington @ 2015-12-09 15:56 UTC
  To: Trond Myklebust
  Cc: J. Bruce Fields, Anna Schumaker, Jeff Layton,
	Linux NFS Mailing List

On Tue, 8 Dec 2015, Benjamin Coddington wrote:

> On Mon, 7 Dec 2015, Trond Myklebust wrote:
> 
> > Hi Ben,
> >
> > I've hit a few problems with this patch after it went upstream.
> >
> > On Fri, Nov 20, 2015 at 6:55 AM, Benjamin Coddington
> > <bcodding@redhat.com> wrote:
> > > A truncated cb_compound request will cause the client to decode null or
> > > data from a previous callback for the nfs4.1 backchannel case, or uninitialized
> > > data for the nfs4.0 case. This is because the path through
> > > svc_process_common() advances the request's iov_base and decrements iov_len
> > > without adjusting the overall xdr_buf's len field.  That causes
> > > xdr_init_decode() to set up the xdr_stream with an incorrect length in
> > > nfs4_callback_compound().
> > >
> > > Fixing this for the nfs4.1 backchannel case first requires setting the
> > > correct iov_len and page_len based on the length of received data in the
> > > same manner as the nfs4.0 case.
> > >
> > > Then the request's xdr_buf length can be adjusted for both cases based upon
> > > the remaining iov_len and page_len.
> > >
> > > Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
> > > ---
> > >  fs/nfs/callback_xdr.c         |    7 +++++--
> > >  net/sunrpc/backchannel_rqst.c |    8 ++++++++
> > >  net/sunrpc/svc.c              |    1 +
> > >  3 files changed, 14 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
> > > index 6b1697a..1c8213e 100644
> > > --- a/fs/nfs/callback_xdr.c
> > > +++ b/fs/nfs/callback_xdr.c
> > > @@ -76,7 +76,8 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
> > >
> > >         p = xdr_inline_decode(xdr, nbytes);
> > >         if (unlikely(p == NULL))
> > > -               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
> > > +               printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
> > > +                                                       "or truncated request.\n");
> > >         return p;
> > >  }
> > >
> > > @@ -892,6 +893,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
> > >         struct cb_compound_hdr_arg hdr_arg = { 0 };
> > >         struct cb_compound_hdr_res hdr_res = { NULL };
> > >         struct xdr_stream xdr_in, xdr_out;
> > > +       struct xdr_buf *rq_arg = &rqstp->rq_arg;
> > >         __be32 *p, status;
> > >         struct cb_process_state cps = {
> > >                 .drc_status = 0,
> > > @@ -903,7 +905,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
> > >
> > >         dprintk("%s: start\n", __func__);
> > >
> > > -       xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
> > > +       rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
> >
> > This is redundant.
> 
> The decoding routines in svc_process_common() and svc_authenticate() are
> advancing the xdr_buf's iov_base and decrementing iov_len without adjusting
> the overall xdr_buf.len field, so I believed this adjustment was necessary
> after they had moved things around.
> 
> I'm simulating this problem by sending CB_COMPOUND instead of CB_NULL when
> setting up the callback channel (since that is easy to do in pynfs).  In
> that scenario, just before xdr_init_decode() my xdr_buf.len is 80 (the size
> of the RPC headers), but the xdr_buf.kvec iov_len is 0 (all the headers have
> been decoded).  xdr_init_decode() then skips xdr_set_iov(), so xdr->end is
> never initialized, and xdr->nwords ends up at 20.
> 
> Then, as we start decoding, we end up in __xdr_inline_decode(), which
> expects xdr->end to hold a valid value as long as we haven't run out of
> nwords; otherwise we start decoding a previous request or uninitialized
> data.  Usually this is harmless, as the string decode overflows those 20
> words and NULL is returned in my test; however, I believe it is possible to
> decode part of a previous request here.
> 
> With this adjustment, nwords ends up at 0, so __xdr_inline_decode() returns
> before using xdr->end.
> 
> Maybe the right fix is to adjust xdr_init_decode() to make sure xdr->end is
> always initialized based on iov_len.
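
With head[0].iov_len == 0 the fallback still leaves xdr->p == xdr->end, so
__xdr_inline_decode() bails out cleanly instead of reading stale data.  From
memory, xdr_set_iov() does approximately this (worth checking against the
tree):

	static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
				unsigned int len)
	{
		if (len > iov->iov_len)
			len = iov->iov_len;	/* clamps to 0 here */
		xdr->p = (__be32 *)iov->iov_base;
		xdr->end = (__be32 *)((char *)iov->iov_base + len);	/* == xdr->p */
		xdr->iov = iov;
		xdr->page_ptr = NULL;
	}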

8<----------------------------------------------------------------------------- 

An xdr_buf with head[0].iov_len = 0 and page_len = 0 will cause
xdr_init_decode() to incorrectly set up the xdr_stream.

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
---
 net/sunrpc/xdr.c |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4..4f29e30 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -797,6 +797,8 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
 		xdr_set_iov(xdr, buf->head, buf->len);
 	else if (buf->page_len != 0)
 		xdr_set_page_base(xdr, 0, buf->len);
+	else
+		xdr_set_iov(xdr, buf->head, buf->len);
 	if (p != NULL && p > xdr->p && xdr->end >= p) {
 		xdr->nwords -= p - xdr->p;
 		xdr->p = p;
-- 
1.7.1

