|
/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#if defined(CONFIG_NFS_V4_1)

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req=%p\n", req);
	BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_private_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	list_del(&req->rq_bc_pa_list);
	kfree(req);
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs (a 4096-byte page holds up to 256 of them).  The NFS
 * client indicates to the pNFS server that its callback requests can
 * be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct page *page_rcv = NULL, *page_snd = NULL;
	struct xdr_buf *xbufp = NULL;
	struct rpc_rqst *req, *tmp;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req=%p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);

		req->rq_xprt = xprt;
		INIT_LIST_HEAD(&req->rq_list);
		INIT_LIST_HEAD(&req->rq_bc_list);

		/* Preallocate one XDR receive buffer */
		page_rcv = alloc_page(GFP_KERNEL);
		if (page_rcv == NULL) {
			printk(KERN_ERR "Failed to create bc receive xbuf\n");
			goto out_free;
		}
		xbufp = &req->rq_rcv_buf;
		xbufp->head[0].iov_base = page_address(page_rcv);
		xbufp->head[0].iov_len = PAGE_SIZE;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = PAGE_SIZE;
		xbufp->buflen = PAGE_SIZE;

		/* Preallocate one XDR send buffer */
		page_snd = alloc_page(GFP_KERNEL);
		if (page_snd == NULL) {
			printk(KERN_ERR "Failed to create bc snd xbuf\n");
			goto out_free;
		}

		xbufp = &req->rq_snd_buf;
		xbufp->head[0].iov_base = page_address(page_snd);
		xbufp->head[0].iov_len = 0;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = 0;
		xbufp->buflen = PAGE_SIZE;
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
		xprt_free_allocation(req);

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL(xprt_setup_backchannel);

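/*
 * Hypothetical caller sketch (not part of this file): an NFSv4.1
 * client creating a session might preallocate its backchannel pool
 * like this, where "clp" and NFS41_BC_MIN_CALLBACKS stand in for
 * the caller's client state and its negotiated callback slot count:
 *
 *	status = xprt_setup_backchannel(clp->cl_rpcclient->cl_xprt,
 *					NFS41_BC_MIN_CALLBACKS);
 *	if (status)
 *		return status;	(pool could not be allocated)
 */
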
/*
 * Destroys the backchannel preallocated structures.
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	BUG_ON(max_reqs == 0);
	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: backchannel list empty=%s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL(xprt_destroy_backchannel);

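/*
 * Matching teardown sketch (same hypothetical caller): when the
 * session that owns these slots is destroyed, the same count that
 * was passed to xprt_setup_backchannel is handed back:
 *
 *	xprt_destroy_backchannel(clp->cl_rpcclient->cl_xprt,
 *				 NFS41_BC_MIN_CALLBACKS);
 */
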
/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to take one
 * of these preallocated structures, and xprt_free_bc_request to return
 * it.
 *
 * Returns an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	dprintk("RPC: allocate a backchannel request\n");
	spin_lock_bh(&xprt->bc_pa_lock);
	if (!list_empty(&xprt->bc_pa_list)) {
		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				       rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
	} else {
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

	if (req != NULL) {
		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
		req->rq_received = 0;
		req->rq_bytes_sent = 0;
		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
		       sizeof(req->rq_private_buf));
	}
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}

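/*
 * Sketch of the intended consumer (hypothetical, simplified): a
 * transport receive path that spots an incoming callback takes a
 * preallocated request, fills its receive buffer, and hands it to
 * the callback service, which returns it when done:
 *
 *	req = xprt_alloc_bc_request(xprt);
 *	if (req == NULL)
 *		... drop the connection: the server overran the
 *		    session's advertised backchannel slots ...
 *	... copy the callback RPC into req->rq_rcv_buf ...
 *	... queue req for the callback service ...
 *	xprt_free_bc_request(req);	(requeues or frees the slot)
 */
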
/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	smp_mb__before_clear_bit();
	BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_clear_bit();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * for any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

#endif /* CONFIG_NFS_V4_1 */