trans_rdma.c

/*
 * linux/fs/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(1024*1024)	/* 1MB */
/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @timeout: Number of uSecs to wait for connection management events
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_sem: Semaphore for the RQ
 * @excess_rc: Amount of posted Receive Contexts without a pending request.
 *	       See rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	long timeout;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	struct semaphore rq_sem;
	atomic_t excess_rc;
	struct sockaddr_in addr;
	spinlock_t req_lock;
	struct completion cm_done;
};
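
/*
 * Transport state machine, as driven by p9_cm_event_handler() below:
 *
 *   P9_RDMA_INIT --> P9_RDMA_ADDR_RESOLVED --> P9_RDMA_ROUTE_RESOLVED
 *       --> P9_RDMA_CONNECTED --> P9_RDMA_CLOSED (on disconnect)
 *
 * P9_RDMA_FLUSHING and P9_RDMA_CLOSING are error/teardown states entered
 * from handle_recv() and rdma_request() respectively.  rdma_create_trans()
 * waits on cm_done after each connection-management step and checks that
 * the expected forward transition happened.
 */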
/**
 * p9_rdma_context - Keeps track of in-process WR
 *
 * @wc_op: The original WR op for when the CQE completes in error.
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_req;
struct p9_rdma_context {
	enum ib_wc_opcode wc_op;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};
/**
 * p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 * @privport: Whether to bind to a privileged (reserved) source port
 */
struct p9_rdma_opts {
	short port;
	int sq_depth;
	int rq_depth;
	long timeout;
	int privport;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* Options that take no argument */
	Opt_privport,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;
	opts->privport = 0;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		case Opt_privport:
			opts->privport = 1;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}
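
/*
 * Example option string as parsed above (the values are illustrative, not
 * recommendations):
 *
 *   "port=5640,sq=16,rq=32,timeout=10000,privport"
 *
 * yields opts = { .port = 5640, .sq_depth = 16, .rq_depth = 32,
 * .timeout = 10000, .privport = 1 }.  Had rq been smaller than sq,
 * rq_depth would have been raised to match sq_depth.
 */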
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request.
	 */
	if (unlikely(req->rc)) {
		pr_err("Duplicate reply for request %d\n", tag);
		goto err_out;
	}

	req->rc = c->rc;
	p9_client_cb(client, req, REQ_STATUS_RCVD);
	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}
static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
		 event->event, context);
}
static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	/* Re-arm the CQ before draining it, so a completion that arrives
	 * while we poll still raises the next notification. */
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			up(&rdma->rq_sem);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			up(&rdma->sq_sem);
			break;

		default:
			pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		kfree(c);
	}
}

static void cq_event_handler(struct ib_event *e, void *v)
{
	p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_destroy_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
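
/*
 * Note on buffer lifetime: the reply buffer mapped in post_recv() stays
 * DMA-mapped until the matching IB_WC_RECV completion is processed by
 * handle_recv(), which unmaps it before parsing the 9P header.  Send
 * buffers follow the same pattern via rdma_request() and handle_send().
 */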
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* When an error occurs between posting the recv and the send,
	 * there will be a receive context posted without a pending request.
	 * Since there is no way to "un-post" it, we remember it and skip
	 * post_recv() for the next request.
	 * So here,
	 * see if we are this `next request' and need to absorb an excess rc.
	 * If yes, then drop and free our own, and do not post_recv().
	 */
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
			/* Got one! */
			kfree(req->rc);
			req->rc = NULL;
			goto dont_need_post_recv;
		} else {
			/* We raced and lost. */
			atomic_inc(&rdma->excess_rc);
		}
	}

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto recv_error;
	}
	rpl_context->rc = req->rc;

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (down_interruptible(&rdma->rq_sem)) {
		err = -EINTR;
		goto recv_error;
	}

	err = post_recv(client, rpl_context);
	if (err) {
		p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
		goto recv_error;
	}
	/* remove posted receive buffer from request structure */
	req->rc = NULL;

dont_need_post_recv:
	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto send_error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		err = -EIO;
		goto send_error;
	}

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		err = -EINTR;
		goto send_error;
	}

	/* Mark request as `sent' *before* we actually send it,
	 * because doing it after could erase the REQ_STATUS_RCVD
	 * status in case of a very fast reply.
	 */
	req->status = REQ_STATUS_SENT;
	err = ib_post_send(rdma->qp, &wr, &bad_wr);
	if (err)
		goto send_error;

	/* Success */
	return 0;

 /* Handle errors that happened during or while preparing the send: */
 send_error:
	req->status = REQ_STATUS_ERROR;
	kfree(c);
	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

	/* Ach.
	 * We did post_recv(), but not the send. We have one recv buffer in
	 * excess.
	 */
	atomic_inc(&rdma->excess_rc);
	return err;

 /* Handle errors that happened during or while preparing post_recv(): */
 recv_error:
	kfree(rpl_context);
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}
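
/*
 * excess_rc accounting, illustrated:
 *
 *   1. rdma_request() posts a reply buffer, then fails before (or during)
 *      ib_post_send(): excess_rc is incremented, because one posted recv
 *      now has no request that will consume it.
 *   2. The next rdma_request() sees excess_rc > 0, frees its own reply
 *      fcall and skips post_recv(), rebalancing the receive queue.
 *
 * rdma_cancelled() below feeds the same counter when the server fully
 * flushes a request and its reply will never arrive.
 */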
static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}
/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	/* The semaphores act as credit counters: each down() in
	 * rdma_request() claims one SQ/RQ slot and each completion
	 * handled in cq_comp_handler() up()s the slot back. */
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
}
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* Nothing to do here.
	 * We will take care of it (if we have to) in rdma_cancelled()
	 */
	return 1;
}

/* A request has been fully flushed without a reply.
 * That means we have posted one buffer in excess.
 */
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	atomic_inc(&rdma->excess_rc);
	return 0;
}
static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	/* Walk down through the reserved port range until a bind succeeds
	 * or fails with something other than "address in use". */
	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}
/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_cq_init_attr cq_attr = {};

	if (addr == NULL)
		return -EINVAL;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Create the Completion Queue: one entry per possible send or
	 * receive completion, plus one spare. */
	cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				&cq_attr);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
	.cancelled = rdma_cancelled,
};

/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");
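
/*
 * Illustrative usage from userspace, assuming the transport was built as a
 * module (the 9pnet_rdma module name reflects the net/9p build setup; the
 * server address and mount point below are placeholders):
 *
 *   # modprobe 9pnet_rdma
 *   # mount -t 9p 192.168.1.10 /mnt/9p -o trans=rdma,port=5640
 */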