fnic_fcs.c

/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

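/**
 * fnic_handle_link() - Handle link-state change events.
 * @work: the fnic link_work item, queued when firmware signals a link change.
 *
 * Compares the cached link state against the state reported by the vNIC
 * device and notifies libfcoe of UP/DOWN transitions. A bumped link_down_cnt
 * with an unchanged UP status means the link flapped (UP -> DOWN -> UP)
 * between two invocations. On link up with FIP-capable firmware, FCoE VLAN
 * discovery is (re)started instead of reporting link up directly.
 */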
void fnic_handle_link(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, link_work);
        unsigned long flags;
        int old_link_status;
        u32 old_link_down_cnt;

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        old_link_down_cnt = fnic->link_down_cnt;
        old_link_status = fnic->link_status;
        fnic->link_status = vnic_dev_link_status(fnic->vdev);
        fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

        if (old_link_status == fnic->link_status) {
                if (!fnic->link_status) {
                        /* DOWN -> DOWN */
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        fnic_fc_trace_set_data(fnic->lport->host->host_no,
                                FNIC_FC_LE, "Link Status: DOWN->DOWN",
                                strlen("Link Status: DOWN->DOWN"));
                } else {
                        if (old_link_down_cnt != fnic->link_down_cnt) {
                                /* UP -> DOWN -> UP */
                                fnic->lport->host_stats.link_failure_count++;
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                fnic_fc_trace_set_data(
                                        fnic->lport->host->host_no,
                                        FNIC_FC_LE,
                                        "Link Status: UP_DOWN_UP",
                                        strlen("Link Status: UP_DOWN_UP"));
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                                        /* start FCoE VLAN discovery */
                                        fnic_fc_trace_set_data(
                                                fnic->lport->host->host_no,
                                                FNIC_FC_LE,
                                                "Link Status: UP_DOWN_UP_VLAN",
                                                strlen(
                                                "Link Status: UP_DOWN_UP_VLAN"));
                                        fnic_fcoe_send_vlan_req(fnic);
                                        return;
                                }
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
                        } else {
                                /* UP -> UP */
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                fnic_fc_trace_set_data(
                                        fnic->lport->host->host_no, FNIC_FC_LE,
                                        "Link Status: UP_UP",
                                        strlen("Link Status: UP_UP"));
                        }
                }
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        /* start FCoE VLAN discovery */
                        fnic_fc_trace_set_data(
                                fnic->lport->host->host_no,
                                FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
                                strlen("Link Status: DOWN_UP_VLAN"));
                        fnic_fcoe_send_vlan_req(fnic);
                        return;
                }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
                                       "Link Status: DOWN_UP",
                                       strlen("Link Status: DOWN_UP"));
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
                /* UP -> DOWN */
                fnic->lport->host_stats.link_failure_count++;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
                fnic_fc_trace_set_data(
                        fnic->lport->host->host_no, FNIC_FC_LE,
                        "Link Status: UP_DOWN",
                        strlen("Link Status: UP_DOWN"));
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "deleting fip-timer during link-down\n");
                        del_timer_sync(&fnic->fip_timer);
                }
                fcoe_ctlr_link_down(&fnic->ctlr);
        }
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, frame_work);
        struct fc_lport *lp = fnic->lport;
        unsigned long flags;
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->frame_queue))) {

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                fp = (struct fc_frame *)skb;

                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        skb_queue_head(&fnic->frame_queue, skb);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fc_exch_recv(lp, fp);
        }
}

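/**
 * fnic_fcoe_evlist_free() - Drain and free all pending fnic events.
 * @fnic: fnic instance.
 *
 * Empties fnic->evlist under fnic_lock without processing the events;
 * used when the pending events must simply be discarded.
 */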
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
        struct fnic_event *fevt = NULL;
        struct fnic_event *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (list_empty(&fnic->evlist)) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
                list_del(&fevt->list);
                kfree(fevt);
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

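/**
 * fnic_handle_event() - Work function that services queued fnic events.
 * @work: the fnic event_work item.
 *
 * Walks fnic->evlist under fnic_lock and dispatches each event (VLAN
 * discovery, FCF discovery). Events are left queued while the driver is
 * in a transitional state and serviced once it reaches a stable state.
 */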
void fnic_handle_event(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, event_work);
        struct fnic_event *fevt = NULL;
        struct fnic_event *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (list_empty(&fnic->evlist)) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
                if (fnic->stop_rx_link_events) {
                        list_del(&fevt->list);
                        kfree(fevt);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }

                list_del(&fevt->list);
                switch (fevt->event) {
                case FNIC_EVT_START_VLAN_DISC:
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        fnic_fcoe_send_vlan_req(fnic);
                        spin_lock_irqsave(&fnic->fnic_lock, flags);
                        break;
                case FNIC_EVT_START_FCF_DISC:
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "Start FCF Discovery\n");
                        fnic_fcoe_start_fcf_disc(fnic);
                        break;
                default:
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "Unknown event 0x%x\n", fevt->event);
                        break;
                }
                kfree(fevt);
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - Check if the received FIP FLOGI frame
 * is a reject.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is rejected with an "unsupported command"
 * reason and an "insufficient resources" ELS explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
                                           struct sk_buff *skb)
{
        struct fc_lport *lport = fip->lp;
        struct fip_header *fiph;
        struct fc_frame_header *fh = NULL;
        struct fip_desc *desc;
        struct fip_encaps *els;
        enum fip_desc_type els_dtype = 0;
        u16 op;
        u8 els_op;
        u8 sub;

        size_t els_len = 0;
        size_t rlen;
        size_t dlen = 0;

        if (skb_linearize(skb))
                return 0;

        if (skb->len < sizeof(*fiph))
                return 0;

        fiph = (struct fip_header *)skb->data;
        op = ntohs(fiph->fip_op);
        sub = fiph->fip_subcode;

        if (op != FIP_OP_LS)
                return 0;

        if (sub != FIP_SC_REP)
                return 0;

        rlen = ntohs(fiph->fip_dl_len) * 4;
        if (rlen + sizeof(*fiph) > skb->len)
                return 0;

        desc = (struct fip_desc *)(fiph + 1);
        dlen = desc->fip_dlen * FIP_BPW;

        if (desc->fip_dtype == FIP_DT_FLOGI) {

                if (dlen < sizeof(*els) + sizeof(*fh) + 1)
                        return 0;

                els_len = dlen - sizeof(*els);
                els = (struct fip_encaps *)desc;
                fh = (struct fc_frame_header *)(els + 1);
                els_dtype = desc->fip_dtype;

                if (!fh)
                        return 0;

                /*
                 * The ELS command code, reason, and explanation should be
                 * Reject, unsupported command, and insufficient resources,
                 * respectively.
                 */
                els_op = *(u8 *)(fh + 1);
                if (els_op == ELS_LS_RJT) {
                        shost_printk(KERN_INFO, lport->host,
                                     "Flogi Request Rejected by Switch\n");
                        return 1;
                }
                shost_printk(KERN_INFO, lport->host,
                             "Flogi Request Accepted by Switch\n");
        }
        return 0;
}

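/**
 * fnic_fcoe_send_vlan_req() - Send a FIP VLAN discovery request.
 * @fnic: fnic instance.
 *
 * Builds a FIP VLAN request carrying MAC and WWNN descriptors, addressed
 * to the ALL-FCF-MACs group address, resets any previously discovered
 * VLANs, sends the frame through the FCoE controller, and arms fip_timer
 * so the request is retried if no response arrives.
 */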
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
        struct fcoe_ctlr *fip = &fnic->ctlr;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct sk_buff *skb;
        char *eth_fr;
        int fr_len;
        struct fip_vlan *vlan;
        u64 vlan_tov;

        fnic_fcoe_reset_vlans(fnic);
        fnic->set_vlan(fnic, 0);
        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Sending VLAN request...\n");
        skb = dev_alloc_skb(sizeof(struct fip_vlan));
        if (!skb)
                return;

        fr_len = sizeof(*vlan);
        eth_fr = (char *)skb->data;
        vlan = (struct fip_vlan *)eth_fr;

        memset(vlan, 0, sizeof(*vlan));
        memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
        memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
        vlan->eth.h_proto = htons(ETH_P_FIP);

        vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
        vlan->fip.fip_op = htons(FIP_OP_VLAN);
        vlan->fip.fip_subcode = FIP_SC_VL_REQ;
        vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

        vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
        vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
        memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

        vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
        vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
        put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);

        atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

        skb_put(skb, sizeof(*vlan));
        skb->protocol = htons(ETH_P_FIP);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);

        /* set a timer so that we can retry if there is no response */
        vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
        mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

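/**
 * fnic_fcoe_process_vlan_resp() - Parse a FIP VLAN discovery response.
 * @fnic: fnic instance.
 * @skb: received FIP VLAN response frame.
 *
 * Collects every VLAN descriptor in the response into fnic->vlans,
 * selects the first entry, programs it into the hardware via set_vlan(),
 * and reports link up to libfcoe to start FCF solicitation. If no
 * descriptors are present or memory allocation fails, the fip_timer
 * retry path takes over.
 */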
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
        struct fcoe_ctlr *fip = &fnic->ctlr;
        struct fip_header *fiph;
        struct fip_desc *desc;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u16 vid;
        size_t rlen;
        size_t dlen;
        struct fcoe_vlan *vlan;
        u64 sol_time;
        unsigned long flags;

        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Received VLAN response...\n");

        fiph = (struct fip_header *)skb->data;

        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
                     ntohs(fiph->fip_op), fiph->fip_subcode);

        rlen = ntohs(fiph->fip_dl_len) * 4;
        fnic_fcoe_reset_vlans(fnic);
        spin_lock_irqsave(&fnic->vlans_lock, flags);
        desc = (struct fip_desc *)(fiph + 1);
        while (rlen > 0) {
                dlen = desc->fip_dlen * FIP_BPW;
                switch (desc->fip_dtype) {
                case FIP_DT_VLAN:
                        vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
                        shost_printk(KERN_INFO, fnic->lport->host,
                                     "process_vlan_resp: FIP VLAN %d\n", vid);
                        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
                        if (!vlan) {
                                /* retry from timer */
                                spin_unlock_irqrestore(&fnic->vlans_lock,
                                                       flags);
                                goto out;
                        }
                        vlan->vid = vid & 0x0fff;
                        vlan->state = FIP_VLAN_AVAIL;
                        list_add_tail(&vlan->list, &fnic->vlans);
                        break;
                }
                desc = (struct fip_desc *)((char *)desc + dlen);
                rlen -= dlen;
        }

        /* any VLAN descriptors present? */
        if (list_empty(&fnic->vlans)) {
                /* retry from timer */
                atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
                FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                             "No VLAN descriptors in FIP VLAN response\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                goto out;
        }

        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        fnic->set_vlan(fnic, vlan->vid);
        vlan->state = FIP_VLAN_SENT; /* sent now */
        vlan->sol_count++;
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);

        /* start the solicitation */
        fcoe_ctlr_link_up(fip);

        sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
        mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
        return;
}

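/**
 * fnic_fcoe_start_fcf_disc() - Begin FCF discovery on the current VLAN.
 * @fnic: fnic instance.
 *
 * Re-programs the first discovered VLAN, marks it as solicited, reports
 * link up to libfcoe to trigger FCF solicitation, and arms fip_timer.
 */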
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        u64 sol_time;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        fnic->set_vlan(fnic, vlan->vid);
        vlan->state = FIP_VLAN_SENT; /* sent now */
        vlan->sol_count = 1;
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);

        /* start the solicitation */
        fcoe_ctlr_link_up(&fnic->ctlr);

        sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
        mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

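/**
 * fnic_fcoe_vlan_check() - Validate the VLAN on receipt of an FCF advert.
 * @fnic: fnic instance.
 * @flag: FIP flags from the advertisement header (currently unused).
 *
 * Marks the solicited VLAN as in-use when the first FCF advertisement
 * arrives on it. Returns 0 if the current VLAN is usable, -EINVAL
 * otherwise.
 */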
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
        unsigned long flags;
        struct fcoe_vlan *fvlan;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (list_empty(&fnic->vlans)) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return -EINVAL;
        }

        fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        if (fvlan->state == FIP_VLAN_USED) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return 0;
        }

        if (fvlan->state == FIP_VLAN_SENT) {
                fvlan->state = FIP_VLAN_USED;
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        return -EINVAL;
}

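/**
 * fnic_event_enq() - Queue a fnic event and schedule the event worker.
 * @fnic: fnic instance.
 * @ev: event to queue.
 *
 * Allocates the event with GFP_ATOMIC since callers may hold spinlocks
 * or run in timer context.
 */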
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
        struct fnic_event *fevt;
        unsigned long flags;

        fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
        if (!fevt)
                return;

        fevt->fnic = fnic;
        fevt->event = ev;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        list_add_tail(&fevt->list, &fnic->evlist);
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        schedule_work(&fnic->event_work);
}

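/**
 * fnic_fcoe_handle_fip_frame() - Pre-process a received FIP frame.
 * @fnic: fnic instance.
 * @skb: received FIP frame.
 *
 * Handles VLAN discovery responses locally and restarts VLAN discovery
 * on a clear-virtual-link request. Returns > 0 if the frame should also
 * be passed on to libfcoe, 0 if it was consumed here, and < 0 on error;
 * the caller frees the skb for any return value <= 0.
 */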
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
        struct fip_header *fiph;
        int ret = 1;
        u16 op;
        u8 sub;

        if (!skb || !(skb->data))
                return -1;

        if (skb_linearize(skb))
                goto drop;

        fiph = (struct fip_header *)skb->data;
        op = ntohs(fiph->fip_op);
        sub = fiph->fip_subcode;

        if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
                goto drop;

        if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
                goto drop;

        if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
                if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
                        goto drop;
                /* pass it on to fcoe */
                ret = 1;
        } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
                /* set the VLAN as used */
                fnic_fcoe_process_vlan_resp(fnic, skb);
                ret = 0;
        } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
                /* received CVL request, restart VLAN discovery */
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                /* pass it on to fcoe */
                ret = 1;
        }
drop:
        return ret;
}

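/**
 * fnic_handle_fip_frame() - Work function that services the FIP frame queue.
 * @work: the fnic fip_frame_work item.
 *
 * Dequeues received FIP frames, lets fnic_fcoe_handle_fip_frame()
 * pre-process them, restarts VLAN discovery when the switch rejects our
 * FLOGI, and hands all remaining frames to fcoe_ctlr_recv().
 */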
void fnic_handle_fip_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        struct sk_buff *skb;
        struct ethhdr *eh;

        while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        skb_queue_head(&fnic->fip_frame_queue, skb);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                eh = (struct ethhdr *)skb->data;
                if (eh->h_proto == htons(ETH_P_FIP)) {
                        skb_pull(skb, sizeof(*eh));
                        if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
                                dev_kfree_skb(skb);
                                continue;
                        }
                        /*
                         * If the switch rejected our FLOGI, clear all
                         * FCFs and restart VLAN discovery from scratch.
                         */
                        if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
                                atomic64_inc(
                                        &fnic_stats->vlan_stats.flogi_rejects);
                                shost_printk(KERN_INFO, fnic->lport->host,
                                             "Trigger a Link down - VLAN Disc\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                /* start FCoE VLAN discovery */
                                fnic_fcoe_send_vlan_req(fnic);
                                dev_kfree_skb(skb);
                                continue;
                        }
                        fcoe_ctlr_recv(&fnic->ctlr, skb);
                        continue;
                }
        }
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
        struct fc_frame *fp;
        struct ethhdr *eh;
        struct fcoe_hdr *fcoe_hdr;
        struct fcoe_crc_eof *ft;

        /*
         * Undo VLAN encapsulation if present.
         */
        eh = (struct ethhdr *)skb->data;
        if (eh->h_proto == htons(ETH_P_8021Q)) {
                memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
                eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
                if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
                        printk(KERN_ERR "Dropped FIP frame, as firmware "
                                        "uses non-FIP mode. Enable FIP "
                                        "using UCSM\n");
                        goto drop;
                }
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
                skb_queue_tail(&fnic->fip_frame_queue, skb);
                queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
                goto drop;

        skb_set_network_header(skb, sizeof(*eh));
        skb_pull(skb, sizeof(*eh));

        fcoe_hdr = (struct fcoe_hdr *)skb->data;
        if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
                goto drop;

        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        fr_sof(fp) = fcoe_hdr->fcoe_sof;
        skb_pull(skb, sizeof(struct fcoe_hdr));
        skb_reset_transport_header(skb);

        ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
        fr_eof(fp) = ft->fcoe_eof;
        skb_trim(skb, skb->len - sizeof(*ft));
        return 0;
drop:
        dev_kfree_skb_irq(skb);
        return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
        u8 *ctl = fnic->ctlr.ctl_src_addr;
        u8 *data = fnic->data_src_addr;

        if (is_zero_ether_addr(new))
                new = ctl;
        if (ether_addr_equal(data, new))
                return;
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
        if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
                vnic_dev_del_addr(fnic->vdev, data);
        memcpy(data, new, ETH_ALEN);
        if (!ether_addr_equal(new, ctl))
                vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
        struct fnic *fnic = lport_priv(lport);

        spin_lock_irq(&fnic->fnic_lock);
        fnic_update_mac_locked(fnic, new);
        spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lport);
        u8 *mac;
        int ret;

        FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
                     port_id, fp);

        /*
         * If we're clearing the FC_ID, change to use the ctl_src_addr.
         * Set ethernet mode to send FLOGI.
         */
        if (!port_id) {
                fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
                fnic_set_eth_mode(fnic);
                return;
        }

        if (fp) {
                mac = fr_cb(fp)->granted_mac;
                if (is_zero_ether_addr(mac)) {
                        /* non-FIP - FLOGI already accepted - ignore return */
                        fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
                }
                fnic_update_mac(lport, mac);
        }

        /* Change state to reflect transition to FC mode */
        spin_lock_irq(&fnic->fnic_lock);
        if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
                fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
        else {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unexpected fnic state %s while"
                             " processing flogi resp\n",
                             fnic_state_to_str(fnic->state));
                spin_unlock_irq(&fnic->fnic_lock);
                return;
        }
        spin_unlock_irq(&fnic->fnic_lock);

        /*
         * Send FLOGI registration to firmware to set up FC mode.
         * The new address will be set up when registration completes.
         */
        ret = fnic_flogi_reg_handler(fnic, port_id);

        if (ret < 0) {
                spin_lock_irq(&fnic->fnic_lock);
                if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
                        fnic->state = FNIC_IN_ETH_MODE;
                spin_unlock_irq(&fnic->fnic_lock);
        }
}

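/**
 * fnic_rq_cmpl_frame_recv() - Per-descriptor receive-completion handler.
 * @rq: receive queue on which the frame arrived.
 * @cq_desc: completion descriptor.
 * @buf: receive buffer holding the frame.
 * @skipped: unused.
 * @opaque: unused context pointer.
 *
 * Decodes either an FCP completion (Ethernet headers already stripped by
 * firmware) or a raw Ethernet completion, drops frames with CRC or
 * encapsulation errors, and queues good frames for fnic_handle_frame().
 */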
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
                                    *cq_desc, struct vnic_rq_buf *buf,
                                    int skipped __attribute__((unused)),
                                    void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        struct fc_frame *fp;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned int eth_hdrs_stripped;
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe = 0, fcoe_sof, fcoe_eof;
        u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
        u8 fcs_ok = 1, packet_error = 0;
        u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
        u32 rss_hash;
        u16 exchange_id, tmpl;
        u8 sof = 0;
        u8 eof = 0;
        u32 fcp_bytes_written = 0;
        unsigned long flags;

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);
        skb = buf->os_buf;
        fp = (struct fc_frame *)skb;
        buf->os_buf = NULL;

        cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
        if (type == CQ_DESC_TYPE_RQ_FCP) {
                cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
                                   &type, &color, &q_number, &completed_index,
                                   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
                                   &tmpl, &fcp_bytes_written, &sof, &eof,
                                   &ingress_port, &packet_error,
                                   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
                                   &vlan);
                eth_hdrs_stripped = 1;
                skb_trim(skb, fcp_bytes_written);
                fr_sof(fp) = sof;
                fr_eof(fp) = eof;

        } else if (type == CQ_DESC_TYPE_RQ_ENET) {
                cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                                    &type, &color, &q_number, &completed_index,
                                    &ingress_port, &fcoe, &eop, &sop,
                                    &rss_type, &csum_not_calc, &rss_hash,
                                    &bytes_written, &packet_error,
                                    &vlan_stripped, &vlan, &checksum,
                                    &fcoe_sof, &fcoe_fc_crc_ok,
                                    &fcoe_enc_error, &fcoe_eof,
                                    &tcp_udp_csum_ok, &udp, &tcp,
                                    &ipv4_csum_ok, &ipv6, &ipv4,
                                    &ipv4_fragment, &fcs_ok);
                eth_hdrs_stripped = 0;
                skb_trim(skb, bytes_written);
                if (!fcs_ok) {
                        atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "fcs error. dropping packet.\n");
                        goto drop;
                }
                if (fnic_import_rq_eth_pkt(fnic, skb))
                        return;

        } else {
                /* wrong CQ type */
                shost_printk(KERN_ERR, fnic->lport->host,
                             "fnic rq_cmpl wrong cq type x%x\n", type);
                goto drop;
        }

        if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
                atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "fnic rq_cmpl fcoe x%x fcsok x%x"
                             " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
                             " x%x\n",
                             fcoe, fcs_ok, packet_error,
                             fcoe_fc_crc_ok, fcoe_enc_error);
                goto drop;
        }

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                goto drop;
        }
        fr_dev(fp) = fnic->lport;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
                                    (char *)skb->data, skb->len)) != 0) {
                printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }

        skb_queue_tail(&fnic->frame_queue, skb);
        queue_work(fnic_event_queue, &fnic->frame_work);

        return;
drop:
        dev_kfree_skb_irq(skb);
}

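/*
 * fnic_rq_cmpl_handler_cont() and fnic_rq_cmpl_handler() walk the receive
 * completion queues, invoking fnic_rq_cmpl_frame_recv() for each completed
 * descriptor and refilling the RQ with fresh frame buffers via
 * fnic_alloc_rq_frame() once work has been done.
 */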
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);

        vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
                        VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
                        NULL);
        return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
        unsigned int tot_rq_work_done = 0, cur_work_done;
        unsigned int i;
        int err;

        for (i = 0; i < fnic->rq_count; i++) {
                cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
                                                fnic_rq_cmpl_handler_cont,
                                                NULL);
                if (cur_work_done) {
                        err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
                        if (err)
                                shost_printk(KERN_ERR, fnic->lport->host,
                                             "fnic_alloc_rq_frame can't alloc"
                                             " frame\n");
                }
                tot_rq_work_done += cur_work_done;
        }

        return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        u16 len;
        dma_addr_t pa;
        int r;

        len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
        skb = dev_alloc_skb(len);
        if (!skb) {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unable to allocate RQ sk_buff\n");
                return -ENOMEM;
        }
        skb_reset_mac_header(skb);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        skb_put(skb, len);
        pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);

        if (pci_dma_mapping_error(fnic->pdev, pa)) {
                r = -ENOMEM;
                printk(KERN_ERR "PCI mapping failed with error %d\n", r);
                goto free_skb;
        }

        fnic_queue_rq_desc(rq, skb, pa, len);
        return 0;

free_skb:
        kfree_skb(skb);
        return r;
}

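/**
 * fnic_free_rq_buf() - Unmap and free an unused receive buffer.
 * @rq: receive queue being cleaned up.
 * @buf: buffer to release.
 */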
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(rq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
        struct fnic *fnic = fnic_from_ctlr(fip);
        struct vnic_wq *wq = &fnic->wq[0];
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        unsigned long flags;
        int r;

        if (!fnic->vlan_hw_insert) {
                eth_hdr = (struct ethhdr *)skb_mac_header(skb);
                vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
                                sizeof(*vlan_hdr) - sizeof(*eth_hdr));
                memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
        } else {
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
        }

        pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

        r = pci_dma_mapping_error(fnic->pdev, pa);
        if (r) {
                printk(KERN_ERR "PCI mapping failed with error %d\n", r);
                goto free_skb;
        }

        spin_lock_irqsave(&fnic->wq_lock[0], flags);
        if (!vnic_wq_desc_avail(wq))
                goto irq_restore;

        fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
                               0 /* hw inserts cos value */,
                               fnic->vlan_id, 1);
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
        return;

irq_restore:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
        pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
free_skb:
        kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
        struct vnic_wq *wq = &fnic->wq[0];
        struct sk_buff *skb;
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        struct fcoe_hdr *fcoe_hdr;
        struct fc_frame_header *fh;
        u32 tot_len, eth_hdr_len;
        int ret = 0;
        unsigned long flags;

        fh = fc_frame_header_get(fp);
        skb = fp_skb(fp);

        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
            fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
                return 0;

        if (!fnic->vlan_hw_insert) {
                eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
                vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr = (struct ethhdr *)vlan_hdr;
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
        } else {
                eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
                eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
                eth_hdr->h_proto = htons(ETH_P_FCOE);
                fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
        }

        if (fnic->ctlr.map_dest)
                fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
        else
                memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
        memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

        tot_len = skb->len;
        BUG_ON(tot_len % 4);

        memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
        fcoe_hdr->fcoe_sof = fr_sof(fp);
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

        pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(fnic->pdev, pa)) {
                ret = -ENOMEM;
                printk(KERN_ERR "DMA map failed with error %d\n", ret);
                goto free_skb_on_err;
        }

        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
                                    (char *)eth_hdr, tot_len)) != 0) {
                printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }

        spin_lock_irqsave(&fnic->wq_lock[0], flags);

        if (!vnic_wq_desc_avail(wq)) {
                pci_unmap_single(fnic->pdev, pa,
                                 tot_len, PCI_DMA_TODEVICE);
                ret = -1;
                goto irq_restore;
        }

        fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
                           0 /* hw inserts cos value */,
                           fnic->vlan_id, 1, 1, 1);

irq_restore:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
        if (ret)
                dev_kfree_skb_any(fp_skb(fp));

        return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lp);
        unsigned long flags;

        if (fnic->in_remove) {
                dev_kfree_skb(fp_skb(fp));
                return -1;
        }

        /*
         * Queue frame if in a transitional state.
         * This occurs while registering the Port_ID / MAC address after FLOGI.
         */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
                skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->tx_queue))) {
                fp = (struct fc_frame *)skb;
                fnic_send_frame(fnic, fp);
        }
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
        unsigned long flags;
        enum fnic_state old_state;
        int ret;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
        old_state = fnic->state;
        switch (old_state) {
        case FNIC_IN_FC_MODE:
        case FNIC_IN_ETH_TRANS_FC_MODE:
        default:
                fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                ret = fnic_fw_reset_handler(fnic);

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
                        goto again;
                if (ret)
                        fnic->state = old_state;
                break;

        case FNIC_IN_FC_TRANS_ETH_MODE:
        case FNIC_IN_ETH_MODE:
                break;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

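/*
 * Transmit-completion path: fnic_wq_complete_frame_send() unmaps and frees
 * each sent frame, and fnic_wq_cmpl_handler() services the work-queue
 * completion queues, which follow the receive CQs in fnic->cq[].
 */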
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
                                        struct cq_desc *cq_desc,
                                        struct vnic_wq_buf *buf, void *opaque)
{
        struct sk_buff *skb = buf->os_buf;
        struct fc_frame *fp = (struct fc_frame *)skb;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);
        dev_kfree_skb_irq(fp_skb(fp));
        buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
        vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
                        fnic_wq_complete_frame_send, NULL);
        spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

        return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
        unsigned int wq_work_done = 0;
        unsigned int i;

        for (i = 0; i < fnic->raw_wq_count; i++) {
                wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
                                                work_to_do,
                                                fnic_wq_cmpl_handler_cont,
                                                NULL);
        }

        return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        pci_unmap_single(fnic->pdev, buf->dma_addr,
                         buf->len, PCI_DMA_TODEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}

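/**
 * fnic_fcoe_reset_vlans() - Free the list of discovered FCoE VLANs.
 * @fnic: fnic instance.
 */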
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        struct fcoe_vlan *next;

        /*
         * Indicate a link down to fcoe so that all FCFs are freed.
         * This might not be required, since we do this before sending
         * the VLAN discovery request.
         */
        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (!list_empty(&fnic->vlans)) {
                list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
                        list_del(&vlan->list);
                        kfree(vlan);
                }
        }
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

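/**
 * fnic_handle_fip_timer() - FIP VLAN discovery timer expiry handler.
 * @fnic: fnic instance.
 *
 * Retries solicitation on the current VLAN, moves on to the next
 * discovered VLAN after FCOE_CTLR_MAX_SOL unanswered attempts, and
 * restarts VLAN discovery once every candidate has been exhausted.
 */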
void fnic_handle_fip_timer(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u64 sol_time;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        if (fnic->ctlr.mode == FIP_ST_NON_FIP)
                return;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (list_empty(&fnic->vlans)) {
                /* no VLANs available, try again */
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Start VLAN Discovery\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                return;
        }

        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        shost_printk(KERN_DEBUG, fnic->lport->host,
                     "fip_timer: vlan %d state %d sol_count %d\n",
                     vlan->vid, vlan->state, vlan->sol_count);
        switch (vlan->state) {
        case FIP_VLAN_USED:
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "FIP VLAN is selected for FC transaction\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                break;
        case FIP_VLAN_FAILED:
                /* if all VLANs are in failed state, restart VLAN discovery */
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Start VLAN Discovery\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                break;
        case FIP_VLAN_SENT:
                if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
                        /*
                         * no response on this VLAN, remove it from the
                         * list and try the next VLAN
                         */
                        shost_printk(KERN_INFO, fnic->lport->host,
                                     "Dequeue this VLAN ID %d from list\n",
                                     vlan->vid);
                        list_del(&vlan->list);
                        kfree(vlan);
                        vlan = NULL;
                        if (list_empty(&fnic->vlans)) {
                                /* we exhausted all VLANs, restart VLAN disc */
                                spin_unlock_irqrestore(&fnic->vlans_lock,
                                                       flags);
                                shost_printk(KERN_INFO, fnic->lport->host,
                                             "fip_timer: vlan list empty, "
                                             "trigger vlan disc\n");
                                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                                return;
                        }
                        /* check the next VLAN */
                        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
                                                list);
                        fnic->set_vlan(fnic, vlan->vid);
                        vlan->state = FIP_VLAN_SENT; /* sent now */
                }
                /* bump the retry count while still holding vlans_lock */
                vlan->sol_count++;
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
                sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
                mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
                break;
        }
}