iscsi_target_util.c

/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <linux/percpu_ida.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

#define PRINT_BUFF(buff, len)                                           \
{                                                                       \
        int zzz;                                                        \
                                                                        \
        pr_debug("%d:\n", __LINE__);                                    \
        for (zzz = 0; zzz < len; zzz++) {                               \
                if (zzz % 16 == 0) {                                    \
                        if (zzz)                                        \
                                pr_debug("\n");                         \
                        pr_debug("%4i: ", zzz);                         \
                }                                                       \
                pr_debug("%02x ", (unsigned char) (buff)[zzz]);         \
        }                                                               \
        if ((len + 1) % 16)                                             \
                pr_debug("\n");                                         \
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 * Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
        struct iscsi_cmd *cmd,
        u32 offset,
        u32 xfer_len,
        int recovery,
        u32 r2t_sn)
{
        struct iscsi_r2t *r2t;

        r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
        if (!r2t) {
                pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
                return -1;
        }
        INIT_LIST_HEAD(&r2t->r2t_list);

        r2t->recovery_r2t = recovery;
        r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
        r2t->offset = offset;
        r2t->xfer_len = xfer_len;
        list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
        spin_unlock_bh(&cmd->r2t_lock);

        iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

        spin_lock_bh(&cmd->r2t_lock);
        return 0;
}
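
/*
 * Return the R2T on cmd's list whose offset/xfer_len range covers the
 * requested offset and length, or NULL (with an error logged) if no such
 * R2T exists.
 */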
struct iscsi_r2t *iscsit_get_r2t_for_eos(
        struct iscsi_cmd *cmd,
        u32 offset,
        u32 length)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if ((r2t->offset <= offset) &&
                    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        pr_err("Unable to locate R2T for Offset: %u, Length:"
                " %u\n", offset, length);
        return NULL;
}
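
/*
 * Return the first R2T on cmd's list that has not yet been sent, or NULL
 * (with an error logged) if every queued R2T has already been sent.
 */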
struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if (!r2t->sent_r2t) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        pr_err("Unable to locate next R2T to send for ITT:"
                " 0x%08x.\n", cmd->init_task_tag);
        return NULL;
}

/*
 * Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
        list_del(&r2t->r2t_list);
        kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
        struct iscsi_r2t *r2t, *r2t_tmp;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
                iscsit_free_r2t(r2t, cmd);
        spin_unlock_bh(&cmd->r2t_lock);
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
        struct iscsi_cmd *cmd;
        struct se_session *se_sess = conn->sess->se_sess;
        int size, tag;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
        if (tag < 0)
                return NULL;

        size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
        cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
        memset(cmd, 0, size);

        cmd->se_cmd.map_tag = tag;
        cmd->conn = conn;
        INIT_LIST_HEAD(&cmd->i_conn_node);
        INIT_LIST_HEAD(&cmd->datain_list);
        INIT_LIST_HEAD(&cmd->cmd_r2t_list);
        spin_lock_init(&cmd->datain_lock);
        spin_lock_init(&cmd->dataout_timeout_lock);
        spin_lock_init(&cmd->istate_lock);
        spin_lock_init(&cmd->error_lock);
        spin_lock_init(&cmd->r2t_lock);

        return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);
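
/*
 * Look up the DataIN sequence descriptor in cmd->seq_list that matches
 * the requested seq_send_order.
 */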
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
        struct iscsi_cmd *cmd,
        u32 seq_send_order)
{
        u32 i;

        for (i = 0; i < cmd->seq_count; i++)
                if (cmd->seq_list[i].seq_send_order == seq_send_order)
                        return &cmd->seq_list[i];

        return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
        u32 i;

        if (!cmd->seq_list) {
                pr_err("struct iscsi_cmd->seq_list is NULL!\n");
                return NULL;
        }

        for (i = 0; i < cmd->seq_count; i++) {
                if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
                        continue;
                if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
                        cmd->seq_send_order++;
                        return &cmd->seq_list[i];
                }
        }

        return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
        struct iscsi_cmd *cmd,
        u32 r2t_sn)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if (r2t->r2t_sn == r2t_sn) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        return NULL;
}
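
/*
 * Called with sess->cmdsn_mutex held (see iscsit_sequence_cmd() below).
 * Returns CMDSN_NORMAL_OPERATION, CMDSN_HIGHER_THAN_EXP,
 * CMDSN_LOWER_THAN_EXP or CMDSN_MAXCMDSN_OVERRUN.
 */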
static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
        u32 max_cmdsn;
        int ret;

        /*
         * This is the proper method of checking received CmdSN against
         * ExpCmdSN and MaxCmdSN values, as well as accounting for out
         * of order CmdSNs due to multiple connection sessions and/or
         * CRC failures.
         */
        max_cmdsn = atomic_read(&sess->max_cmd_sn);
        if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
                pr_err("Received CmdSN: 0x%08x is greater than"
                       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
                ret = CMDSN_MAXCMDSN_OVERRUN;

        } else if (cmdsn == sess->exp_cmd_sn) {
                sess->exp_cmd_sn++;
                pr_debug("Received CmdSN matches ExpCmdSN,"
                         " incremented ExpCmdSN to: 0x%08x\n",
                         sess->exp_cmd_sn);
                ret = CMDSN_NORMAL_OPERATION;

        } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
                pr_debug("Received CmdSN: 0x%08x is greater"
                         " than ExpCmdSN: 0x%08x, not acknowledging.\n",
                         cmdsn, sess->exp_cmd_sn);
                ret = CMDSN_HIGHER_THAN_EXP;

        } else {
                pr_err("Received CmdSN: 0x%08x is less than"
                       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
                       sess->exp_cmd_sn);
                ret = CMDSN_LOWER_THAN_EXP;
        }

        return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        unsigned char *buf, __be32 cmdsn)
{
        int ret, cmdsn_ret;
        bool reject = false;
        u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

        mutex_lock(&conn->sess->cmdsn_mutex);

        cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
        switch (cmdsn_ret) {
        case CMDSN_NORMAL_OPERATION:
                ret = iscsit_execute_cmd(cmd, 0);
                if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
                        iscsit_execute_ooo_cmdsns(conn->sess);
                else if (ret < 0) {
                        reject = true;
                        ret = CMDSN_ERROR_CANNOT_RECOVER;
                }
                break;
        case CMDSN_HIGHER_THAN_EXP:
                ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
                if (ret < 0) {
                        reject = true;
                        ret = CMDSN_ERROR_CANNOT_RECOVER;
                        break;
                }
                ret = CMDSN_HIGHER_THAN_EXP;
                break;
        case CMDSN_LOWER_THAN_EXP:
        case CMDSN_MAXCMDSN_OVERRUN:
        default:
                cmd->i_state = ISTATE_REMOVE;
                iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
                /*
                 * Existing callers for iscsit_sequence_cmd() will silently
                 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
                 * return for CMDSN_MAXCMDSN_OVERRUN as well.
                 */
                ret = CMDSN_LOWER_THAN_EXP;
                break;
        }
        mutex_unlock(&conn->sess->cmdsn_mutex);

        if (reject)
                iscsit_reject_cmd(cmd, reason, buf);

        return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);

int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
        struct iscsi_conn *conn = cmd->conn;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);

        if (conn->sess->sess_ops->InitialR2T) {
                pr_err("Received unexpected unsolicited data"
                        " while InitialR2T=Yes, protocol error.\n");
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
                return -1;
        }

        if ((cmd->first_burst_len + payload_length) >
             conn->sess->sess_ops->FirstBurstLength) {
                pr_err("Total %u bytes exceeds FirstBurstLength: %u"
                        " for this Unsolicited DataOut Burst.\n",
                        (cmd->first_burst_len + payload_length),
                        conn->sess->sess_ops->FirstBurstLength);
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_INCORRECT_AMOUNT_OF_DATA, 0);
                return -1;
        }

        if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
                return 0;

        if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
            ((cmd->first_burst_len + payload_length) !=
              conn->sess->sess_ops->FirstBurstLength)) {
                pr_err("Unsolicited non-immediate data received %u"
                        " does not equal FirstBurstLength: %u, and does"
                        " not equal ExpXferLen %u.\n",
                        (cmd->first_burst_len + payload_length),
                        conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_INCORRECT_AMOUNT_OF_DATA, 0);
                return -1;
        }

        return 0;
}
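
/*
 * Locate a command on this connection's command list by Initiator Task Tag.
 */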
struct iscsi_cmd *iscsit_find_cmd_from_itt(
        struct iscsi_conn *conn,
        itt_t init_task_tag)
{
        struct iscsi_cmd *cmd;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
                        init_task_tag, conn->cid);
        return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);

struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
        struct iscsi_conn *conn,
        itt_t init_task_tag,
        u32 length)
{
        struct iscsi_cmd *cmd;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
                        continue;
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
                        " dumping payload\n", init_task_tag, conn->cid);
        if (length)
                iscsit_dump_data_payload(conn, length, 1);

        return NULL;
}

struct iscsi_cmd *iscsit_find_cmd_from_ttt(
        struct iscsi_conn *conn,
        u32 targ_xfer_tag)
{
        struct iscsi_cmd *cmd = NULL;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->targ_xfer_tag == targ_xfer_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
                        targ_xfer_tag, conn->cid);
        return NULL;
}
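
/*
 * Returns 0 if the command was found on the active connection recovery
 * list, -2 if it was found on the inactive list, and -1 if no match exists.
 */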
int iscsit_find_cmd_for_recovery(
        struct iscsi_session *sess,
        struct iscsi_cmd **cmd_ptr,
        struct iscsi_conn_recovery **cr_ptr,
        itt_t init_task_tag)
{
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_conn_recovery *cr;
        /*
         * Scan through the inactive connection recovery list's command list.
         * If init_task_tag matches, the command is still allegiant to the
         * failed connection.
         */
        spin_lock(&sess->cr_i_lock);
        list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
                spin_lock(&cr->conn_recovery_cmd_lock);
                list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
                        if (cmd->init_task_tag == init_task_tag) {
                                spin_unlock(&cr->conn_recovery_cmd_lock);
                                spin_unlock(&sess->cr_i_lock);

                                *cr_ptr = cr;
                                *cmd_ptr = cmd;
                                return -2;
                        }
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&sess->cr_i_lock);
        /*
         * Scan through the active connection recovery list's command list.
         * If init_task_tag matches, the command is ready to be reassigned.
         */
        spin_lock(&sess->cr_a_lock);
        list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
                spin_lock(&cr->conn_recovery_cmd_lock);
                list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
                        if (cmd->init_task_tag == init_task_tag) {
                                spin_unlock(&cr->conn_recovery_cmd_lock);
                                spin_unlock(&sess->cr_a_lock);

                                *cr_ptr = cr;
                                *cmd_ptr = cmd;
                                return 0;
                        }
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&sess->cr_a_lock);

        return -1;
}
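
/*
 * Queue a request on the connection's immediate queue and wake up the
 * thread sleeping on conn->queues_wq.
 */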
void iscsit_add_cmd_to_immediate_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        u8 state)
{
        struct iscsi_queue_req *qr;

        qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
                return;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
        qr->state = state;

        spin_lock_bh(&conn->immed_queue_lock);
        list_add_tail(&qr->qr_list, &conn->immed_queue_list);
        atomic_inc(&cmd->immed_queue_count);
        atomic_set(&conn->check_immediate_queue, 1);
        spin_unlock_bh(&conn->immed_queue_lock);

        wake_up(&conn->queues_wq);
}

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr;

        spin_lock_bh(&conn->immed_queue_lock);
        if (list_empty(&conn->immed_queue_list)) {
                spin_unlock_bh(&conn->immed_queue_lock);
                return NULL;
        }
        qr = list_first_entry(&conn->immed_queue_list,
                              struct iscsi_queue_req, qr_list);

        list_del(&qr->qr_list);
        if (qr->cmd)
                atomic_dec(&qr->cmd->immed_queue_count);
        spin_unlock_bh(&conn->immed_queue_lock);

        return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->immed_queue_lock);
        if (!atomic_read(&cmd->immed_queue_count)) {
                spin_unlock_bh(&conn->immed_queue_lock);
                return;
        }

        list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
                if (qr->cmd != cmd)
                        continue;

                atomic_dec(&qr->cmd->immed_queue_count);
                list_del(&qr->qr_list);
                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->immed_queue_lock);

        if (atomic_read(&cmd->immed_queue_count)) {
                pr_err("ITT: 0x%08x immed_queue_count: %d\n",
                        cmd->init_task_tag,
                        atomic_read(&cmd->immed_queue_count));
        }
}

void iscsit_add_cmd_to_response_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        u8 state)
{
        struct iscsi_queue_req *qr;

        qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
                return;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
        qr->state = state;

        spin_lock_bh(&conn->response_queue_lock);
        list_add_tail(&qr->qr_list, &conn->response_queue_list);
        atomic_inc(&cmd->response_queue_count);
        spin_unlock_bh(&conn->response_queue_lock);

        wake_up(&conn->queues_wq);
}

struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr;

        spin_lock_bh(&conn->response_queue_lock);
        if (list_empty(&conn->response_queue_list)) {
                spin_unlock_bh(&conn->response_queue_lock);
                return NULL;
        }

        qr = list_first_entry(&conn->response_queue_list,
                              struct iscsi_queue_req, qr_list);

        list_del(&qr->qr_list);
        if (qr->cmd)
                atomic_dec(&qr->cmd->response_queue_count);
        spin_unlock_bh(&conn->response_queue_lock);

        return qr;
}

static void iscsit_remove_cmd_from_response_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->response_queue_lock);
        if (!atomic_read(&cmd->response_queue_count)) {
                spin_unlock_bh(&conn->response_queue_lock);
                return;
        }

        list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
                                 qr_list) {
                if (qr->cmd != cmd)
                        continue;

                atomic_dec(&qr->cmd->response_queue_count);
                list_del(&qr->qr_list);
                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->response_queue_lock);

        if (atomic_read(&cmd->response_queue_count)) {
                pr_err("ITT: 0x%08x response_queue_count: %d\n",
                        cmd->init_task_tag,
                        atomic_read(&cmd->response_queue_count));
        }
}
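
/*
 * Returns true only when both the immediate and response queues are empty.
 */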
bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
        bool empty;

        spin_lock_bh(&conn->immed_queue_lock);
        empty = list_empty(&conn->immed_queue_list);
        spin_unlock_bh(&conn->immed_queue_lock);

        if (!empty)
                return empty;

        spin_lock_bh(&conn->response_queue_lock);
        empty = list_empty(&conn->response_queue_list);
        spin_unlock_bh(&conn->response_queue_lock);

        return empty;
}

void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->immed_queue_lock);
        list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
                list_del(&qr->qr_list);
                if (qr->cmd)
                        atomic_dec(&qr->cmd->immed_queue_count);

                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->immed_queue_lock);

        spin_lock_bh(&conn->response_queue_lock);
        list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
                                 qr_list) {
                list_del(&qr->qr_list);
                if (qr->cmd)
                        atomic_dec(&qr->cmd->response_queue_count);

                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->response_queue_lock);
}
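
/*
 * Free the per-command allocations and return the tag to the session's
 * percpu_ida tag pool.
 */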
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
        struct iscsi_session *sess;
        struct se_cmd *se_cmd = &cmd->se_cmd;

        if (cmd->conn)
                sess = cmd->conn->sess;
        else
                sess = cmd->sess;

        BUG_ON(!sess || !sess->se_sess);

        kfree(cmd->buf_ptr);
        kfree(cmd->pdu_list);
        kfree(cmd->seq_list);
        kfree(cmd->tmr_req);
        kfree(cmd->iov_data);
        kfree(cmd->text_in_ptr);

        percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
EXPORT_SYMBOL(iscsit_release_cmd);

void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
                       bool check_queues)
{
        struct iscsi_conn *conn = cmd->conn;

        if (scsi_cmd) {
                if (cmd->data_direction == DMA_TO_DEVICE) {
                        iscsit_stop_dataout_timer(cmd);
                        iscsit_free_r2ts_from_list(cmd);
                }
                if (cmd->data_direction == DMA_FROM_DEVICE)
                        iscsit_free_all_datain_reqs(cmd);
        }

        if (conn && check_queues) {
                iscsit_remove_cmd_from_immediate_queue(cmd, conn);
                iscsit_remove_cmd_from_response_queue(cmd, conn);
        }
}

void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
        struct se_cmd *se_cmd = NULL;
        int rc;
        bool op_scsi = false;
        /*
         * Determine if a struct se_cmd is associated with
         * this struct iscsi_cmd.
         */
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
                op_scsi = true;
                /*
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
                se_cmd = &cmd->se_cmd;
                __iscsit_free_cmd(cmd, op_scsi, shutdown);
                rc = transport_generic_free_cmd(se_cmd, shutdown);
                if (!rc && shutdown && se_cmd->se_sess) {
                        __iscsit_free_cmd(cmd, op_scsi, shutdown);
                        target_put_sess_cmd(se_cmd);
                }
                break;
        case ISCSI_OP_REJECT:
                /*
                 * Handle special case for REJECT when iscsi_add_reject*() has
                 * overwritten the original iscsi_opcode assignment, and the
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
                        se_cmd = &cmd->se_cmd;
                        __iscsit_free_cmd(cmd, true, shutdown);
                        rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
                        if (!rc && shutdown && se_cmd->se_sess) {
                                __iscsit_free_cmd(cmd, true, shutdown);
                                target_put_sess_cmd(se_cmd);
                        }
                        break;
                }
                /* Fall-through */
        default:
                __iscsit_free_cmd(cmd, false, shutdown);
                iscsit_release_cmd(cmd);
                break;
        }
}
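
/*
 * Wait for the session usage count to reach zero.  Returns 2 when called
 * from interrupt context (no wait is performed), 1 after waiting for the
 * completion, and 0 if the count was already zero.
 */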
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        if (sess->session_usage_count != 0) {
                sess->session_waiting_on_uc = 1;
                spin_unlock_bh(&sess->session_usage_lock);
                if (in_interrupt())
                        return 2;

                wait_for_completion(&sess->session_waiting_on_uc_comp);
                return 1;
        }
        spin_unlock_bh(&sess->session_usage_lock);

        return 0;
}

void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count--;

        if (!sess->session_usage_count && sess->session_waiting_on_uc)
                complete(&sess->session_waiting_on_uc_comp);

        spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count++;
        spin_unlock_bh(&sess->session_usage_lock);
}

struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
        struct iscsi_conn *conn;

        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
                if ((conn->cid == cid) &&
                    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
                        iscsit_inc_conn_usage_count(conn);
                        spin_unlock_bh(&sess->conn_lock);
                        return conn;
                }
        }
        spin_unlock_bh(&sess->conn_lock);

        return NULL;
}

struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
        struct iscsi_conn *conn;

        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
                if (conn->cid == cid) {
                        iscsit_inc_conn_usage_count(conn);
                        spin_lock(&conn->state_lock);
                        atomic_set(&conn->connection_wait_rcfr, 1);
                        spin_unlock(&conn->state_lock);
                        spin_unlock_bh(&sess->conn_lock);
                        return conn;
                }
        }
        spin_unlock_bh(&sess->conn_lock);

        return NULL;
}

void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        if (conn->conn_usage_count != 0) {
                conn->conn_waiting_on_uc = 1;
                spin_unlock_bh(&conn->conn_usage_lock);

                wait_for_completion(&conn->conn_waiting_on_uc_comp);
                return;
        }
        spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count--;

        if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
                complete(&conn->conn_waiting_on_uc_comp);

        spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count++;
        spin_unlock_bh(&conn->conn_usage_lock);
}
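
/*
 * Allocate and queue a NopIN PDU.  When a response is expected, a valid
 * Target Transfer Tag is assigned and the NopIN response timer is started.
 */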
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
        u8 state;
        struct iscsi_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
        if (!cmd)
                return -1;

        cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
        state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
                                  ISTATE_SEND_NOPIN_NO_RESPONSE;

        cmd->init_task_tag = RESERVED_ITT;
        cmd->targ_xfer_tag = (want_response) ?
                             session_get_next_ttt(conn->sess) : 0xFFFFFFFF;

        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);

        if (want_response)
                iscsit_start_nopin_response_timer(conn);
        iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

        return 0;
}
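
/*
 * NopIN response timer expiry: if the timer was not stopped, record the
 * connection timeout in the tiqn session error stats and force connection
 * reinstatement.
 */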
static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
        struct iscsi_conn *conn = (struct iscsi_conn *) data;

        iscsit_inc_conn_usage_count(conn);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                iscsit_dec_conn_usage_count(conn);
                return;
        }

        pr_debug("Did not receive response to NOPIN on CID: %hu on"
                " SID: %u, failing connection.\n", conn->cid,
                        conn->sess->sid);
        conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);

        {
        struct iscsi_portal_group *tpg = conn->sess->tpg;
        struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

        if (tiqn) {
                spin_lock_bh(&tiqn->sess_err_stats.lock);
                strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
                                conn->sess->sess_ops->InitiatorName);
                tiqn->sess_err_stats.last_sess_failure_type =
                                ISCSI_SESS_ERR_CXN_TIMEOUT;
                tiqn->sess_err_stats.cxn_timeout_errors++;
                atomic_long_inc(&conn->sess->conn_timeout_errors);
                spin_unlock_bh(&tiqn->sess_err_stats.lock);
        }
        }

        iscsit_cause_connection_reinstatement(conn, 0);
        iscsit_dec_conn_usage_count(conn);
}

void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        mod_timer(&conn->nopin_response_timer,
                (get_jiffies_64() + na->nopin_response_timeout * HZ));
        spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        init_timer(&conn->nopin_response_timer);
        conn->nopin_response_timer.expires =
                (get_jiffies_64() + na->nopin_response_timeout * HZ);
        conn->nopin_response_timer.data = (unsigned long)conn;
        conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
        conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
        add_timer(&conn->nopin_response_timer);

        pr_debug("Started NOPIN Response Timer on CID: %d to %u"
                " seconds\n", conn->cid, na->nopin_response_timeout);
        spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }
        conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
        spin_unlock_bh(&conn->nopin_timer_lock);

        del_timer_sync(&conn->nopin_response_timer);

        spin_lock_bh(&conn->nopin_timer_lock);
        conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);
}

static void iscsit_handle_nopin_timeout(unsigned long data)
{
        struct iscsi_conn *conn = (struct iscsi_conn *) data;

        iscsit_inc_conn_usage_count(conn);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                iscsit_dec_conn_usage_count(conn);
                return;
        }
        conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);

        iscsit_add_nopin(conn, 1);
        iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
        /*
         * NOPIN timeout is disabled.
         */
        if (!na->nopin_timeout)
                return;

        if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
                return;

        init_timer(&conn->nopin_timer);
        conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
        conn->nopin_timer.data = (unsigned long)conn;
        conn->nopin_timer.function = iscsit_handle_nopin_timeout;
        conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
        add_timer(&conn->nopin_timer);

        pr_debug("Started NOPIN Timer on CID: %d at %u second"
                " interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
        /*
         * NOPIN timeout is disabled.
         */
        if (!na->nopin_timeout)
                return;

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        init_timer(&conn->nopin_timer);
        conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
        conn->nopin_timer.data = (unsigned long)conn;
        conn->nopin_timer.function = iscsit_handle_nopin_timeout;
        conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
        add_timer(&conn->nopin_timer);

        pr_debug("Started NOPIN Timer on CID: %d at %u second"
                " interval\n", conn->cid, na->nopin_timeout);
        spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }
        conn->nopin_timer_flags |= ISCSI_TF_STOP;
        spin_unlock_bh(&conn->nopin_timer_lock);

        del_timer_sync(&conn->nopin_timer);

        spin_lock_bh(&conn->nopin_timer_lock);
        conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);
}

int iscsit_send_tx_data(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        int use_misc)
{
        int tx_sent, tx_size;
        u32 iov_count;
        struct kvec *iov;

send_data:
        tx_size = cmd->tx_size;

        if (!use_misc) {
                iov = &cmd->iov_data[0];
                iov_count = cmd->iov_data_count;
        } else {
                iov = &cmd->iov_misc[0];
                iov_count = cmd->iov_misc_count;
        }

        tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
        if (tx_size != tx_sent) {
                if (tx_sent == -EAGAIN) {
                        pr_err("tx_data() returned -EAGAIN\n");
                        goto send_data;
                } else
                        return -1;
        }
        cmd->tx_size = 0;

        return 0;
}
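
/*
 * Transmit the iSCSI header with tx_data(), then the payload page by page
 * via sendpage() over the scatterlist, followed by any padding and the
 * data digest from cmd->iov_data[].
 */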
int iscsit_fe_sendpage_sg(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
{
        struct scatterlist *sg = cmd->first_data_sg;
        struct kvec iov;
        u32 tx_hdr_size, data_len;
        u32 offset = cmd->first_data_sg_off;
        int tx_sent, iov_off;

send_hdr:
        tx_hdr_size = ISCSI_HDR_LEN;
        if (conn->conn_ops->HeaderDigest)
                tx_hdr_size += ISCSI_CRC_LEN;

        iov.iov_base = cmd->pdu;
        iov.iov_len = tx_hdr_size;

        tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
        if (tx_hdr_size != tx_sent) {
                if (tx_sent == -EAGAIN) {
                        pr_err("tx_data() returned -EAGAIN\n");
                        goto send_hdr;
                }
                return -1;
        }

        data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
        /*
         * Set iov_off used by padding and data digest tx_data() calls below
         * in order to determine proper offset into cmd->iov_data[]
         */
        if (conn->conn_ops->DataDigest) {
                data_len -= ISCSI_CRC_LEN;
                if (cmd->padding)
                        iov_off = (cmd->iov_data_count - 2);
                else
                        iov_off = (cmd->iov_data_count - 1);
        } else {
                iov_off = (cmd->iov_data_count - 1);
        }
        /*
         * Perform sendpage() for each page in the scatterlist
         */
        while (data_len) {
                u32 space = (sg->length - offset);
                u32 sub_len = min_t(u32, data_len, space);
send_pg:
                tx_sent = conn->sock->ops->sendpage(conn->sock,
                                sg_page(sg), sg->offset + offset, sub_len, 0);
                if (tx_sent != sub_len) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tcp_sendpage() returned"
                                        " -EAGAIN\n");
                                goto send_pg;
                        }

                        pr_err("tcp_sendpage() failure: %d\n",
                                tx_sent);
                        return -1;
                }

                data_len -= sub_len;
                offset = 0;
                sg = sg_next(sg);
        }

send_padding:
        if (cmd->padding) {
                struct kvec *iov_p = &cmd->iov_data[iov_off++];

                tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
                if (cmd->padding != tx_sent) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tx_data() returned -EAGAIN\n");
                                goto send_padding;
                        }
                        return -1;
                }
        }

send_datacrc:
        if (conn->conn_ops->DataDigest) {
                struct kvec *iov_d = &cmd->iov_data[iov_off];

                tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
                if (ISCSI_CRC_LEN != tx_sent) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tx_data() returned -EAGAIN\n");
                                goto send_datacrc;
                        }
                        return -1;
                }
        }

        return 0;
}

/*
 * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs, with the
 * errors set in status_class and status_detail.
 *
 * Parameters: iSCSI Connection, Status Class, Status Detail.
 * Returns: 0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
        struct iscsi_login_rsp *hdr;
        struct iscsi_login *login = conn->conn_login;

        login->login_failed = 1;
        iscsit_collect_login_stats(conn, status_class, status_detail);

        memset(&login->rsp[0], 0, ISCSI_HDR_LEN);

        hdr = (struct iscsi_login_rsp *)&login->rsp[0];
        hdr->opcode = ISCSI_OP_LOGIN_RSP;
        hdr->status_class = status_class;
        hdr->status_detail = status_detail;
        hdr->itt = conn->login_itt;

        return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}

void iscsit_print_session_params(struct iscsi_session *sess)
{
        struct iscsi_conn *conn;

        pr_debug("-----------------------------[Session Params for"
                " SID: %u]-----------------------------\n", sess->sid);
        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
                iscsi_dump_conn_ops(conn->conn_ops);
        spin_unlock_bh(&sess->conn_lock);

        iscsi_dump_sess_ops(sess->sess_ops);
}
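
/*
 * Receive exactly count->data_length bytes into the supplied kvec array,
 * looping on sock_recvmsg() with MSG_WAITALL until everything has arrived.
 */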
static int iscsit_do_rx_data(
        struct iscsi_conn *conn,
        struct iscsi_data_count *count)
{
        int data = count->data_length, rx_loop = 0, total_rx = 0;
        struct msghdr msg;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        memset(&msg, 0, sizeof(struct msghdr));
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
                      count->iov, count->iov_count, data);

        while (total_rx < data) {
                rx_loop = sock_recvmsg(conn->sock, &msg,
                                      (data - total_rx), MSG_WAITALL);
                if (rx_loop <= 0) {
                        pr_debug("rx_loop: %d total_rx: %d\n",
                                rx_loop, total_rx);
                        return rx_loop;
                }
                total_rx += rx_loop;
                pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
                                rx_loop, total_rx, data);
        }

        return total_rx;
}

static int iscsit_do_tx_data(
        struct iscsi_conn *conn,
        struct iscsi_data_count *count)
{
        int ret, iov_len;
        struct kvec *iov_p;
        struct msghdr msg;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        if (count->data_length <= 0) {
                pr_err("Data length is: %d\n", count->data_length);
                return -1;
        }

        memset(&msg, 0, sizeof(struct msghdr));

        iov_p = count->iov;
        iov_len = count->iov_count;

        ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
                             count->data_length);
        if (ret != count->data_length) {
                pr_err("Unexpected ret: %d send data %d\n",
                       ret, count->data_length);
                return -EPIPE;
        }
        pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);

        return ret;
}
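
/*
 * rx_data()/tx_data() wrap iscsit_do_rx_data()/iscsit_do_tx_data() by
 * packaging the kvec array into a struct iscsi_data_count.
 */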
int rx_data(
        struct iscsi_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
{
        struct iscsi_data_count c;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        memset(&c, 0, sizeof(struct iscsi_data_count));
        c.iov = iov;
        c.iov_count = iov_count;
        c.data_length = data;
        c.type = ISCSI_RX_DATA;

        return iscsit_do_rx_data(conn, &c);
}

int tx_data(
        struct iscsi_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
{
        struct iscsi_data_count c;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        memset(&c, 0, sizeof(struct iscsi_data_count));
        c.iov = iov;
        c.iov_count = iov_count;
        c.data_length = data;
        c.type = ISCSI_TX_DATA;

        return iscsit_do_tx_data(conn, &c);
}
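
/*
 * Compare two sockaddr_storage values by address family, address and port.
 */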
static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
        switch (x->ss_family) {
        case AF_INET: {
                struct sockaddr_in *sinx = (struct sockaddr_in *)x;
                struct sockaddr_in *siny = (struct sockaddr_in *)y;

                if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
                        return false;
                if (sinx->sin_port != siny->sin_port)
                        return false;
                break;
        }
        case AF_INET6: {
                struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
                struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;

                if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
                        return false;
                if (sinx->sin6_port != siny->sin6_port)
                        return false;
                break;
        }
        default:
                return false;
        }

        return true;
}

void iscsit_collect_login_stats(
        struct iscsi_conn *conn,
        u8 status_class,
        u8 status_detail)
{
        struct iscsi_param *intrname = NULL;
        struct iscsi_tiqn *tiqn;
        struct iscsi_login_stats *ls;

        tiqn = iscsit_snmp_get_tiqn(conn);
        if (!tiqn)
                return;

        ls = &tiqn->login_stats;

        spin_lock(&ls->lock);
        if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
            ((get_jiffies_64() - ls->last_fail_time) < 10)) {
                /* We already have the failure info for this login */
                spin_unlock(&ls->lock);
                return;
        }

        if (status_class == ISCSI_STATUS_CLS_SUCCESS)
                ls->accepts++;
        else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
                ls->redirects++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
        } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
                   (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
                ls->authenticate_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
        } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
                   (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
                ls->authorize_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
        } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
                   (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
                ls->negotiate_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
        } else {
                ls->other_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
        }

        /* Save initiator name, ip address and time, if it is a failed login */
        if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
                if (conn->param_list)
                        intrname = iscsi_find_param_from_key(INITIATORNAME,
                                                             conn->param_list);
                strlcpy(ls->last_intr_fail_name,
                        (intrname ? intrname->value : "Unknown"),
                        sizeof(ls->last_intr_fail_name));

                ls->last_intr_fail_ip_family = conn->login_family;

                ls->last_intr_fail_sockaddr = conn->login_sockaddr;
                ls->last_fail_time = get_jiffies_64();
        }

        spin_unlock(&ls->lock);
}

struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
        struct iscsi_portal_group *tpg;

        if (!conn || !conn->sess)
                return NULL;

        tpg = conn->sess->tpg;
        if (!tpg)
                return NULL;

        if (!tpg->tpg_tiqn)
                return NULL;

        return tpg->tpg_tiqn;
}