gdm_qos.c

/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"
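
/*
 * Pending QoS packets are queued as struct qos_entry_s nodes.  Freed
 * nodes are cached on qos_free_list (up to MAX_FREE_LIST_CNT) so that
 * the hot path can usually avoid kmalloc(GFP_ATOMIC).
 */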
#define MAX_FREE_LIST_CNT	32
static struct {
	struct list_head head;
	int cnt;
	spinlock_t lock;
} qos_free_list;

static void init_qos_entry_list(void)
{
	qos_free_list.cnt = 0;
	INIT_LIST_HEAD(&qos_free_list.head);
	spin_lock_init(&qos_free_list.lock);
}

static void *alloc_qos_entry(void)
{
	struct qos_entry_s *entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt) {
		entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
				   list);
		list_del(&entry->list);
		qos_free_list.cnt--;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return entry;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	return kmalloc(sizeof(*entry), GFP_ATOMIC);
}

static void free_qos_entry(void *entry)
{
	struct qos_entry_s *qentry = entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
		list_add(&qentry->list, &qos_free_list.head);
		qos_free_list.cnt++;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	kfree(entry);
}

static void free_qos_entry_list(struct list_head *free_list)
{
	struct qos_entry_s *entry, *n;
	int total_free = 0;

	list_for_each_entry_safe(entry, n, free_list, list) {
		list_del(&entry->list);
		kfree(entry);
		total_free++;
	}

	pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
}
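
/*
 * Reset every classifier slot and queue for this NIC and set up the
 * shared free-entry cache.
 */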
void gdm_qos_init(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	int i;

	for (i = 0; i < QOS_MAX; i++) {
		INIT_LIST_HEAD(&qcb->qos_list[i]);
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = false;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;
	qcb->qos_limit_size = 255;

	spin_lock_init(&qcb->qos_lock);

	init_qos_entry_list();
}
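
/*
 * Disable every service flow and drop all queued packets.  Entries are
 * moved to a private free_list so they can be freed after qos_lock is
 * released.
 */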
void gdm_qos_release_list(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct qos_entry_s *entry, *n;
	struct list_head free_list;
	int i;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&qcb->qos_lock, flags);

	for (i = 0; i < QOS_MAX; i++) {
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = false;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;

	for (i = 0; i < QOS_MAX; i++) {
		list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
			list_move_tail(&entry->list, &free_list);
		}
	}
	spin_unlock_irqrestore(&qcb->qos_lock, flags);
	free_qos_entry_list(&free_list);
}
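
/*
 * Match one IPv4 packet against a single classifier rule set.  @stream
 * points at the IPv4 header, @port at the TCP/UDP header (source and
 * destination ports in the first four bytes).  Returns 0 when every
 * enabled sub-rule matches, 1 on the first mismatch.
 */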
static int chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
{
	int i;

	if (csr->classifier_rule_en & IPTYPEOFSERVICE) {
		if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
		    ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOL) {
		if (stream[9] != csr->protocol)
			return 1;
	}

	if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
			    (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
			    (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) {
		i = ((port[0] << 8) & 0xff00) + port[1];
		if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) {
		i = ((port[2] << 8) & 0xff00) + port[3];
		if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
			return 1;
	}

	return 0;
}
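
/*
 * Map an outgoing IPv4 packet to the first enabled service flow whose
 * classifier matches.  Returns the CSR index, or -1 when the packet is
 * not IPv4 or no rule matches (the caller then falls back to
 * qos_null_idx).
 */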
static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
{
	int ip_ver, i;
	struct qos_cb_s *qcb = &nic->qos;

	if (!iph || !tcpudph)
		return -1;

	ip_ver = (iph[0] >> 4) & 0xf;
	if (ip_ver != 4)
		return -1;

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled)
			continue;
		if (!qcb->csr[i].classifier_rule_en)
			continue;
		if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
			return i;
	}

	return -1;
}
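
/*
 * Pull at most one queued packet from each enabled service flow whose
 * device-side buffer (qos_buf_count) is still below qos_limit_size,
 * collecting them on @head for transmission.  Must be called with
 * qcb->qos_lock held.
 */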
static void extract_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry;
	int i;

	INIT_LIST_HEAD(head);

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled)
			continue;
		if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size)
			continue;
		if (list_empty(&qcb->qos_list[i]))
			continue;

		entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
				   list);

		list_move_tail(&entry->list, head);
		qcb->csr[i].qos_buf_count++;

		if (!list_empty(&qcb->qos_list[i]))
			netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i);
	}
}

static void send_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_entry_s *entry, *n;

	list_for_each_entry_safe(entry, n, head, list) {
		list_del(&entry->list);
		gdm_wimax_send_tx(entry->skb, entry->dev);
		free_qos_entry(entry);
	}
}
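
/*
 * Transmit path entry point.  IPv4 packets are queued on the per-flow
 * QoS lists and drained according to the device's buffer reports; all
 * other packets bypass QoS and go straight to gdm_wimax_send_tx().
 */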
int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
{
	struct nic *nic = netdev_priv(dev);
	int index;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
	struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
	struct tcphdr *tcph;
	struct qos_entry_s *entry = NULL;
	struct list_head send_list;
	int ret = 0;

	/* ihl counts 32-bit words, so advance byte-wise past the IP header. */
	tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);

	if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) {
		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
			entry = alloc_qos_entry();
			if (entry) {
				entry->skb = skb;
				entry->dev = dev;
				netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
					   qcb->qos_list_cnt);
			}
		}

		spin_lock_irqsave(&qcb->qos_lock, flags);
		if (qcb->qos_list_cnt) {
			index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
			if (index == -1)
				index = qcb->qos_null_idx;

			if (!entry) {
				entry = alloc_qos_entry();
				if (!entry) {
					/* Out of memory: fall back to a direct send. */
					spin_unlock_irqrestore(&qcb->qos_lock,
							       flags);
					goto send_direct;
				}
				entry->skb = skb;
				entry->dev = dev;
			}

			list_add_tail(&entry->list, &qcb->qos_list[index]);
			extract_qos_list(nic, &send_list);
			spin_unlock_irqrestore(&qcb->qos_lock, flags);
			send_qos_list(nic, &send_list);
			goto out;
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		if (entry)
			free_qos_entry(entry);
	}

send_direct:
	ret = gdm_wimax_send_tx(skb, dev);
out:
	return ret;
}
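
/*
 * Look up the CSR slot for @sfid.  With @mode != 0 a free slot is
 * claimed (and counted) when the SFID is not yet known.  Returns the
 * slot index or -1.
 */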
static int get_csr(struct qos_cb_s *qcb, u32 sfid, int mode)
{
	int i;

	for (i = 0; i < qcb->qos_list_cnt; i++) {
		if (qcb->csr[i].sfid == sfid)
			return i;
	}

	if (mode) {
		for (i = 0; i < QOS_MAX; i++) {
			if (!qcb->csr[i].enabled) {
				qcb->csr[i].enabled = true;
				qcb->qos_list_cnt++;
				return i;
			}
		}
	}
	return -1;
}

#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD
#define QOS_REPORT	0xFE
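
/*
 * Handle a QoS HCI event from the device.  buf[4] carries the
 * sub-command: QOS_REPORT updates per-flow buffer counts and drains
 * the queues; QOS_ADD installs a classifier for the big-endian SFID at
 * buf[6..9]; QOS_CHANGE_DEL disables the flow and frees its queue.
 */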
void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
{
	struct nic *nic = nic_ptr;
	int i, index, pos;
	u32 sfid;
	u8 sub_cmd_evt;
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry, *n;
	struct list_head send_list;
	struct list_head free_list;
	unsigned long flags;

	sub_cmd_evt = buf[4];

	if (sub_cmd_evt == QOS_REPORT) {
		spin_lock_irqsave(&qcb->qos_lock, flags);
		for (i = 0; i < qcb->qos_list_cnt; i++) {
			sfid = ((buf[(i * 5) + 6] << 24) & 0xff000000);
			sfid += ((buf[(i * 5) + 7] << 16) & 0xff0000);
			sfid += ((buf[(i * 5) + 8] << 8) & 0xff00);
			sfid += (buf[(i * 5) + 9]);
			index = get_csr(qcb, sfid, 0);
			if (index == -1) {
				spin_unlock_irqrestore(&qcb->qos_lock, flags);
				netdev_err(nic->netdev, "QoS ERROR: No SF\n");
				return;
			}
			qcb->csr[index].qos_buf_count = buf[(i * 5) + 10];
		}

		extract_qos_list(nic, &send_list);
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		send_qos_list(nic, &send_list);
		return;
	}

	/* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANGE_DEL */
	pos = 6;
	sfid = ((buf[pos++] << 24) & 0xff000000);
	sfid += ((buf[pos++] << 16) & 0xff0000);
	sfid += ((buf[pos++] << 8) & 0xff00);
	sfid += (buf[pos++]);

	index = get_csr(qcb, sfid, 1);
	if (index == -1) {
		netdev_err(nic->netdev,
			   "QoS ERROR: csr Update Error / Wrong index (%d)\n",
			   index);
		return;
	}

	if (sub_cmd_evt == QOS_ADD) {
		netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
			   sfid, index);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].sfid = sfid;
		qcb->csr[index].classifier_rule_en = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].classifier_rule_en += buf[pos++];
		if (qcb->csr[index].classifier_rule_en == 0)
			qcb->qos_null_idx = index;
		qcb->csr[index].ip2s_mask = buf[pos++];
		qcb->csr[index].ip2s_lo = buf[pos++];
		qcb->csr[index].ip2s_hi = buf[pos++];
		qcb->csr[index].protocol = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
		qcb->csr[index].ipsrc_addr[0] = buf[pos++];
		qcb->csr[index].ipsrc_addr[1] = buf[pos++];
		qcb->csr[index].ipsrc_addr[2] = buf[pos++];
		qcb->csr[index].ipsrc_addr[3] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
		qcb->csr[index].ipdst_addr[0] = buf[pos++];
		qcb->csr[index].ipdst_addr[1] = buf[pos++];
		qcb->csr[index].ipdst_addr[2] = buf[pos++];
		qcb->csr[index].ipdst_addr[3] = buf[pos++];
		qcb->csr[index].srcport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_lo += buf[pos++];
		qcb->csr[index].srcport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_hi += buf[pos++];
		qcb->csr[index].dstport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_lo += buf[pos++];
		qcb->csr[index].dstport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_hi += buf[pos++];

		qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
	} else if (sub_cmd_evt == QOS_CHANGE_DEL) {
		netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
			   sfid, index);

		INIT_LIST_HEAD(&free_list);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].enabled = false;
		qcb->qos_list_cnt--;
		/* Guard against a divide-by-zero once the last flow is gone. */
		if (qcb->qos_list_cnt)
			qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
		list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
					 list) {
			list_move_tail(&entry->list, &free_list);
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		free_qos_entry_list(&free_list);
	}
}