netback.c

  1. /*
  2. * Back-end of the driver for virtual network devices. This portion of the
  3. * driver exports a 'unified' network-device interface that can be accessed
  4. * by any operating system that implements a compatible front end. A
  5. * reference front-end implementation can be found in:
  6. * drivers/net/xen-netfront.c
  7. *
  8. * Copyright (c) 2002-2005, K A Fraser
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License version 2
  12. * as published by the Free Software Foundation; or, when distributed
  13. * separately from the Linux kernel or incorporated into other
  14. * software packages, subject to the following license:
  15. *
  16. * Permission is hereby granted, free of charge, to any person obtaining a copy
  17. * of this source file (the "Software"), to deal in the Software without
  18. * restriction, including without limitation the rights to use, copy, modify,
  19. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20. * and to permit persons to whom the Software is furnished to do so, subject to
  21. * the following conditions:
  22. *
  23. * The above copyright notice and this permission notice shall be included in
  24. * all copies or substantial portions of the Software.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32. * IN THE SOFTWARE.
  33. */
  34. #include "common.h"
  35. #include <linux/kthread.h>
  36. #include <linux/if_vlan.h>
  37. #include <linux/udp.h>
  38. #include <linux/highmem.h>
  39. #include <net/tcp.h>
  40. #include <xen/xen.h>
  41. #include <xen/events.h>
  42. #include <xen/interface/memory.h>
  43. #include <xen/page.h>
  44. #include <asm/xen/hypercall.h>
  45. /* Provide an option to disable split event channels at load time as
  46. * event channels are limited resource. Split event channels are
  47. * enabled by default.
  48. */
  49. bool separate_tx_rx_irq = true;
  50. module_param(separate_tx_rx_irq, bool, 0644);
  51. /* The time that packets can stay on the guest Rx internal queue
  52. * before they are dropped.
  53. */
  54. unsigned int rx_drain_timeout_msecs = 10000;
  55. module_param(rx_drain_timeout_msecs, uint, 0444);
  56. /* The length of time before the frontend is considered unresponsive
  57. * because it isn't providing Rx slots.
  58. */
  59. unsigned int rx_stall_timeout_msecs = 60000;
  60. module_param(rx_stall_timeout_msecs, uint, 0444);
  61. #define MAX_QUEUES_DEFAULT 8
  62. unsigned int xenvif_max_queues;
  63. module_param_named(max_queues, xenvif_max_queues, uint, 0644);
  64. MODULE_PARM_DESC(max_queues,
  65. "Maximum number of queues per virtual interface");
  66. /*
  67. * This is the maximum number of slots a skb can have. If a guest sends a skb
  68. * which exceeds this limit it is considered malicious.
  69. */
  70. #define FATAL_SKB_SLOTS_DEFAULT 20
  71. static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  72. module_param(fatal_skb_slots, uint, 0444);
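/* Illustrative load-time tuning (hypothetical values, assuming the
 * driver is built as the xen-netback module):
 *
 *   modprobe xen-netback separate_tx_rx_irq=0 max_queues=4
 *
 * Parameters registered with mode 0644 (separate_tx_rx_irq, max_queues)
 * can also be changed later through /sys/module/xen_netback/parameters/,
 * while the 0444 ones are read-only once the module is loaded.
 */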
  73. /* The amount to copy out of the first guest Tx slot into the skb's
  74. * linear area. If the first slot has more data, it will be mapped
  75. * and put into the first frag.
  76. *
  77. * This is sized to avoid pulling headers from the frags for most
  78. * TCP/IP packets.
  79. */
  80. #define XEN_NETBACK_TX_COPY_LEN 128
  81. static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
  82. u8 status);
  83. static void make_tx_response(struct xenvif_queue *queue,
  84. struct xen_netif_tx_request *txp,
  85. s8 st);
  86. static void push_tx_responses(struct xenvif_queue *queue);
  87. static inline int tx_work_todo(struct xenvif_queue *queue);
  88. static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
  89. u16 id,
  90. s8 st,
  91. u16 offset,
  92. u16 size,
  93. u16 flags);
  94. static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
  95. u16 idx)
  96. {
  97. return page_to_pfn(queue->mmap_pages[idx]);
  98. }
  99. static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
  100. u16 idx)
  101. {
  102. return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
  103. }
  104. #define callback_param(vif, pending_idx) \
  105. (vif->pending_tx_info[pending_idx].callback_struct)
  106. /* Find the containing VIF's structure from a pointer in pending_tx_info array
  107. */
  108. static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
  109. {
  110. u16 pending_idx = ubuf->desc;
  111. struct pending_tx_info *temp =
  112. container_of(ubuf, struct pending_tx_info, callback_struct);
  113. return container_of(temp - pending_idx,
  114. struct xenvif_queue,
  115. pending_tx_info[0]);
  116. }
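/* Until the real grant-mapped page is installed by xenvif_fill_frags(),
 * a tx skb's frags have no page to point at, so the frag's page_offset
 * field is borrowed to carry the pending ring index instead.
 */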
  117. static u16 frag_get_pending_idx(skb_frag_t *frag)
  118. {
  119. return (u16)frag->page_offset;
  120. }
  121. static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
  122. {
  123. frag->page_offset = pending_idx;
  124. }
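/* Map a free-running producer/consumer counter onto a slot of the
 * pending ring. The mask only works because MAX_PENDING_REQS is a
 * power of two; e.g. with a 256-entry ring, pending_index(258) and
 * pending_index(2) name the same slot.
 */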
  125. static inline pending_ring_idx_t pending_index(unsigned i)
  126. {
  127. return i & (MAX_PENDING_REQS-1);
  128. }
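/* Worst-case number of rx ring slots one skb may need: a GSO frame can
 * carry up to gso_max_size bytes plus one extra slot for the GSO
 * extra-info descriptor, otherwise an MTU-sized frame is assumed.
 */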
  129. static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
  130. {
  131. if (vif->gso_mask)
  132. return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1;
  133. else
  134. return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE);
  135. }
  136. static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
  137. {
  138. RING_IDX prod, cons;
  139. int needed;
  140. needed = xenvif_rx_ring_slots_needed(queue->vif);
  141. do {
  142. prod = queue->rx.sring->req_prod;
  143. cons = queue->rx.req_cons;
  144. if (prod - cons >= needed)
  145. return true;
  146. queue->rx.sring->req_event = prod + 1;
  147. /* Make sure event is visible before we check prod
  148. * again.
  149. */
  150. mb();
  151. } while (queue->rx.sring->req_prod != prod);
  152. return false;
  153. }
  154. void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
  155. {
  156. unsigned long flags;
  157. spin_lock_irqsave(&queue->rx_queue.lock, flags);
  158. __skb_queue_tail(&queue->rx_queue, skb);
  159. queue->rx_queue_len += skb->len;
  160. if (queue->rx_queue_len > queue->rx_queue_max)
  161. netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
  162. spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
  163. }
  164. static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
  165. {
  166. struct sk_buff *skb;
  167. spin_lock_irq(&queue->rx_queue.lock);
  168. skb = __skb_dequeue(&queue->rx_queue);
  169. if (skb)
  170. queue->rx_queue_len -= skb->len;
  171. spin_unlock_irq(&queue->rx_queue.lock);
  172. return skb;
  173. }
  174. static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
  175. {
  176. spin_lock_irq(&queue->rx_queue.lock);
  177. if (queue->rx_queue_len < queue->rx_queue_max)
  178. netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
  179. spin_unlock_irq(&queue->rx_queue.lock);
  180. }
  181. static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
  182. {
  183. struct sk_buff *skb;
  184. while ((skb = xenvif_rx_dequeue(queue)) != NULL)
  185. kfree_skb(skb);
  186. }
  187. static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
  188. {
  189. struct sk_buff *skb;
  190. for(;;) {
  191. skb = skb_peek(&queue->rx_queue);
  192. if (!skb)
  193. break;
  194. if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
  195. break;
  196. xenvif_rx_dequeue(queue);
  197. kfree_skb(skb);
  198. }
  199. }
  200. struct netrx_pending_operations {
  201. unsigned copy_prod, copy_cons;
  202. unsigned meta_prod, meta_cons;
  203. struct gnttab_copy *copy;
  204. struct xenvif_rx_meta *meta;
  205. int copy_off;
  206. grant_ref_t copy_gref;
  207. };
  208. static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
  209. struct netrx_pending_operations *npo)
  210. {
  211. struct xenvif_rx_meta *meta;
  212. struct xen_netif_rx_request req;
  213. RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
  214. meta = npo->meta + npo->meta_prod++;
  215. meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
  216. meta->gso_size = 0;
  217. meta->size = 0;
  218. meta->id = req.id;
  219. npo->copy_off = 0;
  220. npo->copy_gref = req.gref;
  221. return meta;
  222. }
  223. struct gop_frag_copy {
  224. struct xenvif_queue *queue;
  225. struct netrx_pending_operations *npo;
  226. struct xenvif_rx_meta *meta;
  227. int head;
  228. int gso_type;
  229. struct page *page;
  230. };
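/* Emit one gnttab_copy operation copying up to *len bytes (clamped so
 * the destination buffer never exceeds MAX_BUFFER_OFFSET) from the
 * given page/offset into the guest rx buffer referenced by
 * npo->copy_gref, moving on to the next rx request when the current
 * buffer is full.
 */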
  231. static void xenvif_setup_copy_gop(unsigned long gfn,
  232. unsigned int offset,
  233. unsigned int *len,
  234. struct gop_frag_copy *info)
  235. {
  236. struct gnttab_copy *copy_gop;
  237. struct xen_page_foreign *foreign;
  238. /* Convenient aliases */
  239. struct xenvif_queue *queue = info->queue;
  240. struct netrx_pending_operations *npo = info->npo;
  241. struct page *page = info->page;
  242. BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
  243. if (npo->copy_off == MAX_BUFFER_OFFSET)
  244. info->meta = get_next_rx_buffer(queue, npo);
  245. if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
  246. *len = MAX_BUFFER_OFFSET - npo->copy_off;
  247. copy_gop = npo->copy + npo->copy_prod++;
  248. copy_gop->flags = GNTCOPY_dest_gref;
  249. copy_gop->len = *len;
  250. foreign = xen_page_foreign(page);
  251. if (foreign) {
  252. copy_gop->source.domid = foreign->domid;
  253. copy_gop->source.u.ref = foreign->gref;
  254. copy_gop->flags |= GNTCOPY_source_gref;
  255. } else {
  256. copy_gop->source.domid = DOMID_SELF;
  257. copy_gop->source.u.gmfn = gfn;
  258. }
  259. copy_gop->source.offset = offset;
  260. copy_gop->dest.domid = queue->vif->domid;
  261. copy_gop->dest.offset = npo->copy_off;
  262. copy_gop->dest.u.ref = npo->copy_gref;
  263. npo->copy_off += *len;
  264. info->meta->size += *len;
  265. /* Leave a gap for the GSO descriptor. */
  266. if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
  267. queue->rx.req_cons++;
  268. info->head = 0; /* There must be something in this buffer now */
  269. }
  270. static void xenvif_gop_frag_copy_grant(unsigned long gfn,
  271. unsigned offset,
  272. unsigned int len,
  273. void *data)
  274. {
  275. unsigned int bytes;
  276. while (len) {
  277. bytes = len;
  278. xenvif_setup_copy_gop(gfn, offset, &bytes, data);
  279. offset += bytes;
  280. len -= bytes;
  281. }
  282. }
  283. /*
  284. * Set up the grant operations for this fragment. If it's a flipping
  285. * interface, we also set up the unmap request from here.
  286. */
  287. static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
  288. struct netrx_pending_operations *npo,
  289. struct page *page, unsigned long size,
  290. unsigned long offset, int *head)
  291. {
  292. struct gop_frag_copy info = {
  293. .queue = queue,
  294. .npo = npo,
  295. .head = *head,
  296. .gso_type = XEN_NETIF_GSO_TYPE_NONE,
  297. };
  298. unsigned long bytes;
  299. if (skb_is_gso(skb)) {
  300. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  301. info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
  302. else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  303. info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
  304. }
  305. /* Data must not cross a page boundary. */
  306. BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
  307. info.meta = npo->meta + npo->meta_prod - 1;
  308. /* Skip unused frames from start of page */
  309. page += offset >> PAGE_SHIFT;
  310. offset &= ~PAGE_MASK;
  311. while (size > 0) {
  312. BUG_ON(offset >= PAGE_SIZE);
  313. bytes = PAGE_SIZE - offset;
  314. if (bytes > size)
  315. bytes = size;
  316. info.page = page;
  317. gnttab_foreach_grant_in_range(page, offset, bytes,
  318. xenvif_gop_frag_copy_grant,
  319. &info);
  320. size -= bytes;
  321. offset = 0;
  322. /* Next page */
  323. if (size) {
  324. BUG_ON(!PageCompound(page));
  325. page++;
  326. }
  327. }
  328. *head = info.head;
  329. }
  330. /*
  331. * Prepare an SKB to be transmitted to the frontend.
  332. *
  333. * This function is responsible for allocating grant operations, meta
  334. * structures, etc.
  335. *
  336. * It returns the number of meta structures consumed. The number of
  337. * ring slots used is always equal to the number of meta slots used
  338. * plus the number of GSO descriptors used. Currently, we use either
  339. * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  340. * frontend-side LRO).
  341. */
  342. static int xenvif_gop_skb(struct sk_buff *skb,
  343. struct netrx_pending_operations *npo,
  344. struct xenvif_queue *queue)
  345. {
  346. struct xenvif *vif = netdev_priv(skb->dev);
  347. int nr_frags = skb_shinfo(skb)->nr_frags;
  348. int i;
  349. struct xen_netif_rx_request req;
  350. struct xenvif_rx_meta *meta;
  351. unsigned char *data;
  352. int head = 1;
  353. int old_meta_prod;
  354. int gso_type;
  355. old_meta_prod = npo->meta_prod;
  356. gso_type = XEN_NETIF_GSO_TYPE_NONE;
  357. if (skb_is_gso(skb)) {
  358. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  359. gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
  360. else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  361. gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
  362. }
  363. /* Set up a GSO prefix descriptor, if necessary */
  364. if ((1 << gso_type) & vif->gso_prefix_mask) {
  365. RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
  366. meta = npo->meta + npo->meta_prod++;
  367. meta->gso_type = gso_type;
  368. meta->gso_size = skb_shinfo(skb)->gso_size;
  369. meta->size = 0;
  370. meta->id = req.id;
  371. }
  372. RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
  373. meta = npo->meta + npo->meta_prod++;
  374. if ((1 << gso_type) & vif->gso_mask) {
  375. meta->gso_type = gso_type;
  376. meta->gso_size = skb_shinfo(skb)->gso_size;
  377. } else {
  378. meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
  379. meta->gso_size = 0;
  380. }
  381. meta->size = 0;
  382. meta->id = req.id;
  383. npo->copy_off = 0;
  384. npo->copy_gref = req.gref;
  385. data = skb->data;
  386. while (data < skb_tail_pointer(skb)) {
  387. unsigned int offset = offset_in_page(data);
  388. unsigned int len = PAGE_SIZE - offset;
  389. if (data + len > skb_tail_pointer(skb))
  390. len = skb_tail_pointer(skb) - data;
  391. xenvif_gop_frag_copy(queue, skb, npo,
  392. virt_to_page(data), len, offset, &head);
  393. data += len;
  394. }
  395. for (i = 0; i < nr_frags; i++) {
  396. xenvif_gop_frag_copy(queue, skb, npo,
  397. skb_frag_page(&skb_shinfo(skb)->frags[i]),
  398. skb_frag_size(&skb_shinfo(skb)->frags[i]),
  399. skb_shinfo(skb)->frags[i].page_offset,
  400. &head);
  401. }
  402. return npo->meta_prod - old_meta_prod;
  403. }
  404. /*
  405. * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
  406. * used to set up the operations on the top of
  407. * netrx_pending_operations, which have since been done. Check that
  408. * they didn't give any errors and advance over them.
  409. */
  410. static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
  411. struct netrx_pending_operations *npo)
  412. {
  413. struct gnttab_copy *copy_op;
  414. int status = XEN_NETIF_RSP_OKAY;
  415. int i;
  416. for (i = 0; i < nr_meta_slots; i++) {
  417. copy_op = npo->copy + npo->copy_cons++;
  418. if (copy_op->status != GNTST_okay) {
  419. netdev_dbg(vif->dev,
  420. "Bad status %d from copy to DOM%d.\n",
  421. copy_op->status, vif->domid);
  422. status = XEN_NETIF_RSP_ERROR;
  423. }
  424. }
  425. return status;
  426. }
  427. static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
  428. struct xenvif_rx_meta *meta,
  429. int nr_meta_slots)
  430. {
  431. int i;
  432. unsigned long offset;
  433. /* No fragments used */
  434. if (nr_meta_slots <= 1)
  435. return;
  436. nr_meta_slots--;
  437. for (i = 0; i < nr_meta_slots; i++) {
  438. int flags;
  439. if (i == nr_meta_slots - 1)
  440. flags = 0;
  441. else
  442. flags = XEN_NETRXF_more_data;
  443. offset = 0;
  444. make_rx_response(queue, meta[i].id, status, offset,
  445. meta[i].size, flags);
  446. }
  447. }
  448. void xenvif_kick_thread(struct xenvif_queue *queue)
  449. {
  450. wake_up(&queue->wq);
  451. }
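/* Drain the internal guest rx queue: for each skb build the grant copy
 * operations that move its data into the frontend's posted rx buffers,
 * issue them as one gnttab_batch_copy() call, then write the matching
 * responses (including GSO extra-info slots) and notify the frontend
 * via the rx event channel if required.
 */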
  452. static void xenvif_rx_action(struct xenvif_queue *queue)
  453. {
  454. s8 status;
  455. u16 flags;
  456. struct xen_netif_rx_response *resp;
  457. struct sk_buff_head rxq;
  458. struct sk_buff *skb;
  459. LIST_HEAD(notify);
  460. int ret;
  461. unsigned long offset;
  462. bool need_to_notify = false;
  463. struct netrx_pending_operations npo = {
  464. .copy = queue->grant_copy_op,
  465. .meta = queue->meta,
  466. };
  467. skb_queue_head_init(&rxq);
  468. while (xenvif_rx_ring_slots_available(queue)
  469. && (skb = xenvif_rx_dequeue(queue)) != NULL) {
  470. queue->last_rx_time = jiffies;
  471. XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
  472. __skb_queue_tail(&rxq, skb);
  473. }
  474. BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
  475. if (!npo.copy_prod)
  476. goto done;
  477. BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
  478. gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
  479. while ((skb = __skb_dequeue(&rxq)) != NULL) {
  480. if ((1 << queue->meta[npo.meta_cons].gso_type) &
  481. queue->vif->gso_prefix_mask) {
  482. resp = RING_GET_RESPONSE(&queue->rx,
  483. queue->rx.rsp_prod_pvt++);
  484. resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
  485. resp->offset = queue->meta[npo.meta_cons].gso_size;
  486. resp->id = queue->meta[npo.meta_cons].id;
  487. resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
  488. npo.meta_cons++;
  489. XENVIF_RX_CB(skb)->meta_slots_used--;
  490. }
  491. queue->stats.tx_bytes += skb->len;
  492. queue->stats.tx_packets++;
  493. status = xenvif_check_gop(queue->vif,
  494. XENVIF_RX_CB(skb)->meta_slots_used,
  495. &npo);
  496. if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
  497. flags = 0;
  498. else
  499. flags = XEN_NETRXF_more_data;
  500. if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
  501. flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
  502. else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  503. /* remote but checksummed. */
  504. flags |= XEN_NETRXF_data_validated;
  505. offset = 0;
  506. resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
  507. status, offset,
  508. queue->meta[npo.meta_cons].size,
  509. flags);
  510. if ((1 << queue->meta[npo.meta_cons].gso_type) &
  511. queue->vif->gso_mask) {
  512. struct xen_netif_extra_info *gso =
  513. (struct xen_netif_extra_info *)
  514. RING_GET_RESPONSE(&queue->rx,
  515. queue->rx.rsp_prod_pvt++);
  516. resp->flags |= XEN_NETRXF_extra_info;
  517. gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
  518. gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
  519. gso->u.gso.pad = 0;
  520. gso->u.gso.features = 0;
  521. gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
  522. gso->flags = 0;
  523. }
  524. xenvif_add_frag_responses(queue, status,
  525. queue->meta + npo.meta_cons + 1,
  526. XENVIF_RX_CB(skb)->meta_slots_used);
  527. RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
  528. need_to_notify |= !!ret;
  529. npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
  530. dev_kfree_skb(skb);
  531. }
  532. done:
  533. if (need_to_notify)
  534. notify_remote_via_irq(queue->rx_irq);
  535. }
  536. void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
  537. {
  538. int more_to_do;
  539. RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
  540. if (more_to_do)
  541. napi_schedule(&queue->napi);
  542. }
  543. static void tx_add_credit(struct xenvif_queue *queue)
  544. {
  545. unsigned long max_burst, max_credit;
  546. /*
  547. * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
  548. * Otherwise the interface can seize up due to insufficient credit.
  549. */
  550. max_burst = max(131072UL, queue->credit_bytes);
  551. /* Take care that adding a new chunk of credit doesn't wrap to zero. */
  552. max_credit = queue->remaining_credit + queue->credit_bytes;
  553. if (max_credit < queue->remaining_credit)
  554. max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
  555. queue->remaining_credit = min(max_credit, max_burst);
  556. queue->rate_limited = false;
  557. }
  558. void xenvif_tx_credit_callback(unsigned long data)
  559. {
  560. struct xenvif_queue *queue = (struct xenvif_queue *)data;
  561. tx_add_credit(queue);
  562. xenvif_napi_schedule_or_enable_events(queue);
  563. }
  564. static void xenvif_tx_err(struct xenvif_queue *queue,
  565. struct xen_netif_tx_request *txp, RING_IDX end)
  566. {
  567. RING_IDX cons = queue->tx.req_cons;
  568. unsigned long flags;
  569. do {
  570. spin_lock_irqsave(&queue->response_lock, flags);
  571. make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
  572. push_tx_responses(queue);
  573. spin_unlock_irqrestore(&queue->response_lock, flags);
  574. if (cons == end)
  575. break;
  576. RING_COPY_REQUEST(&queue->tx, cons++, txp);
  577. } while (1);
  578. queue->tx.req_cons = cons;
  579. }
  580. static void xenvif_fatal_tx_err(struct xenvif *vif)
  581. {
  582. netdev_err(vif->dev, "fatal error; disabling device\n");
  583. vif->disabled = true;
  584. /* Disable the vif from queue 0's kthread */
  585. if (vif->queues)
  586. xenvif_kick_thread(&vif->queues[0]);
  587. }
  588. static int xenvif_count_requests(struct xenvif_queue *queue,
  589. struct xen_netif_tx_request *first,
  590. struct xen_netif_tx_request *txp,
  591. int work_to_do)
  592. {
  593. RING_IDX cons = queue->tx.req_cons;
  594. int slots = 0;
  595. int drop_err = 0;
  596. int more_data;
  597. if (!(first->flags & XEN_NETTXF_more_data))
  598. return 0;
  599. do {
  600. struct xen_netif_tx_request dropped_tx = { 0 };
  601. if (slots >= work_to_do) {
  602. netdev_err(queue->vif->dev,
  603. "Asked for %d slots but exceeds this limit\n",
  604. work_to_do);
  605. xenvif_fatal_tx_err(queue->vif);
  606. return -ENODATA;
  607. }
  608. /* This guest is really using too many slots and
  609. * is considered malicious.
  610. */
  611. if (unlikely(slots >= fatal_skb_slots)) {
  612. netdev_err(queue->vif->dev,
  613. "Malicious frontend using %d slots, threshold %u\n",
  614. slots, fatal_skb_slots);
  615. xenvif_fatal_tx_err(queue->vif);
  616. return -E2BIG;
  617. }
  618. /* Xen network protocol had implicit dependency on
  619. * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
  620. * the historical MAX_SKB_FRAGS value 18 to honor the
  621. * same behavior as before. Any packet using more than
  622. * 18 slots but less than fatal_skb_slots slots is
  623. * dropped
  624. */
  625. if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
  626. if (net_ratelimit())
  627. netdev_dbg(queue->vif->dev,
  628. "Too many slots (%d) exceeding limit (%d), dropping packet\n",
  629. slots, XEN_NETBK_LEGACY_SLOTS_MAX);
  630. drop_err = -E2BIG;
  631. }
  632. if (drop_err)
  633. txp = &dropped_tx;
  634. RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
  635. /* If the guest submitted a frame >= 64 KiB then
  636. * first->size overflowed and following slots will
  637. * appear to be larger than the frame.
  638. *
  639. * This cannot be a fatal error as there are buggy
  640. * frontends that do this.
  641. *
  642. * Consume all slots and drop the packet.
  643. */
  644. if (!drop_err && txp->size > first->size) {
  645. if (net_ratelimit())
  646. netdev_dbg(queue->vif->dev,
  647. "Invalid tx request, slot size %u > remaining size %u\n",
  648. txp->size, first->size);
  649. drop_err = -EIO;
  650. }
  651. first->size -= txp->size;
  652. slots++;
  653. if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
  654. netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
  655. txp->offset, txp->size);
  656. xenvif_fatal_tx_err(queue->vif);
  657. return -EINVAL;
  658. }
  659. more_data = txp->flags & XEN_NETTXF_more_data;
  660. if (!drop_err)
  661. txp++;
  662. } while (more_data);
  663. if (drop_err) {
  664. xenvif_tx_err(queue, first, cons + slots);
  665. return drop_err;
  666. }
  667. return slots;
  668. }
  669. struct xenvif_tx_cb {
  670. u16 pending_idx;
  671. };
  672. #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
  673. static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
  674. u16 pending_idx,
  675. struct xen_netif_tx_request *txp,
  676. struct gnttab_map_grant_ref *mop)
  677. {
  678. queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
  679. gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
  680. GNTMAP_host_map | GNTMAP_readonly,
  681. txp->gref, queue->vif->domid);
  682. memcpy(&queue->pending_tx_info[pending_idx].req, txp,
  683. sizeof(*txp));
  684. }
  685. static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
  686. {
  687. struct sk_buff *skb =
  688. alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
  689. GFP_ATOMIC | __GFP_NOWARN);
  690. if (unlikely(skb == NULL))
  691. return NULL;
  692. /* Packets passed to netif_rx() must have some headroom. */
  693. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  694. /* Initialize it here to avoid later surprises */
  695. skb_shinfo(skb)->destructor_arg = NULL;
  696. return skb;
  697. }
  698. static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
  699. struct sk_buff *skb,
  700. struct xen_netif_tx_request *txp,
  701. struct gnttab_map_grant_ref *gop,
  702. unsigned int frag_overflow,
  703. struct sk_buff *nskb)
  704. {
  705. struct skb_shared_info *shinfo = skb_shinfo(skb);
  706. skb_frag_t *frags = shinfo->frags;
  707. u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
  708. int start;
  709. pending_ring_idx_t index;
  710. unsigned int nr_slots;
  711. nr_slots = shinfo->nr_frags;
  712. /* Skip first skb fragment if it is on same page as header fragment. */
  713. start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
  714. for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
  715. shinfo->nr_frags++, txp++, gop++) {
  716. index = pending_index(queue->pending_cons++);
  717. pending_idx = queue->pending_ring[index];
  718. xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
  719. frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
  720. }
  721. if (frag_overflow) {
  722. shinfo = skb_shinfo(nskb);
  723. frags = shinfo->frags;
  724. for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
  725. shinfo->nr_frags++, txp++, gop++) {
  726. index = pending_index(queue->pending_cons++);
  727. pending_idx = queue->pending_ring[index];
  728. xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
  729. frag_set_pending_idx(&frags[shinfo->nr_frags],
  730. pending_idx);
  731. }
  732. skb_shinfo(skb)->frag_list = nskb;
  733. }
  734. return gop;
  735. }
  736. static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
  737. u16 pending_idx,
  738. grant_handle_t handle)
  739. {
  740. if (unlikely(queue->grant_tx_handle[pending_idx] !=
  741. NETBACK_INVALID_HANDLE)) {
  742. netdev_err(queue->vif->dev,
  743. "Trying to overwrite active handle! pending_idx: 0x%x\n",
  744. pending_idx);
  745. BUG();
  746. }
  747. queue->grant_tx_handle[pending_idx] = handle;
  748. }
  749. static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
  750. u16 pending_idx)
  751. {
  752. if (unlikely(queue->grant_tx_handle[pending_idx] ==
  753. NETBACK_INVALID_HANDLE)) {
  754. netdev_err(queue->vif->dev,
  755. "Trying to unmap invalid handle! pending_idx: 0x%x\n",
  756. pending_idx);
  757. BUG();
  758. }
  759. queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
  760. }
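/* Check the results of the grant copy (header) and grant map (frag)
 * operations issued for this skb. On the first failure the frags that
 * were already mapped are unmapped and their slots released; the first
 * error seen is returned, or 0 if everything succeeded.
 */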
  761. static int xenvif_tx_check_gop(struct xenvif_queue *queue,
  762. struct sk_buff *skb,
  763. struct gnttab_map_grant_ref **gopp_map,
  764. struct gnttab_copy **gopp_copy)
  765. {
  766. struct gnttab_map_grant_ref *gop_map = *gopp_map;
  767. u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
  768. /* This always points to the shinfo of the skb being checked, which
  769. * could be either the first or the one on the frag_list
  770. */
  771. struct skb_shared_info *shinfo = skb_shinfo(skb);
  772. /* If this is non-NULL, we are currently checking the frag_list skb, and
  773. * this points to the shinfo of the first one
  774. */
  775. struct skb_shared_info *first_shinfo = NULL;
  776. int nr_frags = shinfo->nr_frags;
  777. const bool sharedslot = nr_frags &&
  778. frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
  779. int i, err;
  780. /* Check status of header. */
  781. err = (*gopp_copy)->status;
  782. if (unlikely(err)) {
  783. if (net_ratelimit())
  784. netdev_dbg(queue->vif->dev,
  785. "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
  786. (*gopp_copy)->status,
  787. pending_idx,
  788. (*gopp_copy)->source.u.ref);
  789. /* The first frag might still have this slot mapped */
  790. if (!sharedslot)
  791. xenvif_idx_release(queue, pending_idx,
  792. XEN_NETIF_RSP_ERROR);
  793. }
  794. (*gopp_copy)++;
  795. check_frags:
  796. for (i = 0; i < nr_frags; i++, gop_map++) {
  797. int j, newerr;
  798. pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
  799. /* Check error status: if okay then remember grant handle. */
  800. newerr = gop_map->status;
  801. if (likely(!newerr)) {
  802. xenvif_grant_handle_set(queue,
  803. pending_idx,
  804. gop_map->handle);
  805. /* Had a previous error? Invalidate this fragment. */
  806. if (unlikely(err)) {
  807. xenvif_idx_unmap(queue, pending_idx);
  808. /* If the mapping of the first frag was OK, but
  809. * the header's copy failed, and they are
  810. * sharing a slot, send an error
  811. */
  812. if (i == 0 && sharedslot)
  813. xenvif_idx_release(queue, pending_idx,
  814. XEN_NETIF_RSP_ERROR);
  815. else
  816. xenvif_idx_release(queue, pending_idx,
  817. XEN_NETIF_RSP_OKAY);
  818. }
  819. continue;
  820. }
  821. /* Error on this fragment: respond to client with an error. */
  822. if (net_ratelimit())
  823. netdev_dbg(queue->vif->dev,
  824. "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
  825. i,
  826. gop_map->status,
  827. pending_idx,
  828. gop_map->ref);
  829. xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
  830. /* Not the first error? Preceding frags already invalidated. */
  831. if (err)
  832. continue;
  833. /* First error: if the header hasn't shared a slot with the
  834. * first frag, release it as well.
  835. */
  836. if (!sharedslot)
  837. xenvif_idx_release(queue,
  838. XENVIF_TX_CB(skb)->pending_idx,
  839. XEN_NETIF_RSP_OKAY);
  840. /* Invalidate preceding fragments of this skb. */
  841. for (j = 0; j < i; j++) {
  842. pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
  843. xenvif_idx_unmap(queue, pending_idx);
  844. xenvif_idx_release(queue, pending_idx,
  845. XEN_NETIF_RSP_OKAY);
  846. }
  847. /* And if we found the error while checking the frag_list, unmap
  848. * the first skb's frags
  849. */
  850. if (first_shinfo) {
  851. for (j = 0; j < first_shinfo->nr_frags; j++) {
  852. pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
  853. xenvif_idx_unmap(queue, pending_idx);
  854. xenvif_idx_release(queue, pending_idx,
  855. XEN_NETIF_RSP_OKAY);
  856. }
  857. }
  858. /* Remember the error: invalidate all subsequent fragments. */
  859. err = newerr;
  860. }
  861. if (skb_has_frag_list(skb) && !first_shinfo) {
  862. first_shinfo = skb_shinfo(skb);
  863. shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
  864. nr_frags = shinfo->nr_frags;
  865. goto check_frags;
  866. }
  867. *gopp_map = gop_map;
  868. return err;
  869. }
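/* Install the grant-mapped guest pages into the skb's frags and chain
 * the per-slot zerocopy callback structures together so that
 * xenvif_zerocopy_callback() can later walk every pending_idx that
 * belongs to this skb.
 */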
  870. static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
  871. {
  872. struct skb_shared_info *shinfo = skb_shinfo(skb);
  873. int nr_frags = shinfo->nr_frags;
  874. int i;
  875. u16 prev_pending_idx = INVALID_PENDING_IDX;
  876. for (i = 0; i < nr_frags; i++) {
  877. skb_frag_t *frag = shinfo->frags + i;
  878. struct xen_netif_tx_request *txp;
  879. struct page *page;
  880. u16 pending_idx;
  881. pending_idx = frag_get_pending_idx(frag);
  882. /* If this is not the first frag, chain it to the previous one */
  883. if (prev_pending_idx == INVALID_PENDING_IDX)
  884. skb_shinfo(skb)->destructor_arg =
  885. &callback_param(queue, pending_idx);
  886. else
  887. callback_param(queue, prev_pending_idx).ctx =
  888. &callback_param(queue, pending_idx);
  889. callback_param(queue, pending_idx).ctx = NULL;
  890. prev_pending_idx = pending_idx;
  891. txp = &queue->pending_tx_info[pending_idx].req;
  892. page = virt_to_page(idx_to_kaddr(queue, pending_idx));
  893. __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
  894. skb->len += txp->size;
  895. skb->data_len += txp->size;
  896. skb->truesize += txp->size;
  897. /* Take an extra reference to offset network stack's put_page */
  898. get_page(queue->mmap_pages[pending_idx]);
  899. }
  900. }
  901. static int xenvif_get_extras(struct xenvif_queue *queue,
  902. struct xen_netif_extra_info *extras,
  903. int work_to_do)
  904. {
  905. struct xen_netif_extra_info extra;
  906. RING_IDX cons = queue->tx.req_cons;
  907. do {
  908. if (unlikely(work_to_do-- <= 0)) {
  909. netdev_err(queue->vif->dev, "Missing extra info\n");
  910. xenvif_fatal_tx_err(queue->vif);
  911. return -EBADR;
  912. }
  913. RING_COPY_REQUEST(&queue->tx, cons, &extra);
  914. if (unlikely(!extra.type ||
  915. extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
  916. queue->tx.req_cons = ++cons;
  917. netdev_err(queue->vif->dev,
  918. "Invalid extra type: %d\n", extra.type);
  919. xenvif_fatal_tx_err(queue->vif);
  920. return -EINVAL;
  921. }
  922. memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
  923. queue->tx.req_cons = ++cons;
  924. } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
  925. return work_to_do;
  926. }
  927. static int xenvif_set_skb_gso(struct xenvif *vif,
  928. struct sk_buff *skb,
  929. struct xen_netif_extra_info *gso)
  930. {
  931. if (!gso->u.gso.size) {
  932. netdev_err(vif->dev, "GSO size must not be zero.\n");
  933. xenvif_fatal_tx_err(vif);
  934. return -EINVAL;
  935. }
  936. switch (gso->u.gso.type) {
  937. case XEN_NETIF_GSO_TYPE_TCPV4:
  938. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  939. break;
  940. case XEN_NETIF_GSO_TYPE_TCPV6:
  941. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
  942. break;
  943. default:
  944. netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
  945. xenvif_fatal_tx_err(vif);
  946. return -EINVAL;
  947. }
  948. skb_shinfo(skb)->gso_size = gso->u.gso.size;
  949. /* gso_segs will be calculated later */
  950. return 0;
  951. }
  952. static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
  953. {
  954. bool recalculate_partial_csum = false;
  955. /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
  956. * peers can fail to set NETRXF_csum_blank when sending a GSO
  957. * frame. In this case force the SKB to CHECKSUM_PARTIAL and
  958. * recalculate the partial checksum.
  959. */
  960. if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
  961. queue->stats.rx_gso_checksum_fixup++;
  962. skb->ip_summed = CHECKSUM_PARTIAL;
  963. recalculate_partial_csum = true;
  964. }
  965. /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
  966. if (skb->ip_summed != CHECKSUM_PARTIAL)
  967. return 0;
  968. return skb_checksum_setup(skb, recalculate_partial_csum);
  969. }
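/* Token-bucket style rate limiting: tx_add_credit() tops the queue up
 * with credit_bytes once per credit window (credit_usec). If this
 * request is still too big for the remaining credit, arm credit_timeout
 * so the queue is looked at again when the next window opens.
 */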
  970. static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
  971. {
  972. u64 now = get_jiffies_64();
  973. u64 next_credit = queue->credit_window_start +
  974. msecs_to_jiffies(queue->credit_usec / 1000);
  975. /* Timer could already be pending in rare cases. */
  976. if (timer_pending(&queue->credit_timeout)) {
  977. queue->rate_limited = true;
  978. return true;
  979. }
  980. /* Passed the point where we can replenish credit? */
  981. if (time_after_eq64(now, next_credit)) {
  982. queue->credit_window_start = now;
  983. tx_add_credit(queue);
  984. }
  985. /* Still too big to send right now? Set a callback. */
  986. if (size > queue->remaining_credit) {
  987. queue->credit_timeout.data =
  988. (unsigned long)queue;
  989. mod_timer(&queue->credit_timeout,
  990. next_credit);
  991. queue->credit_window_start = next_credit;
  992. queue->rate_limited = true;
  993. return true;
  994. }
  995. return false;
  996. }
  997. /* No locking is required in xenvif_mcast_add/del() as they are
  998. * only ever invoked from NAPI poll. An RCU list is used because
  999. * xenvif_mcast_match() is called asynchronously, during start_xmit.
  1000. */
  1001. static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
  1002. {
  1003. struct xenvif_mcast_addr *mcast;
  1004. if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
  1005. if (net_ratelimit())
  1006. netdev_err(vif->dev,
  1007. "Too many multicast addresses\n");
  1008. return -ENOSPC;
  1009. }
  1010. mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
  1011. if (!mcast)
  1012. return -ENOMEM;
  1013. ether_addr_copy(mcast->addr, addr);
  1014. list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
  1015. vif->fe_mcast_count++;
  1016. return 0;
  1017. }
  1018. static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
  1019. {
  1020. struct xenvif_mcast_addr *mcast;
  1021. list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
  1022. if (ether_addr_equal(addr, mcast->addr)) {
  1023. --vif->fe_mcast_count;
  1024. list_del_rcu(&mcast->entry);
  1025. kfree_rcu(mcast, rcu);
  1026. break;
  1027. }
  1028. }
  1029. }
  1030. bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
  1031. {
  1032. struct xenvif_mcast_addr *mcast;
  1033. rcu_read_lock();
  1034. list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
  1035. if (ether_addr_equal(addr, mcast->addr)) {
  1036. rcu_read_unlock();
  1037. return true;
  1038. }
  1039. }
  1040. rcu_read_unlock();
  1041. return false;
  1042. }
  1043. void xenvif_mcast_addr_list_free(struct xenvif *vif)
  1044. {
  1045. /* No need for locking or RCU here. NAPI poll and TX queue
  1046. * are stopped.
  1047. */
  1048. while (!list_empty(&vif->fe_mcast_addr)) {
  1049. struct xenvif_mcast_addr *mcast;
  1050. mcast = list_first_entry(&vif->fe_mcast_addr,
  1051. struct xenvif_mcast_addr,
  1052. entry);
  1053. --vif->fe_mcast_count;
  1054. list_del(&mcast->entry);
  1055. kfree(mcast);
  1056. }
  1057. }
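/* Walk the frontend's tx ring and turn requests into work: up to
 * XEN_NETBACK_TX_COPY_LEN bytes of each packet become a grant copy into
 * the skb's linear area and the remaining slots become grant map
 * operations for the frags. The loop stops when the budget, the ring
 * or the copy/map op arrays run out.
 */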
  1058. static void xenvif_tx_build_gops(struct xenvif_queue *queue,
  1059. int budget,
  1060. unsigned *copy_ops,
  1061. unsigned *map_ops)
  1062. {
  1063. struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
  1064. struct sk_buff *skb, *nskb;
  1065. int ret;
  1066. unsigned int frag_overflow;
  1067. while (skb_queue_len(&queue->tx_queue) < budget) {
  1068. struct xen_netif_tx_request txreq;
  1069. struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
  1070. struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
  1071. u16 pending_idx;
  1072. RING_IDX idx;
  1073. int work_to_do;
  1074. unsigned int data_len;
  1075. pending_ring_idx_t index;
  1076. if (queue->tx.sring->req_prod - queue->tx.req_cons >
  1077. XEN_NETIF_TX_RING_SIZE) {
  1078. netdev_err(queue->vif->dev,
  1079. "Impossible number of requests. "
  1080. "req_prod %d, req_cons %d, size %ld\n",
  1081. queue->tx.sring->req_prod, queue->tx.req_cons,
  1082. XEN_NETIF_TX_RING_SIZE);
  1083. xenvif_fatal_tx_err(queue->vif);
  1084. break;
  1085. }
  1086. work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
  1087. if (!work_to_do)
  1088. break;
  1089. idx = queue->tx.req_cons;
  1090. rmb(); /* Ensure that we see the request before we copy it. */
  1091. RING_COPY_REQUEST(&queue->tx, idx, &txreq);
  1092. /* Credit-based scheduling. */
  1093. if (txreq.size > queue->remaining_credit &&
  1094. tx_credit_exceeded(queue, txreq.size))
  1095. break;
  1096. queue->remaining_credit -= txreq.size;
  1097. work_to_do--;
  1098. queue->tx.req_cons = ++idx;
  1099. memset(extras, 0, sizeof(extras));
  1100. if (txreq.flags & XEN_NETTXF_extra_info) {
  1101. work_to_do = xenvif_get_extras(queue, extras,
  1102. work_to_do);
  1103. idx = queue->tx.req_cons;
  1104. if (unlikely(work_to_do < 0))
  1105. break;
  1106. }
  1107. if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
  1108. struct xen_netif_extra_info *extra;
  1109. extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
  1110. ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
  1111. make_tx_response(queue, &txreq,
  1112. (ret == 0) ?
  1113. XEN_NETIF_RSP_OKAY :
  1114. XEN_NETIF_RSP_ERROR);
  1115. push_tx_responses(queue);
  1116. continue;
  1117. }
  1118. if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
  1119. struct xen_netif_extra_info *extra;
  1120. extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
  1121. xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
  1122. make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
  1123. push_tx_responses(queue);
  1124. continue;
  1125. }
  1126. ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
  1127. if (unlikely(ret < 0))
  1128. break;
  1129. idx += ret;
  1130. if (unlikely(txreq.size < ETH_HLEN)) {
  1131. netdev_dbg(queue->vif->dev,
  1132. "Bad packet size: %d\n", txreq.size);
  1133. xenvif_tx_err(queue, &txreq, idx);
  1134. break;
  1135. }
  1136. /* No crossing a page as the payload mustn't fragment. */
  1137. if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
  1138. netdev_err(queue->vif->dev,
  1139. "txreq.offset: %u, size: %u, end: %lu\n",
  1140. txreq.offset, txreq.size,
  1141. (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
  1142. xenvif_fatal_tx_err(queue->vif);
  1143. break;
  1144. }
  1145. index = pending_index(queue->pending_cons);
  1146. pending_idx = queue->pending_ring[index];
  1147. data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
  1148. ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
  1149. XEN_NETBACK_TX_COPY_LEN : txreq.size;
  1150. skb = xenvif_alloc_skb(data_len);
  1151. if (unlikely(skb == NULL)) {
  1152. netdev_dbg(queue->vif->dev,
  1153. "Can't allocate a skb in start_xmit.\n");
  1154. xenvif_tx_err(queue, &txreq, idx);
  1155. break;
  1156. }
  1157. skb_shinfo(skb)->nr_frags = ret;
  1158. if (data_len < txreq.size)
  1159. skb_shinfo(skb)->nr_frags++;
  1160. /* At this point shinfo->nr_frags is in fact the number of
  1161. * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
  1162. */
  1163. frag_overflow = 0;
  1164. nskb = NULL;
  1165. if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
  1166. frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
  1167. BUG_ON(frag_overflow > MAX_SKB_FRAGS);
  1168. skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
  1169. nskb = xenvif_alloc_skb(0);
  1170. if (unlikely(nskb == NULL)) {
  1171. kfree_skb(skb);
  1172. xenvif_tx_err(queue, &txreq, idx);
  1173. if (net_ratelimit())
  1174. netdev_err(queue->vif->dev,
  1175. "Can't allocate the frag_list skb.\n");
  1176. break;
  1177. }
  1178. }
  1179. if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
  1180. struct xen_netif_extra_info *gso;
  1181. gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
  1182. if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
  1183. /* Failure in xenvif_set_skb_gso is fatal. */
  1184. kfree_skb(skb);
  1185. kfree_skb(nskb);
  1186. break;
  1187. }
  1188. }
  1189. XENVIF_TX_CB(skb)->pending_idx = pending_idx;
  1190. __skb_put(skb, data_len);
  1191. queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
  1192. queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
  1193. queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
  1194. queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
  1195. virt_to_gfn(skb->data);
  1196. queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
  1197. queue->tx_copy_ops[*copy_ops].dest.offset =
  1198. offset_in_page(skb->data) & ~XEN_PAGE_MASK;
  1199. queue->tx_copy_ops[*copy_ops].len = data_len;
  1200. queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
  1201. (*copy_ops)++;
  1202. if (data_len < txreq.size) {
  1203. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1204. pending_idx);
  1205. xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
  1206. gop++;
  1207. } else {
  1208. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1209. INVALID_PENDING_IDX);
  1210. memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
  1211. sizeof(txreq));
  1212. }
  1213. queue->pending_cons++;
  1214. gop = xenvif_get_requests(queue, skb, txfrags, gop,
  1215. frag_overflow, nskb);
  1216. __skb_queue_tail(&queue->tx_queue, skb);
  1217. queue->tx.req_cons = idx;
  1218. if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
  1219. (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
  1220. break;
  1221. }
  1222. (*map_ops) = gop - queue->tx_map_ops;
  1223. return;
  1224. }
  1225. /* Consolidate skb with a frag_list into a brand new one with local pages on
  1226. * frags. Returns 0 or -ENOMEM if can't allocate new pages.
  1227. */
  1228. static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
  1229. {
  1230. unsigned int offset = skb_headlen(skb);
  1231. skb_frag_t frags[MAX_SKB_FRAGS];
  1232. int i, f;
  1233. struct ubuf_info *uarg;
  1234. struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
  1235. queue->stats.tx_zerocopy_sent += 2;
  1236. queue->stats.tx_frag_overflow++;
  1237. xenvif_fill_frags(queue, nskb);
  1238. /* Subtract frags size, we will correct it later */
  1239. skb->truesize -= skb->data_len;
  1240. skb->len += nskb->len;
  1241. skb->data_len += nskb->len;
  1242. /* create a brand new frags array and coalesce there */
  1243. for (i = 0; offset < skb->len; i++) {
  1244. struct page *page;
  1245. unsigned int len;
  1246. BUG_ON(i >= MAX_SKB_FRAGS);
  1247. page = alloc_page(GFP_ATOMIC);
  1248. if (!page) {
  1249. int j;
  1250. skb->truesize += skb->data_len;
  1251. for (j = 0; j < i; j++)
  1252. put_page(frags[j].page.p);
  1253. return -ENOMEM;
  1254. }
  1255. if (offset + PAGE_SIZE < skb->len)
  1256. len = PAGE_SIZE;
  1257. else
  1258. len = skb->len - offset;
  1259. if (skb_copy_bits(skb, offset, page_address(page), len))
  1260. BUG();
  1261. offset += len;
  1262. frags[i].page.p = page;
  1263. frags[i].page_offset = 0;
  1264. skb_frag_size_set(&frags[i], len);
  1265. }
  1266. /* Release all the original (foreign) frags. */
  1267. for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
  1268. skb_frag_unref(skb, f);
  1269. uarg = skb_shinfo(skb)->destructor_arg;
  1270. /* increase inflight counter to offset decrement in callback */
  1271. atomic_inc(&queue->inflight_packets);
  1272. uarg->callback(uarg, true);
  1273. skb_shinfo(skb)->destructor_arg = NULL;
  1274. /* Fill the skb with the new (local) frags. */
  1275. memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
  1276. skb_shinfo(skb)->nr_frags = i;
  1277. skb->truesize += i * PAGE_SIZE;
  1278. return 0;
  1279. }
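/* Second half of tx processing: once the copy/map operations built by
 * xenvif_tx_build_gops() have been performed, verify their status,
 * attach the mapped frags, fix up checksum and GSO metadata, and hand
 * each packet to the stack with netif_receive_skb().
 */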
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

			xenvif_skb_zerocopy_prepare(queue, nskb);
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
			/* Copied all the bits from the frag list -- free it. */
			skb_frag_list_init(skb);
			kfree_skb(nskb);
		}

		skb->dev      = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}
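
/* Zerocopy completion callback for skbs carrying foreign (grant-mapped)
 * pages. Under callback_lock it walks the chain of ubuf_info contexts,
 * queues each pending index on the dealloc ring, updates the zerocopy
 * stats and signals completion via xenvif_skb_zerocopy_complete().
 */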
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
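
/* Drain the dealloc ring filled by xenvif_zerocopy_callback(): batch grant
 * unmap operations for the finished slots, submit them with
 * gnttab_unmap_refs() and release each pending index with an OKAY response.
 */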
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;

		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
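
/* Return a pending slot to the free pending ring and send the corresponding
 * TX response to the frontend, all under response_lock.
 */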
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req, status);

	/* Release the pending index before pushing the Tx response so
	 * it is available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
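
/* Write a TX response on the shared ring; a request that carried an extra
 * info slot gets an additional NULL response so the ring stays in sync.
 */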
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&queue->tx, ++i)->status =
			XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}
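
/* Publish any pending TX responses and notify the frontend's event channel
 * if it asked for a notification.
 */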
static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}
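
/* Write a single RX response on the shared ring. A negative status overrides
 * the size in the status field.
 */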
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}
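
/* Synchronously unmap the grant behind a single pending index; a failed
 * unmap is fatal.
 */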
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}
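
/* Work predicates: tx_work_todo() reports unconsumed TX requests on the
 * shared ring, tx_dealloc_work_todo() reports entries waiting on the
 * dealloc ring.
 */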
static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}
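
/* Map the TX and RX shared rings granted by the frontend into the backend's
 * address space and initialise the back-ring views. On failure anything
 * already mapped is torn down again.
 */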
int xenvif_map_frontend_rings(struct xenvif_queue *queue,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(queue);
	return err;
}
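
/* Per-queue stall accounting: the carrier is dropped for the whole vif as
 * soon as one queue stalls and only restored once every stalled queue has
 * recovered (vif->stalled_queues is counted under vif->lock).
 */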
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}
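
/* A queue counts as stalled when the frontend has posted no Rx requests
 * (prod == cons) for longer than the configured stall_timeout, and as ready
 * again as soon as at least one request is available.
 */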
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled && prod - cons < 1
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return (!skb_queue_empty(&queue->rx_queue)
		&& xenvif_rx_ring_slots_available(queue))
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}
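
/* How long the Rx thread may sleep before the skb at the head of the Rx
 * queue expires: 0 if it has already expired, infinite if the queue is
 * empty.
 */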
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
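
/* Per-queue guest Rx kernel thread: waits for work, pushes queued skbs to
 * the frontend via xenvif_rx_action(), handles stall detection, drops
 * expired packets and purges the queue on exit.
 */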
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out the frontend is sending malformed
		 * packets, but we cannot disable the interface in
		 * softirq context, so we defer it here, if this thread
		 * is associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}
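
/* The dealloc thread unmaps grants for completed zerocopy packets; it must
 * keep running until every inflight packet has completed, even after a stop
 * has been requested.
 */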
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}
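
/* Module init: apply defaults for the module parameters, register the xenbus
 * backend and create the debugfs root (when CONFIG_DEBUG_FS is enabled).
 */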
static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs, but at most 8, if the
	 * user has not specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");