vmbus_drv.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"

static struct acpi_device *hv_acpi_dev;

static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;
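
/*
 * Report a guest panic to the host through the Hyper-V crash MSRs:
 * the instruction pointer and a few registers go into
 * HV_X64_MSR_CRASH_P0..P4, then HV_X64_MSR_CRASH_CTL tells the
 * hypervisor that crash data is available. A static flag ensures only
 * the first notification (die or panic chain) is reported.
 */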
static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

struct resource *hyperv_mmio;
DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
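
/*
 * Monitor IDs are grouped 32 to a trigger group in the shared monitor
 * pages, so group = monitorid / 32 and offset = monitorid % 32 locate
 * a channel's pending, latency and parameter slots below.
 */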
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				     buf_size - tot_written,
				     "%u:%u\n",
				     cur_sc->offermsg.child_relid,
				     cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);
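
/*
 * These attributes appear under /sys/bus/vmbus/devices/<device>/; for
 * example (device name and values illustrative only):
 *
 *	$ cat /sys/bus/vmbus/devices/vmbus_14/channel_vp_mapping
 *	14:3
 */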
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. udev will then match its rules
 * against the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
static const uuid_le null_guid;

static inline bool is_null_guid(const __u8 *guid)
{
	if (memcmp(guid, &null_guid, sizeof(uuid_le)))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const __u8 *guid)
{
	for (; !is_null_guid(id->guid); id++)
		if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
			return id;

	return NULL;
}

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);

	return;
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};
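
/*
 * Blocking channel messages cannot be handled in the message-DPC
 * tasklet, so each one is copied into one of these contexts and handed
 * to vmbus_connection.work_queue to be handled in process context.
 */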
struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}
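
/*
 * Invoked from the ISR when an HVMSG_TIMER_EXPIRED message is found in
 * the SynIC slot: runs this CPU's clockevent handler, retires the slot
 * and EOMs if the hypervisor has more messages pending.
 */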
static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	msg->header.message_type = HVMSG_NONE;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
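
/*
 * vmbus_on_msg_dpc - tasklet that drains this CPU's SynIC message slot.
 * Non-blocking channel messages are dispatched straight from the
 * handler table; blocking ones are copied and deferred to the
 * connection work queue. After each message the slot is marked
 * HVMSG_NONE and, if more messages are pending, EOM is written so
 * delivery resumes.
 */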
static void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;

	while (1) {
		if (msg->header.message_type == HVMSG_NONE)
			/* no msg */
			break;

		hdr = (struct vmbus_channel_message_header *)msg->u.payload;

		if (hdr->msgtype >= CHANNELMSG_COUNT) {
			WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
			goto msg_handled;
		}

		entry = &channel_message_table[hdr->msgtype];
		if (entry->handler_type == VMHT_BLOCKING) {
			ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
			if (ctx == NULL)
				continue;

			INIT_WORK(&ctx->work, vmbus_onmessage_work);
			memcpy(&ctx->msg, msg, sizeof(*msg));

			queue_work(vmbus_connection.work_queue, &ctx->work);
		} else
			entry->message_handler(hdr);

msg_handled:
		msg->header.message_type = HVMSG_NONE;

		/*
		 * Make sure the write to MessageType (ie set to
		 * HVMSG_NONE) happens before we read the
		 * MessagePending and EOMing. Otherwise, the EOMing
		 * will not deliver any more messages since there is
		 * no empty slot
		 */
		mb();

		if (msg->header.message_flags.msg_pending) {
			/*
			 * This will cause message queue rescan to
			 * possibly deliver another msg from the
			 * hypervisor
			 */
			wrmsrl(HV_X64_MSR_EOM, 0);
		}
	}
}
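
/*
 * vmbus_isr - top half for the Hyper-V callback vector. Schedules the
 * per-CPU event tasklet when a channel interrupt is signalled, and
 * either handles timer messages inline or kicks the message DPC for
 * everything else found in the SynIC message slot.
 */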
static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(&msg_dpc);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- get the irq resource
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(int irq)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup(false);

	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
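
/*
 * Illustrative registration sketch (hv_dummy_probe, hv_dummy_remove and
 * HV_DUMMY_GUID are hypothetical names, not part of this file). Client
 * drivers normally call the vmbus_driver_register() wrapper, which
 * supplies THIS_MODULE and KBUILD_MODNAME for the @owner and @mod_name
 * parameters above:
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_DUMMY_GUID },
 *		{ },
 *	};
 *
 *	static struct hv_driver dummy_drv = {
 *		.name = "hv_dummy",
 *		.id_table = id_table,
 *		.probe = hv_dummy_probe,
 *		.remove = hv_dummy_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&dummy_drv);
 */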
/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "vmbus_%d",
		     child_device_obj->channel->id);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		irq = res->data.irq.interrupts[0];
		return AE_OK;

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;
	}

	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if ((*old_res)->end < new_res->start) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
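
/* Free the list of MMIO ranges that vmbus_walk_resources() built up. */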
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplied a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case. It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter;
	resource_size_t range_min, range_max, start, local_min, local_max;
	const char *dev_n = dev_name(&device_obj->device);
	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
	int i, retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;

		/*
		 * If this range overlaps the frame buffer, split it into
		 * two tries.
		 */
		for (i = 0; i < 2; i++) {
			local_min = range_min;
			local_max = range_max;
			if (fb_overlap_ok || (range_min >= fb_end) ||
			    (range_max <= screen_info.lfb_base)) {
				i++;
			} else {
				if ((range_min <= screen_info.lfb_base) &&
				    (range_max >= screen_info.lfb_base)) {
					/*
					 * The frame buffer is in this window,
					 * so trim this into the part that
					 * precedes the frame buffer.
					 */
					local_max = screen_info.lfb_base - 1;
					range_min = fb_end;
				} else {
					range_min = fb_end;
					continue;
				}
			}
			start = (local_min + align - 1) & ~(align - 1);
			for (; start + size - 1 <= local_max; start += align) {
				*new = request_mem_region_exclusive(start, size,
								    dev_n);
				if (*new) {
					retval = 0;
					goto exit;
				}
			}
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
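
/*
 * Illustrative only (size and alignment are made up): a framebuffer-style
 * consumer would request a window anywhere in guest physical space and
 * hand it back with release_mem_region() when done:
 *
 *	struct resource *fb_mmio;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&fb_mmio, hdev, 0, -1,
 *				  8 * 1024 * 1024, 0x100000, true);
 *	if (ret)
 *		return ret;
 *	...
 *	release_mem_region(fb_mmio->start, resource_size(fb_mmio));
 */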
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio)
			break;
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};
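
/*
 * Quiesce VMBus ahead of a kexec: tear down the Hyper-V clockevents,
 * ask the host to unload, clean up the SynIC on every online CPU and
 * reset the hypercall state.
 */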
static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload();
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup(false);
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload();
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup(true);
}

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get irq resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (irq <= 0) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_bus_init(irq);
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	tasklet_kill(&msg_dpc);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);
	hv_cleanup(false);
	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	}
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_enable();
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);