qxl_fb.c

/*
 * Copyright © 2013 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

#include <linux/module.h>
#include <linux/fb.h>

#include "drmP.h"
#include "drm/drm.h"
#include "drm/drm_crtc.h"
#include "drm/drm_crtc_helper.h"
#include "qxl_drv.h"
#include "qxl_object.h"
#include "drm_fb_helper.h"

#define QXL_DIRTY_DELAY (HZ / 30)

struct qxl_fbdev {
        struct drm_fb_helper helper;
        struct qxl_framebuffer qfb;
        struct list_head fbdev_list;
        struct qxl_device *qdev;

        spinlock_t delayed_ops_lock;
        struct list_head delayed_ops;
        void *shadow;
        int size;

        /* dirty memory logging */
        struct {
                spinlock_t lock;
                unsigned x1;
                unsigned y1;
                unsigned x2;
                unsigned y2;
        } dirty;
};
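
/*
 * Fill a struct qxl_fb_image from an fbdev blit request: take the visual and
 * (for true/direct color) the pseudo palette from the fb_info when one is
 * given, otherwise derive the visual from the image depth, and copy the
 * fb_image itself if present.
 */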
static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
                              struct qxl_device *qdev, struct fb_info *info,
                              const struct fb_image *image)
{
        qxl_fb_image->qdev = qdev;
        if (info) {
                qxl_fb_image->visual = info->fix.visual;
                if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
                    qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
                        memcpy(&qxl_fb_image->pseudo_palette,
                               info->pseudo_palette,
                               sizeof(qxl_fb_image->pseudo_palette));
        } else {
                /* fallback */
                if (image->depth == 1)
                        qxl_fb_image->visual = FB_VISUAL_MONO10;
                else
                        qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
        }
        if (image) {
                memcpy(&qxl_fb_image->fb_image, image,
                       sizeof(qxl_fb_image->fb_image));
        }
}
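
/*
 * Take the accumulated dirty rectangle (under dirty.lock), reset it, and emit
 * a single opaque draw from the shadow buffer to the device for that region.
 */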
static void qxl_fb_dirty_flush(struct fb_info *info)
{
        struct qxl_fbdev *qfbdev = info->par;
        struct qxl_device *qdev = qfbdev->qdev;
        struct qxl_fb_image qxl_fb_image;
        struct fb_image *image = &qxl_fb_image.fb_image;
        unsigned long flags;
        u32 x1, x2, y1, y2;

        /* TODO: hard coding 32 bpp */
        int stride = qfbdev->qfb.base.pitches[0];

        spin_lock_irqsave(&qfbdev->dirty.lock, flags);

        x1 = qfbdev->dirty.x1;
        x2 = qfbdev->dirty.x2;
        y1 = qfbdev->dirty.y1;
        y2 = qfbdev->dirty.y2;
        qfbdev->dirty.x1 = 0;
        qfbdev->dirty.x2 = 0;
        qfbdev->dirty.y1 = 0;
        qfbdev->dirty.y2 = 0;

        spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);

        /*
         * we are using a shadow draw buffer, at qdev->surface0_shadow
         */
        qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
        image->dx = x1;
        image->dy = y1;
        image->width = x2 - x1 + 1;
        image->height = y2 - y1 + 1;
        image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
                                         warnings */
        image->bg_color = 0;
        image->depth = 32;            /* TODO: take from somewhere? */
        image->cmap.start = 0;
        image->cmap.len = 0;
        image->cmap.red = NULL;
        image->cmap.green = NULL;
        image->cmap.blue = NULL;
        image->cmap.transp = NULL;
        image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);

        qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
        qxl_draw_opaque_fb(&qxl_fb_image, stride);
}
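
/*
 * Merge a newly damaged rectangle into the pending dirty region (under
 * dirty.lock) and schedule the fb_work flush.
 */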
static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
                             int x, int y, int width, int height)
{
        struct qxl_device *qdev = qfbdev->qdev;
        unsigned long flags;
        int x2, y2;

        x2 = x + width - 1;
        y2 = y + height - 1;

        spin_lock_irqsave(&qfbdev->dirty.lock, flags);

        if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
            (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
                if (qfbdev->dirty.y1 < y)
                        y = qfbdev->dirty.y1;
                if (qfbdev->dirty.y2 > y2)
                        y2 = qfbdev->dirty.y2;
                if (qfbdev->dirty.x1 < x)
                        x = qfbdev->dirty.x1;
                if (qfbdev->dirty.x2 > x2)
                        x2 = qfbdev->dirty.x2;
        }

        qfbdev->dirty.x1 = x;
        qfbdev->dirty.x2 = x2;
        qfbdev->dirty.y1 = y;
        qfbdev->dirty.y2 = y2;

        spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);

        schedule_work(&qdev->fb_work);
}
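
/*
 * Deferred-io callback: convert the list of touched pages into a span of
 * scanlines and mark it dirty for the next flush.
 */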
static void qxl_deferred_io(struct fb_info *info,
                            struct list_head *pagelist)
{
        struct qxl_fbdev *qfbdev = info->par;
        unsigned long start, end, min, max;
        struct page *page;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(page, pagelist, lru) {
                start = page->index << PAGE_SHIFT;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;
                qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
        }
}

static struct fb_deferred_io qxl_defio = {
        .delay          = QXL_DIRTY_DELAY,
        .deferred_io    = qxl_deferred_io,
};

static void qxl_fb_fillrect(struct fb_info *info,
                            const struct fb_fillrect *rect)
{
        struct qxl_fbdev *qfbdev = info->par;

        drm_fb_helper_sys_fillrect(info, rect);
        qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
                         rect->height);
}

static void qxl_fb_copyarea(struct fb_info *info,
                            const struct fb_copyarea *area)
{
        struct qxl_fbdev *qfbdev = info->par;

        drm_fb_helper_sys_copyarea(info, area);
        qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
                         area->height);
}

static void qxl_fb_imageblit(struct fb_info *info,
                             const struct fb_image *image)
{
        struct qxl_fbdev *qfbdev = info->par;

        drm_fb_helper_sys_imageblit(info, image);
        qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
                         image->height);
}
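
/*
 * Worker scheduled by qxl_dirty_update(); flushes the dirty region of the
 * fbdev framebuffer to the device.
 */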
static void qxl_fb_work(struct work_struct *work)
{
        struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
        struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;

        qxl_fb_dirty_flush(qfbdev->helper.fbdev);
}

int qxl_fb_init(struct qxl_device *qdev)
{
        INIT_WORK(&qdev->fb_work, qxl_fb_work);
        return 0;
}

static struct fb_ops qxlfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
        .fb_fillrect = qxl_fb_fillrect,
        .fb_copyarea = qxl_fb_copyarea,
        .fb_imageblit = qxl_fb_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
        .fb_debug_enter = drm_fb_helper_debug_enter,
        .fb_debug_leave = drm_fb_helper_debug_leave,
};
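
/*
 * Undo qxlfb_create_pinned_object(): unmap and unpin the buffer object and
 * drop the GEM reference.
 */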
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
        struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
        int ret;

        ret = qxl_bo_reserve(qbo, false);
        if (likely(ret == 0)) {
                qxl_bo_kunmap(qbo);
                qxl_bo_unpin(qbo);
                qxl_bo_unreserve(qbo);
        }
        drm_gem_object_unreference_unlocked(gobj);
}

int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
                                  struct drm_file *file_priv,
                                  uint32_t *handle)
{
        int r;
        struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;

        BUG_ON(!gobj);
        /* drm_gem_handle_create adds a reference - good */
        r = drm_gem_handle_create(file_priv, gobj, handle);
        if (r)
                return r;
        return 0;
}
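
/*
 * Allocate a GEM object in the surface domain sized for the requested mode,
 * then pin it and map it into the kernel so it can back the fbdev console.
 */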
static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
                                      struct drm_mode_fb_cmd2 *mode_cmd,
                                      struct drm_gem_object **gobj_p)
{
        struct qxl_device *qdev = qfbdev->qdev;
        struct drm_gem_object *gobj = NULL;
        struct qxl_bo *qbo = NULL;
        int ret;
        int aligned_size, size;
        int height = mode_cmd->height;
        int bpp;
        int depth;

        drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);

        size = mode_cmd->pitches[0] * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        /* TODO: unallocate and reallocate surface0 for real. Hack to just
         * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
        ret = qxl_gem_object_create(qdev, aligned_size, 0,
                                    QXL_GEM_DOMAIN_SURFACE,
                                    false, /* is discardable */
                                    false, /* is kernel (false means device) */
                                    NULL,
                                    &gobj);
        if (ret) {
                pr_err("failed to allocate framebuffer (%d)\n",
                       aligned_size);
                return -ENOMEM;
        }
        qbo = gem_to_qxl_bo(gobj);

        qbo->surf.width = mode_cmd->width;
        qbo->surf.height = mode_cmd->height;
        qbo->surf.stride = mode_cmd->pitches[0];
        qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
        ret = qxl_bo_reserve(qbo, false);
        if (unlikely(ret != 0))
                goto out_unref;
        ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
        if (ret) {
                qxl_bo_unreserve(qbo);
                goto out_unref;
        }
        ret = qxl_bo_kmap(qbo, NULL);
        qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
        if (ret)
                goto out_unref;

        *gobj_p = gobj;
        return 0;
out_unref:
        qxlfb_destroy_pinned_object(gobj);
        *gobj_p = NULL;
        return ret;
}
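
/*
 * fb_probe backend: create the pinned backing object, a vmalloc'ed shadow
 * buffer used as the fbdev screen, and an fb_info wired up for deferred io.
 */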
static int qxlfb_create(struct qxl_fbdev *qfbdev,
                        struct drm_fb_helper_surface_size *sizes)
{
        struct qxl_device *qdev = qfbdev->qdev;
        struct fb_info *info;
        struct drm_framebuffer *fb = NULL;
        struct drm_mode_fb_cmd2 mode_cmd;
        struct drm_gem_object *gobj = NULL;
        struct qxl_bo *qbo = NULL;
        int ret;
        int size;
        int bpp = sizes->surface_bpp;
        int depth = sizes->surface_depth;
        void *shadow;

        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
        mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
        mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

        ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
        if (ret < 0)
                return ret; /* bail out before dereferencing a NULL gobj */

        qbo = gem_to_qxl_bo(gobj);

        QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
                 mode_cmd.height, mode_cmd.pitches[0]);

        shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
        /* TODO: what's the usual response to memory allocation errors? */
        BUG_ON(!shadow);
        QXL_INFO(qdev,
                 "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
                 qxl_bo_gpu_offset(qbo),
                 qxl_bo_mmap_offset(qbo),
                 qbo->kptr,
                 shadow);
        size = mode_cmd.pitches[0] * mode_cmd.height;

        info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
        if (IS_ERR(info)) {
                ret = PTR_ERR(info);
                goto out_unref;
        }

        info->par = qfbdev;

        qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);

        fb = &qfbdev->qfb.base;

        /* setup helper with fb data */
        qfbdev->helper.fb = fb;

        qfbdev->shadow = shadow;
        strcpy(info->fix.id, "qxldrmfb");

        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

        info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
        info->fbops = &qxlfb_ops;

        /*
         * TODO: using gobj->size in various places in this function. Not sure
         * what the difference between the different sizes is.
         */
        info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
        info->fix.smem_len = gobj->size;
        info->screen_base = qfbdev->shadow;
        info->screen_size = gobj->size;

        drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
                               sizes->fb_height);

        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
        info->apertures->ranges[0].size = qdev->vram_size;

        info->fix.mmio_start = 0;
        info->fix.mmio_len = 0;

        if (info->screen_base == NULL) {
                ret = -ENOSPC;
                goto out_destroy_fbi;
        }

        info->fbdefio = &qxl_defio;
        fb_deferred_io_init(info);

        qdev->fbdev_info = info;
        qdev->fbdev_qfb = &qfbdev->qfb;
        DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
        DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
        return 0;

out_destroy_fbi:
        drm_fb_helper_release_fbi(&qfbdev->helper);
out_unref:
        if (qbo) {
                ret = qxl_bo_reserve(qbo, false);
                if (likely(ret == 0)) {
                        qxl_bo_kunmap(qbo);
                        qxl_bo_unpin(qbo);
                        qxl_bo_unreserve(qbo);
                }
        }
        if (fb && ret) {
                drm_gem_object_unreference(gobj);
                drm_framebuffer_cleanup(fb);
                kfree(fb);
        }
        drm_gem_object_unreference(gobj);
        return ret;
}

static int qxl_fb_find_or_create_single(
                struct drm_fb_helper *helper,
                struct drm_fb_helper_surface_size *sizes)
{
        struct qxl_fbdev *qfbdev =
                container_of(helper, struct qxl_fbdev, helper);
        int new_fb = 0;
        int ret;

        if (!helper->fb) {
                ret = qxlfb_create(qfbdev, sizes);
                if (ret)
                        return ret;
                new_fb = 1;
        }
        return new_fb;
}
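
/*
 * Tear down the fbdev emulation: unregister the fb_info, release the pinned
 * backing object, free the shadow buffer and clean up the framebuffer.
 */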
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
        struct qxl_framebuffer *qfb = &qfbdev->qfb;

        drm_fb_helper_unregister_fbi(&qfbdev->helper);
        drm_fb_helper_release_fbi(&qfbdev->helper);

        if (qfb->obj) {
                qxlfb_destroy_pinned_object(qfb->obj);
                qfb->obj = NULL;
        }
        drm_fb_helper_fini(&qfbdev->helper);
        vfree(qfbdev->shadow);

        drm_framebuffer_cleanup(&qfb->base);

        return 0;
}

static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
        .fb_probe = qxl_fb_find_or_create_single,
};
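
/*
 * Set up fbdev emulation (when CONFIG_DRM_FBDEV_EMULATION is enabled):
 * allocate the qxl_fbdev, register the fb helper and create the initial
 * configuration.
 */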
int qxl_fbdev_init(struct qxl_device *qdev)
{
        int ret = 0;

#ifdef CONFIG_DRM_FBDEV_EMULATION
        struct qxl_fbdev *qfbdev;
        int bpp_sel = 32; /* TODO: parameter from somewhere? */

        qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
        if (!qfbdev)
                return -ENOMEM;

        qfbdev->qdev = qdev;
        qdev->mode_info.qfbdev = qfbdev;
        spin_lock_init(&qfbdev->delayed_ops_lock);
        spin_lock_init(&qfbdev->dirty.lock);
        INIT_LIST_HEAD(&qfbdev->delayed_ops);

        drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
                              &qxl_fb_helper_funcs);

        ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
                                 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
                                 QXLFB_CONN_LIMIT);
        if (ret)
                goto free;

        ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
        if (ret)
                goto fini;

        ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
        if (ret)
                goto fini;

        return 0;

fini:
        drm_fb_helper_fini(&qfbdev->helper);
free:
        kfree(qfbdev);
#endif

        return ret;
}

void qxl_fbdev_fini(struct qxl_device *qdev)
{
        if (!qdev->mode_info.qfbdev)
                return;

        qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
        kfree(qdev->mode_info.qfbdev);
        qdev->mode_info.qfbdev = NULL;
}

void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
        if (!qdev->mode_info.qfbdev)
                return;

        drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}

bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
{
        if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
                return true;
        return false;
}