atmel_hlcdc_layer.c

/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include "atmel_hlcdc_dc.h"
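/*
 * drm_flip_work release callback: drop the framebuffer reference held by a
 * completed flip (if any) and free the flip structure.
 */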
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
        struct atmel_hlcdc_layer_fb_flip *flip = val;

        if (flip->fb)
                drm_framebuffer_unreference(flip->fb);
        kfree(flip);
}
static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
        if (flip->fb)
                drm_framebuffer_unreference(flip->fb);
        kfree(flip->task);
        kfree(flip);
}
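/*
 * Give back the DMA descriptors reserved by @flip, then queue the flip on
 * the layer's flip work so its framebuffer reference is dropped from the
 * driver workqueue.
 */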
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
                                        struct atmel_hlcdc_layer_fb_flip *flip)
{
        int i;

        if (!flip)
                return;

        for (i = 0; i < layer->max_planes; i++) {
                if (!flip->dscrs[i])
                        break;

                flip->dscrs[i]->status = 0;
                flip->dscrs[i] = NULL;
        }

        drm_flip_work_queue_task(&layer->gc, flip->task);
        drm_flip_work_commit(&layer->gc, layer->wq);
}
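/*
 * Reset an update slot: clear its config values and updated-config bitmap,
 * and release any framebuffer flip still attached to it.
 */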
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
                                           int id)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_update_slot *slot;

        if (id < 0 || id > 1)
                return;

        slot = &upd->slots[id];
        bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
        memset(slot->configs, 0,
               sizeof(*slot->configs) * layer->desc->nconfigs);

        if (slot->fb_flip) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
                slot->fb_flip = NULL;
        }
}
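/*
 * Apply the pending update slot to the hardware. Called with layer->lock
 * held: write the modified CFG registers, program the per-plane DMA
 * descriptors (directly when the channel is disabled, appended through the
 * add-to-queue mechanism when it is already running), then trigger the
 * requested actions through the CHER register and reset the slot.
 */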
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_update_slot *slot;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_dma_channel_dscr *dscr;
        unsigned int cfg;
        u32 action = 0;
        int i = 0;

        if (upd->pending < 0 || upd->pending > 1)
                return;

        slot = &upd->slots[upd->pending];

        for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
                regmap_write(regmap,
                             desc->regs_offset +
                             ATMEL_HLCDC_LAYER_CFG(layer, cfg),
                             slot->configs[cfg]);
                action |= ATMEL_HLCDC_LAYER_UPDATE;
        }

        fb_flip = slot->fb_flip;

        if (!fb_flip->fb)
                goto apply;

        if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
                for (i = 0; i < fb_flip->ngems; i++) {
                        dscr = fb_flip->dscrs[i];
                        dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
                                     ATMEL_HLCDC_LAYER_DMA_IRQ |
                                     ATMEL_HLCDC_LAYER_ADD_IRQ |
                                     ATMEL_HLCDC_LAYER_DONE_IRQ;

                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
                                     dscr->addr);
                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
                                     dscr->ctrl);
                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
                                     dscr->next);
                }

                action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
                dma->status = ATMEL_HLCDC_LAYER_ENABLED;
        } else {
                for (i = 0; i < fb_flip->ngems; i++) {
                        dscr = fb_flip->dscrs[i];
                        dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
                                     ATMEL_HLCDC_LAYER_DMA_IRQ |
                                     ATMEL_HLCDC_LAYER_DSCR_IRQ |
                                     ATMEL_HLCDC_LAYER_DONE_IRQ;

                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
                                     dscr->next);
                }

                action |= ATMEL_HLCDC_LAYER_A2Q;
        }

        /* Release unneeded descriptors */
        for (i = fb_flip->ngems; i < layer->max_planes; i++) {
                fb_flip->dscrs[i]->status = 0;
                fb_flip->dscrs[i] = NULL;
        }

        dma->queue = fb_flip;
        slot->fb_flip = NULL;

apply:
        if (action)
                regmap_write(regmap,
                             desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
                             action);

        atmel_hlcdc_layer_update_reset(layer, upd->pending);

        upd->pending = -1;
}
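/*
 * Layer interrupt handler: collect the per-plane LOADED/DONE/OVERRUN state
 * from the layer status register, release flips that have completed, reset
 * the channel on overrun, and, once the queue is empty, apply the next
 * pending update, marking the channel disabled if no flip remains.
 */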
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_fb_flip *flip;
        unsigned long flags;
        unsigned int isr, imr;
        unsigned int status;
        unsigned int plane_status;
        u32 flip_status;
        int i;

        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
        status = imr & isr;
        if (!status)
                return;

        spin_lock_irqsave(&layer->lock, flags);

        flip = dma->queue ? dma->queue : dma->cur;

        if (!flip) {
                spin_unlock_irqrestore(&layer->lock, flags);
                return;
        }

        /*
         * Set LOADED and DONE flags: they'll be cleared if at least one
         * memory plane is not LOADED or DONE.
         */
        flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
        for (i = 0; i < flip->ngems; i++) {
                plane_status = (status >> (8 * i));

                if (plane_status &
                    (ATMEL_HLCDC_LAYER_ADD_IRQ |
                     ATMEL_HLCDC_LAYER_DSCR_IRQ) &
                    ~flip->dscrs[i]->ctrl) {
                        flip->dscrs[i]->status |=
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
                        flip->dscrs[i]->ctrl |=
                                ATMEL_HLCDC_LAYER_ADD_IRQ |
                                ATMEL_HLCDC_LAYER_DSCR_IRQ;
                }

                if (plane_status &
                    ATMEL_HLCDC_LAYER_DONE_IRQ &
                    ~flip->dscrs[i]->ctrl) {
                        flip->dscrs[i]->status |=
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
                        flip->dscrs[i]->ctrl |=
                                ATMEL_HLCDC_LAYER_DONE_IRQ;
                }

                if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
                        flip->dscrs[i]->status |=
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;

                /*
                 * Clear LOADED and DONE flags if the memory plane is either
                 * not LOADED or not DONE.
                 */
                if (!(flip->dscrs[i]->status &
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
                        flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;

                if (!(flip->dscrs[i]->status &
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
                        flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;

                /*
                 * An overrun on one memory plane impacts the whole
                 * framebuffer transfer, hence we set the OVERRUN flag as
                 * soon as there's one memory plane reporting such an
                 * overrun.
                 */
                flip_status |= flip->dscrs[i]->status &
                               ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
        }

        /* Get changed bits */
        flip_status ^= flip->status;
        flip->status |= flip_status;

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = dma->queue;
                dma->queue = NULL;
        }

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = NULL;
        }

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
                regmap_write(regmap,
                             desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                             ATMEL_HLCDC_LAYER_RST);
                if (dma->queue)
                        atmel_hlcdc_layer_fb_flip_release_queue(layer,
                                                                dma->queue);

                if (dma->cur)
                        atmel_hlcdc_layer_fb_flip_release_queue(layer,
                                                                dma->cur);

                dma->cur = NULL;
                dma->queue = NULL;
        }

        if (!dma->queue) {
                atmel_hlcdc_layer_update_apply(layer);

                if (!dma->cur)
                        dma->status = ATMEL_HLCDC_LAYER_DISABLED;
        }

        spin_unlock_irqrestore(&layer->lock, flags);
}
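/*
 * Disable the layer: reset the channel, clear pending interrupts, and drop
 * the current/queued flips and any pending update so the interrupt handler
 * cannot restart the DMA channel after it has been disabled.
 */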
void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        unsigned long flags;
        unsigned int isr;

        spin_lock_irqsave(&layer->lock, flags);

        /* Disable the layer */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
                     ATMEL_HLCDC_LAYER_UPDATE);

        /* Clear all pending interrupts */
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);

        /* Discard current and queued framebuffer transfers. */
        if (dma->cur) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = NULL;
        }

        if (dma->queue) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
                dma->queue = NULL;
        }

        /*
         * Then discard the pending update request (if any) to prevent
         * DMA irq handler from restarting the DMA channel after it has
         * been disabled.
         */
        if (upd->pending >= 0) {
                atmel_hlcdc_layer_update_reset(layer, upd->pending);
                upd->pending = -1;
        }

        dma->status = ATMEL_HLCDC_LAYER_DISABLED;

        spin_unlock_irqrestore(&layer->lock, flags);
}
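/*
 * Start a new update transaction: allocate a framebuffer flip and its flip
 * work task, reserve one free DMA descriptor per plane, and seed the "next"
 * slot from the pending slot (or from the current CFG registers when there
 * is no pending update). Returns 0 on success, -ENOMEM on allocation
 * failure, or -EBUSY when not enough descriptors are available.
 */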
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_layer_update_slot *slot;
        unsigned long flags;
        int i, j = 0;

        fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
        if (!fb_flip)
                return -ENOMEM;

        fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
        if (!fb_flip->task) {
                kfree(fb_flip);
                return -ENOMEM;
        }

        spin_lock_irqsave(&layer->lock, flags);

        upd->next = upd->pending ? 0 : 1;

        slot = &upd->slots[upd->next];

        for (i = 0; i < layer->max_planes * 4; i++) {
                if (!dma->dscrs[i].status) {
                        fb_flip->dscrs[j++] = &dma->dscrs[i];
                        dma->dscrs[i].status =
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
                        if (j == layer->max_planes)
                                break;
                }
        }

        if (j < layer->max_planes) {
                for (i = 0; i < j; i++)
                        fb_flip->dscrs[i]->status = 0;
        }

        if (j < layer->max_planes) {
                spin_unlock_irqrestore(&layer->lock, flags);
                atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
                return -EBUSY;
        }

        slot->fb_flip = fb_flip;

        if (upd->pending >= 0) {
                memcpy(slot->configs,
                       upd->slots[upd->pending].configs,
                       layer->desc->nconfigs * sizeof(u32));
                memcpy(slot->updated_configs,
                       upd->slots[upd->pending].updated_configs,
                       DIV_ROUND_UP(layer->desc->nconfigs,
                                    BITS_PER_BYTE * sizeof(unsigned long)) *
                       sizeof(unsigned long));
                slot->fb_flip->fb = upd->slots[upd->pending].fb_flip->fb;
                if (upd->slots[upd->pending].fb_flip->fb) {
                        slot->fb_flip->fb =
                                upd->slots[upd->pending].fb_flip->fb;
                        slot->fb_flip->ngems =
                                upd->slots[upd->pending].fb_flip->ngems;
                        drm_framebuffer_reference(slot->fb_flip->fb);
                }
        } else {
                regmap_bulk_read(regmap,
                                 layer->desc->regs_offset +
                                 ATMEL_HLCDC_LAYER_CFG(layer, 0),
                                 upd->slots[upd->next].configs,
                                 layer->desc->nconfigs);
        }

        spin_unlock_irqrestore(&layer->lock, flags);

        return 0;
}
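/* Abort the update transaction prepared by atmel_hlcdc_layer_update_start(). */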
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;

        atmel_hlcdc_layer_update_reset(layer, upd->next);
        upd->next = -1;
}
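/*
 * Attach a framebuffer (possibly NULL) to the in-progress update: fill one
 * DMA descriptor per plane with the GEM CMA buffer address plus the plane
 * offset, and swap the framebuffer references accordingly.
 */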
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
                                     struct drm_framebuffer *fb,
                                     unsigned int *offsets)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_layer_update_slot *slot;
        struct atmel_hlcdc_dma_channel_dscr *dscr;
        struct drm_framebuffer *old_fb;
        int nplanes = 0;
        int i;

        if (upd->next < 0 || upd->next > 1)
                return;

        if (fb)
                nplanes = drm_format_num_planes(fb->pixel_format);

        if (nplanes > layer->max_planes)
                return;

        slot = &upd->slots[upd->next];

        fb_flip = slot->fb_flip;
        old_fb = slot->fb_flip->fb;

        for (i = 0; i < nplanes; i++) {
                struct drm_gem_cma_object *gem;

                dscr = slot->fb_flip->dscrs[i];
                gem = drm_fb_cma_get_gem_obj(fb, i);
                dscr->addr = gem->paddr + offsets[i];
        }

        fb_flip->ngems = nplanes;
        fb_flip->fb = fb;

        if (fb)
                drm_framebuffer_reference(fb);

        if (old_fb)
                drm_framebuffer_unreference(old_fb);
}
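/*
 * Read-modify-write one config word of the in-progress update slot and mark
 * it so it gets written back to the hardware on commit.
 */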
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
                                  u32 mask, u32 val)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_update_slot *slot;

        if (upd->next < 0 || upd->next > 1)
                return;

        if (cfg >= layer->desc->nconfigs)
                return;

        slot = &upd->slots[upd->next];
        slot->configs[cfg] &= ~mask;
        slot->configs[cfg] |= (val & mask);
        set_bit(cfg, slot->updated_configs);
}
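/*
 * Commit the in-progress update: it replaces any previously pending request
 * and is applied immediately when the DMA queue is idle; otherwise the
 * interrupt handler applies it once the queued transfer completes.
 */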
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_update_slot *slot;
        unsigned long flags;

        if (upd->next < 0 || upd->next > 1)
                return;

        slot = &upd->slots[upd->next];

        spin_lock_irqsave(&layer->lock, flags);

        /*
         * Release the pending update request and replace it with the new
         * one.
         */
        if (upd->pending >= 0)
                atmel_hlcdc_layer_update_reset(layer, upd->pending);

        upd->pending = upd->next;
        upd->next = -1;

        if (!dma->queue)
                atmel_hlcdc_layer_update_apply(layer);

        spin_unlock_irqrestore(&layer->lock, flags);

        upd->next = -1;
}
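/*
 * Allocate the DMA descriptor pool (4 descriptors per plane) from coherent
 * memory and store each descriptor's own bus address in its ->next field.
 */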
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
                                      struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        dma_addr_t dma_addr;
        int i;

        dma->dscrs = dma_alloc_coherent(dev->dev,
                                        layer->max_planes * 4 *
                                        sizeof(*dma->dscrs),
                                        &dma_addr, GFP_KERNEL);
        if (!dma->dscrs)
                return -ENOMEM;

        for (i = 0; i < layer->max_planes * 4; i++) {
                struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

                dscr->next = dma_addr + (i * sizeof(*dscr));
        }

        return 0;
}
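/*
 * Clear all descriptor status flags and free the coherent descriptor pool
 * (dscrs[0].next still holds the pool's bus address).
 */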
static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
                                          struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        int i;

        for (i = 0; i < layer->max_planes * 4; i++) {
                struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

                dscr->status = 0;
        }

        dma_free_coherent(dev->dev, layer->max_planes * 4 *
                          sizeof(*dma->dscrs), dma->dscrs,
                          dma->dscrs[0].next);
}
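/*
 * Carve the two update slots' config arrays and updated-config bitmaps out
 * of a single devm allocation and mark both the pending and next slots as
 * unused.
 */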
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
                                struct atmel_hlcdc_layer *layer,
                                const struct atmel_hlcdc_layer_desc *desc)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        int updated_size;
        void *buffer;
        int i;

        updated_size = DIV_ROUND_UP(desc->nconfigs,
                                    BITS_PER_BYTE *
                                    sizeof(unsigned long));

        buffer = devm_kzalloc(dev->dev,
                              ((desc->nconfigs * sizeof(u32)) +
                               (updated_size * sizeof(unsigned long))) * 2,
                              GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        for (i = 0; i < 2; i++) {
                upd->slots[i].updated_configs = buffer;
                buffer += updated_size * sizeof(unsigned long);
                upd->slots[i].configs = buffer;
                buffer += desc->nconfigs * sizeof(u32);
        }

        upd->pending = -1;
        upd->next = -1;

        return 0;
}
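/*
 * Initialize a layer: reset the channel, derive max_planes from the
 * supported formats, set up the lock, flip work, DMA descriptors and update
 * slots, then flush the status register and enable the per-plane interrupts.
 */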
int atmel_hlcdc_layer_init(struct drm_device *dev,
                           struct atmel_hlcdc_layer *layer,
                           const struct atmel_hlcdc_layer_desc *desc)
{
        struct atmel_hlcdc_dc *dc = dev->dev_private;
        struct regmap *regmap = dc->hlcdc->regmap;
        unsigned int tmp;
        int ret;
        int i;

        layer->hlcdc = dc->hlcdc;
        layer->wq = dc->wq;
        layer->desc = desc;

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST);

        for (i = 0; i < desc->formats->nformats; i++) {
                int nplanes = drm_format_num_planes(desc->formats->formats[i]);

                if (nplanes > layer->max_planes)
                        layer->max_planes = nplanes;
        }

        spin_lock_init(&layer->lock);
        drm_flip_work_init(&layer->gc, desc->name,
                           atmel_hlcdc_layer_fb_flip_release);

        ret = atmel_hlcdc_layer_dma_init(dev, layer);
        if (ret)
                return ret;

        ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
        if (ret)
                return ret;

        /* Flush Status Register */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
                     0xffffffff);
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
                    &tmp);

        tmp = 0;
        for (i = 0; i < layer->max_planes; i++)
                tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
                        ATMEL_HLCDC_LAYER_DSCR_IRQ |
                        ATMEL_HLCDC_LAYER_ADD_IRQ |
                        ATMEL_HLCDC_LAYER_DONE_IRQ |
                        ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);

        return 0;
}
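/*
 * Tear a layer down: mask its interrupts, reset the channel, free the DMA
 * descriptors and clean up the flip work.
 */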
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
                               struct atmel_hlcdc_layer *layer)
{
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct regmap *regmap = layer->hlcdc->regmap;

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
                     0xffffffff);
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST);

        atmel_hlcdc_layer_dma_cleanup(dev, layer);

        drm_flip_work_cleanup(&layer->gc);
}