mdp5_smp.c
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"
#include "mdp5_smp.h"

/* SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is a SMP client.  Ie. scanout of 3 plane I420 on
 * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * In some hw, some blocks are statically allocated for certain pipes
 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
 *
 * Each block that can be dynamically allocated is in one of these states:
 *
 *     free:
 *         The block is free.
 *
 *     pending:
 *         The block is allocated to some client and not free.
 *
 *     configured:
 *         The block is allocated to some client, and assigned to that
 *         client in the MDP5_MDP_SMP_ALLOC registers.
 *
 *     inuse:
 *         The block is being actively used by a client.
 *
 * The updates happen in the following steps:
 *
 *  1) mdp5_smp_request():
 *     When plane scanout is set up, calculate the number of blocks
 *     needed per client, and request them.  Blocks neither inuse nor
 *     configured nor pending by any other client are added to the
 *     client's pending set.
 *     For shrinking, blocks in pending but not in configured can be freed
 *     directly, but those already in configured will be freed later by
 *     mdp5_smp_commit().
 *
 *  2) mdp5_smp_configure():
 *     As hw is programmed, before FLUSH, the MDP5_MDP_SMP_ALLOC registers
 *     are configured for the union(pending, inuse).
 *     Current pending is copied to configured.
 *     It is assumed that mdp5_smp_request() and mdp5_smp_configure() do
 *     not run concurrently for the same pipe.
 *
 *  3) mdp5_smp_commit():
 *     After the next vblank, copy configured -> inuse.  Optionally update
 *     the MDP5_MDP_SMP_ALLOC registers if there are newly unused blocks.
 *
 *  4) mdp5_smp_release():
 *     Must be called after the pipe is disabled and no longer uses any SMB.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
 * in-use blocks become available to other clients).
 *
 * btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before)? atomic->check().  And we'd need
 * an API to discard previous requests if the update is aborted or
 * (test-only).
 *
 * TODO would perhaps be nice to have debugfs to dump out kernel
 * inuse and pending state of all clients..
 *
 * An illustrative lifecycle example follows this comment block.
 */
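/*
 * Illustrative lifecycle of one client's state bitmaps, derived from the
 * steps above (the block numbers are made up for the example; note that a
 * shrink drops the first set bit in pending, per smp_request_block()):
 *
 *   request(3 blks):        pending    = {4,5,6}
 *   configure():            configured = {4,5,6}, ALLOC regs written
 *   vblank -> commit():     inuse      = {4,5,6}
 *
 *   request(2 blks):        pending    = {5,6}; blk 4 stays set in
 *                           smp->state since it is still configured
 *   configure():            configured = {5,6}
 *   vblank -> commit():     inuse      = {5,6}; blk 4 is cleared in
 *                           smp->state and unassigned in the ALLOC regs
 */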
struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;
	int blk_size;

	spinlock_t state_lock;
	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */

	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned);

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
	 * consecutive, and in that order.
	 *
	 * e.g.:
	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
	 *     Y  plane's client ID is N
	 *     Cr plane's client ID is N + 1
	 *     Cb plane's client ID is N + 2
	 */
	return mdp5_cfg->smp.clients[pipe] + plane;
}
/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
		u32 cid, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
	uint8_t reserved;
	unsigned long flags;

	reserved = smp->reserved[cid];

	spin_lock_irqsave(&smp->state_lock, flags);

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(smp->state, cnt);
	if (nblks > avail) {
		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		ret = -ENOSPC;
		goto fail;
	}

	cur_nblks = bitmap_weight(ps->pending, cnt);
	if (nblks > cur_nblks) {
		/* grow the existing pending reservation: */
		for (i = cur_nblks; i < nblks; i++) {
			int blk = find_first_zero_bit(smp->state, cnt);
			set_bit(blk, ps->pending);
			set_bit(blk, smp->state);
		}
	} else {
		/* shrink the existing pending reservation: */
		for (i = cur_nblks; i > nblks; i--) {
			int blk = find_first_bit(ps->pending, cnt);
			clear_bit(blk, ps->pending);

			/* clear in global smp_state only if the block is not
			 * in configured; otherwise it is freed later, by
			 * mdp5_smp_commit():
			 */
			if (!test_bit(blk, ps->configured))
				clear_bit(blk, smp->state);
		}
	}

	ret = 0;
fail:
	spin_unlock_irqrestore(&smp->state_lock, flags);
	return ret;
}
static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
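/*
 * Worked example for the watermark math above (the 4096-byte MMB size is
 * an assumption for illustration; the real value comes from cfg->mmb_size):
 * each SMP entry is 128 bits = 16 bytes, so a 4096-byte block holds 256
 * entries.  With nblks = 2, val = (2 * 256) / 4 = 128, giving watermarks
 * WM_0 = 128, WM_1 = 256, WM_2 = 384, i.e. 1/4, 2/4 and 3/4 of the
 * allocated entries.
 */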
/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
		const struct mdp_format *format, u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines, nblks, ret;
	u32 fmt = format->base.pixel_format;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	/* Newer MDPs have split/packing logic, which fetches sub-sampled
	 * U and V components (splits them from Y if necessary), packs
	 * them together, and writes to SMP using a single client.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/* if decimation is enabled, HW decimates less on the
		 * sub-sampled chroma components:
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	set_fifo_thresholds(smp, pipe, nblks);

	return 0;
}
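/*
 * Worked example for the sizing loop above (values are illustrative; a
 * 4096-byte MMB is assumed): a 1920-pixel-wide ARGB plane has cpp = 4,
 * so fetch_stride = 1920 * 4 = 7680 bytes, and with nlines = 2 that is
 * n = DIV_ROUND_UP(15360, 4096) = 4 blocks.  On hw rev v1.00 the result
 * is then rounded up to a power of two (4 already is one here).
 */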
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int i;
	unsigned long flags;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		mdp5_smp_state_t assigned;
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		spin_lock_irqsave(&smp->state_lock, flags);

		/* clear hw assignment */
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, CID_UNUSED, &assigned);

		/* free to global pool */
		bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
		bitmap_andnot(smp->state, smp->state, assigned, cnt);

		/* clear client's info */
		bitmap_zero(ps->pending, cnt);
		bitmap_zero(ps->configured, cnt);
		bitmap_zero(ps->inuse, cnt);

		spin_unlock_irqrestore(&smp->state_lock, flags);
	}

	set_fifo_thresholds(smp, pipe, 0);
}
static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
	}
}
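/*
 * Each 32-bit SMP_ALLOC register carries the client IDs for three
 * consecutive blocks, hence the blk / 3 and blk % 3 in update_smp_state()
 * above.  For example, block 7 lands in register idx = 2, field CLIENT1;
 * assigning it to a client rewrites only that field and leaves CLIENT0 and
 * CLIENT2 (blocks 6 and 8) untouched.  The same value is written to both
 * the _W (write) and _R (read) ALLOC registers.
 */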
/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t assigned;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * if vblank has not happened since the last smp_configure,
		 * skip the configure for now:
		 */
		if (!bitmap_equal(ps->inuse, ps->configured, cnt))
			continue;

		bitmap_copy(ps->configured, ps->pending, cnt);

		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, cid, &assigned);
	}
}
/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t released;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * Figure out if there are any blocks we were previously
		 * using, which can be released and made available to other
		 * clients:
		 */
		if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
			unsigned long flags;

			spin_lock_irqsave(&smp->state_lock, flags);

			/* clear released blocks: */
			bitmap_andnot(smp->state, smp->state, released, cnt);

			spin_unlock_irqrestore(&smp->state_lock, flags);

			update_smp_state(smp, CID_UNUSED, &released);
		}

		bitmap_copy(ps->inuse, ps->configured, cnt);
	}
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
	spin_lock_init(&smp->state_lock);

	return smp;

fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}