/* qat_uclo.c — Intel QAT UOF (microcode object file) loader */
  1. /*
  2. This file is provided under a dual BSD/GPLv2 license. When using or
  3. redistributing this file, you may do so under either license.
  4. GPL LICENSE SUMMARY
  5. Copyright(c) 2014 Intel Corporation.
  6. This program is free software; you can redistribute it and/or modify
  7. it under the terms of version 2 of the GNU General Public License as
  8. published by the Free Software Foundation.
  9. This program is distributed in the hope that it will be useful, but
  10. WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. General Public License for more details.
  13. Contact Information:
  14. qat-linux@intel.com
  15. BSD LICENSE
  16. Copyright(c) 2014 Intel Corporation.
  17. Redistribution and use in source and binary forms, with or without
  18. modification, are permitted provided that the following conditions
  19. are met:
  20. * Redistributions of source code must retain the above copyright
  21. notice, this list of conditions and the following disclaimer.
  22. * Redistributions in binary form must reproduce the above copyright
  23. notice, this list of conditions and the following disclaimer in
  24. the documentation and/or other materials provided with the
  25. distribution.
  26. * Neither the name of Intel Corporation nor the names of its
  27. contributors may be used to endorse or promote products derived
  28. from this software without specific prior written permission.
  29. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  30. "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  31. LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  32. A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  33. OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  34. SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  35. LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  36. DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  37. THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  38. (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  39. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  40. */
  41. #include <linux/slab.h>
  42. #include <linux/ctype.h>
  43. #include <linux/kernel.h>
  44. #include "adf_accel_devices.h"
  45. #include "adf_common_drv.h"
  46. #include "icp_qat_uclo.h"
  47. #include "icp_qat_hal.h"
  48. #include "icp_qat_fw_loader_handle.h"
  49. #define UWORD_CPYBUF_SIZE 1024
  50. #define INVLD_UWORD 0xffffffffffull
  51. #define PID_MINOR_REV 0xf
  52. #define PID_MAJOR_REV (0xf << 4)
  53. static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
  54. unsigned int ae, unsigned int image_num)
  55. {
  56. struct icp_qat_uclo_aedata *ae_data;
  57. struct icp_qat_uclo_encapme *encap_image;
  58. struct icp_qat_uclo_page *page = NULL;
  59. struct icp_qat_uclo_aeslice *ae_slice = NULL;
  60. ae_data = &obj_handle->ae_data[ae];
  61. encap_image = &obj_handle->ae_uimage[image_num];
  62. ae_slice = &ae_data->ae_slices[ae_data->slice_num];
  63. ae_slice->encap_image = encap_image;
  64. if (encap_image->img_ptr) {
  65. ae_slice->ctx_mask_assigned =
  66. encap_image->img_ptr->ctx_assigned;
  67. ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
  68. } else {
  69. ae_slice->ctx_mask_assigned = 0;
  70. }
  71. ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
  72. if (!ae_slice->region)
  73. return -ENOMEM;
  74. ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
  75. if (!ae_slice->page)
  76. goto out_err;
  77. page = ae_slice->page;
  78. page->encap_page = encap_image->page;
  79. ae_slice->page->region = ae_slice->region;
  80. ae_data->slice_num++;
  81. return 0;
  82. out_err:
  83. kfree(ae_slice->region);
  84. ae_slice->region = NULL;
  85. return -ENOMEM;
  86. }
  87. static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
  88. {
  89. unsigned int i;
  90. if (!ae_data) {
  91. pr_err("QAT: bad argument, ae_data is NULL\n ");
  92. return -EINVAL;
  93. }
  94. for (i = 0; i < ae_data->slice_num; i++) {
  95. kfree(ae_data->ae_slices[i].region);
  96. ae_data->ae_slices[i].region = NULL;
  97. kfree(ae_data->ae_slices[i].page);
  98. ae_data->ae_slices[i].page = NULL;
  99. }
  100. return 0;
  101. }
  102. static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
  103. unsigned int str_offset)
  104. {
  105. if ((!str_table->table_len) || (str_offset > str_table->table_len))
  106. return NULL;
  107. return (char *)(((unsigned long)(str_table->strings)) + str_offset);
  108. }
  109. static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
  110. {
  111. int maj = hdr->maj_ver & 0xff;
  112. int min = hdr->min_ver & 0xff;
  113. if (hdr->file_id != ICP_QAT_UOF_FID) {
  114. pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
  115. return -EINVAL;
  116. }
  117. if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
  118. pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
  119. maj, min);
  120. return -EINVAL;
  121. }
  122. return 0;
  123. }
  124. static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
  125. unsigned int addr, unsigned int *val,
  126. unsigned int num_in_bytes)
  127. {
  128. unsigned int outval;
  129. unsigned char *ptr = (unsigned char *)val;
  130. while (num_in_bytes) {
  131. memcpy(&outval, ptr, 4);
  132. SRAM_WRITE(handle, addr, outval);
  133. num_in_bytes -= 4;
  134. ptr += 4;
  135. addr += 4;
  136. }
  137. }
  138. static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
  139. unsigned char ae, unsigned int addr,
  140. unsigned int *val,
  141. unsigned int num_in_bytes)
  142. {
  143. unsigned int outval;
  144. unsigned char *ptr = (unsigned char *)val;
  145. addr >>= 0x2; /* convert to uword address */
  146. while (num_in_bytes) {
  147. memcpy(&outval, ptr, 4);
  148. qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
  149. num_in_bytes -= 4;
  150. ptr += 4;
  151. }
  152. }
  153. static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
  154. unsigned char ae,
  155. struct icp_qat_uof_batch_init
  156. *umem_init_header)
  157. {
  158. struct icp_qat_uof_batch_init *umem_init;
  159. if (!umem_init_header)
  160. return;
  161. umem_init = umem_init_header->next;
  162. while (umem_init) {
  163. unsigned int addr, *value, size;
  164. ae = umem_init->ae;
  165. addr = umem_init->addr;
  166. value = umem_init->value;
  167. size = umem_init->size;
  168. qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
  169. umem_init = umem_init->next;
  170. }
  171. }
  172. static void
  173. qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
  174. struct icp_qat_uof_batch_init **base)
  175. {
  176. struct icp_qat_uof_batch_init *umem_init;
  177. umem_init = *base;
  178. while (umem_init) {
  179. struct icp_qat_uof_batch_init *pre;
  180. pre = umem_init;
  181. umem_init = umem_init->next;
  182. kfree(pre);
  183. }
  184. *base = NULL;
  185. }
  186. static int qat_uclo_parse_num(char *str, unsigned int *num)
  187. {
  188. char buf[16] = {0};
  189. unsigned long ae = 0;
  190. int i;
  191. strncpy(buf, str, 15);
  192. for (i = 0; i < 16; i++) {
  193. if (!isdigit(buf[i])) {
  194. buf[i] = '\0';
  195. break;
  196. }
  197. }
  198. if ((kstrtoul(buf, 10, &ae)))
  199. return -EFAULT;
  200. *num = (unsigned int)ae;
  201. return 0;
  202. }
  203. static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
  204. struct icp_qat_uof_initmem *init_mem,
  205. unsigned int size_range, unsigned int *ae)
  206. {
  207. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  208. char *str;
  209. if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
  210. pr_err("QAT: initmem is out of range");
  211. return -EINVAL;
  212. }
  213. if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
  214. pr_err("QAT: Memory scope for init_mem error\n");
  215. return -EINVAL;
  216. }
  217. str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
  218. if (!str) {
  219. pr_err("QAT: AE name assigned in UOF init table is NULL\n");
  220. return -EINVAL;
  221. }
  222. if (qat_uclo_parse_num(str, ae)) {
  223. pr_err("QAT: Parse num for AE number failed\n");
  224. return -EINVAL;
  225. }
  226. if (*ae >= ICP_QAT_UCLO_MAX_AE) {
  227. pr_err("QAT: ae %d out of range\n", *ae);
  228. return -EINVAL;
  229. }
  230. return 0;
  231. }
  232. static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
  233. *handle, struct icp_qat_uof_initmem
  234. *init_mem, unsigned int ae,
  235. struct icp_qat_uof_batch_init
  236. **init_tab_base)
  237. {
  238. struct icp_qat_uof_batch_init *init_header, *tail;
  239. struct icp_qat_uof_batch_init *mem_init, *tail_old;
  240. struct icp_qat_uof_memvar_attr *mem_val_attr;
  241. unsigned int i, flag = 0;
  242. mem_val_attr =
  243. (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
  244. sizeof(struct icp_qat_uof_initmem));
  245. init_header = *init_tab_base;
  246. if (!init_header) {
  247. init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
  248. if (!init_header)
  249. return -ENOMEM;
  250. init_header->size = 1;
  251. *init_tab_base = init_header;
  252. flag = 1;
  253. }
  254. tail_old = init_header;
  255. while (tail_old->next)
  256. tail_old = tail_old->next;
  257. tail = tail_old;
  258. for (i = 0; i < init_mem->val_attr_num; i++) {
  259. mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
  260. if (!mem_init)
  261. goto out_err;
  262. mem_init->ae = ae;
  263. mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
  264. mem_init->value = &mem_val_attr->value;
  265. mem_init->size = 4;
  266. mem_init->next = NULL;
  267. tail->next = mem_init;
  268. tail = mem_init;
  269. init_header->size += qat_hal_get_ins_num();
  270. mem_val_attr++;
  271. }
  272. return 0;
  273. out_err:
  274. while (tail_old) {
  275. mem_init = tail_old->next;
  276. kfree(tail_old);
  277. tail_old = mem_init;
  278. }
  279. if (flag)
  280. kfree(*init_tab_base);
  281. return -ENOMEM;
  282. }
  283. static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
  284. struct icp_qat_uof_initmem *init_mem)
  285. {
  286. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  287. unsigned int ae;
  288. if (qat_uclo_fetch_initmem_ae(handle, init_mem,
  289. ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
  290. return -EINVAL;
  291. if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
  292. &obj_handle->lm_init_tab[ae]))
  293. return -EINVAL;
  294. return 0;
  295. }
  296. static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
  297. struct icp_qat_uof_initmem *init_mem)
  298. {
  299. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  300. unsigned int ae, ustore_size, uaddr, i;
  301. ustore_size = obj_handle->ustore_phy_size;
  302. if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
  303. return -EINVAL;
  304. if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
  305. &obj_handle->umem_init_tab[ae]))
  306. return -EINVAL;
  307. /* set the highest ustore address referenced */
  308. uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
  309. for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
  310. if (obj_handle->ae_data[ae].ae_slices[i].
  311. encap_image->uwords_num < uaddr)
  312. obj_handle->ae_data[ae].ae_slices[i].
  313. encap_image->uwords_num = uaddr;
  314. }
  315. return 0;
  316. }
  317. #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
  318. static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
  319. struct icp_qat_uof_initmem *init_mem)
  320. {
  321. switch (init_mem->region) {
  322. case ICP_QAT_UOF_LMEM_REGION:
  323. if (qat_uclo_init_lmem_seg(handle, init_mem))
  324. return -EINVAL;
  325. break;
  326. case ICP_QAT_UOF_UMEM_REGION:
  327. if (qat_uclo_init_umem_seg(handle, init_mem))
  328. return -EINVAL;
  329. break;
  330. default:
  331. pr_err("QAT: initmem region error. region type=0x%x\n",
  332. init_mem->region);
  333. return -EINVAL;
  334. }
  335. return 0;
  336. }
  337. static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
  338. struct icp_qat_uclo_encapme *image)
  339. {
  340. unsigned int i;
  341. struct icp_qat_uclo_encap_page *page;
  342. struct icp_qat_uof_image *uof_image;
  343. unsigned char ae;
  344. unsigned int ustore_size;
  345. unsigned int patt_pos;
  346. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  347. uint64_t *fill_data;
  348. uof_image = image->img_ptr;
  349. fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
  350. GFP_KERNEL);
  351. if (!fill_data)
  352. return -ENOMEM;
  353. for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
  354. memcpy(&fill_data[i], &uof_image->fill_pattern,
  355. sizeof(uint64_t));
  356. page = image->page;
  357. for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
  358. if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
  359. continue;
  360. ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
  361. patt_pos = page->beg_addr_p + page->micro_words_num;
  362. qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
  363. page->beg_addr_p, &fill_data[0]);
  364. qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
  365. ustore_size - patt_pos + 1,
  366. &fill_data[page->beg_addr_p]);
  367. }
  368. kfree(fill_data);
  369. return 0;
  370. }
/*
 * Walk the UOF initmem table, queueing every non-empty record onto the
 * per-AE lmem/umem batch lists, then replay and release those lists for
 * every AE. Returns 0 on success, -EINVAL on any failure.
 */
static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
        int i, ae;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

        for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
                if (initmem->num_in_bytes) {
                        if (qat_uclo_init_ae_memory(handle, initmem))
                                return -EINVAL;
                }
                /* records are variable-length: a fixed header followed by
                 * val_attr_num attribute entries; note the stride is read
                 * from the *current* record before the pointer advances */
                initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
                        (unsigned long)initmem +
                        sizeof(struct icp_qat_uof_initmem)) +
                        (sizeof(struct icp_qat_uof_memvar_attr) *
                        initmem->val_attr_num));
        }
        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                /* lmem writes go through the HAL batch helper ... */
                if (qat_hal_batch_wr_lm(handle, ae,
                                        obj_handle->lm_init_tab[ae])) {
                        pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
                        return -EINVAL;
                }
                qat_uclo_cleanup_batch_init_list(handle,
                                                 &obj_handle->lm_init_tab[ae]);
                /* ... umem writes through the local word-wise helper */
                qat_uclo_batch_wr_umem(handle, ae,
                                       obj_handle->umem_init_tab[ae]);
                qat_uclo_cleanup_batch_init_list(handle,
                                                 &obj_handle->
                                                 umem_init_tab[ae]);
        }
        return 0;
}
  403. static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
  404. char *chunk_id, void *cur)
  405. {
  406. int i;
  407. struct icp_qat_uof_chunkhdr *chunk_hdr =
  408. (struct icp_qat_uof_chunkhdr *)
  409. ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
  410. for (i = 0; i < obj_hdr->num_chunks; i++) {
  411. if ((cur < (void *)&chunk_hdr[i]) &&
  412. !strncmp(chunk_hdr[i].chunk_id, chunk_id,
  413. ICP_QAT_UOF_OBJID_LEN)) {
  414. return &chunk_hdr[i];
  415. }
  416. }
  417. return NULL;
  418. }
  419. static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
  420. {
  421. int i;
  422. unsigned int topbit = 1 << 0xF;
  423. unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
  424. reg ^= inbyte << 0x8;
  425. for (i = 0; i < 0x8; i++) {
  426. if (reg & topbit)
  427. reg = (reg << 1) ^ 0x1021;
  428. else
  429. reg <<= 1;
  430. }
  431. return reg & 0xFFFF;
  432. }
  433. static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
  434. {
  435. unsigned int chksum = 0;
  436. if (ptr)
  437. while (num--)
  438. chksum = qat_uclo_calc_checksum(chksum, *ptr++);
  439. return chksum;
  440. }
  441. static struct icp_qat_uclo_objhdr *
  442. qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
  443. char *chunk_id)
  444. {
  445. struct icp_qat_uof_filechunkhdr *file_chunk;
  446. struct icp_qat_uclo_objhdr *obj_hdr;
  447. char *chunk;
  448. int i;
  449. file_chunk = (struct icp_qat_uof_filechunkhdr *)
  450. (buf + sizeof(struct icp_qat_uof_filehdr));
  451. for (i = 0; i < file_hdr->num_chunks; i++) {
  452. if (!strncmp(file_chunk->chunk_id, chunk_id,
  453. ICP_QAT_UOF_OBJID_LEN)) {
  454. chunk = buf + file_chunk->offset;
  455. if (file_chunk->checksum != qat_uclo_calc_str_checksum(
  456. chunk, file_chunk->size))
  457. break;
  458. obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
  459. if (!obj_hdr)
  460. break;
  461. obj_hdr->file_buff = chunk;
  462. obj_hdr->checksum = file_chunk->checksum;
  463. obj_hdr->size = file_chunk->size;
  464. return obj_hdr;
  465. }
  466. file_chunk++;
  467. }
  468. return NULL;
  469. }
  470. static unsigned int
  471. qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
  472. struct icp_qat_uof_image *image)
  473. {
  474. struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
  475. struct icp_qat_uof_objtable *neigh_reg_tab;
  476. struct icp_qat_uof_code_page *code_page;
  477. code_page = (struct icp_qat_uof_code_page *)
  478. ((char *)image + sizeof(struct icp_qat_uof_image));
  479. uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
  480. code_page->uc_var_tab_offset);
  481. imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
  482. code_page->imp_var_tab_offset);
  483. imp_expr_tab = (struct icp_qat_uof_objtable *)
  484. (encap_uof_obj->beg_uof +
  485. code_page->imp_expr_tab_offset);
  486. if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
  487. imp_expr_tab->entry_num) {
  488. pr_err("QAT: UOF can't contain imported variable to be parsed");
  489. return -EINVAL;
  490. }
  491. neigh_reg_tab = (struct icp_qat_uof_objtable *)
  492. (encap_uof_obj->beg_uof +
  493. code_page->neigh_reg_tab_offset);
  494. if (neigh_reg_tab->entry_num) {
  495. pr_err("QAT: UOF can't contain shared control store feature");
  496. return -EINVAL;
  497. }
  498. if (image->numpages > 1) {
  499. pr_err("QAT: UOF can't contain multiple pages");
  500. return -EINVAL;
  501. }
  502. if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
  503. pr_err("QAT: UOF can't use shared control store feature");
  504. return -EFAULT;
  505. }
  506. if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
  507. pr_err("QAT: UOF can't use reloadable feature");
  508. return -EFAULT;
  509. }
  510. return 0;
  511. }
/*
 * Translate the UOF code page attached to image 'img' into the loader's
 * encap page representation, resolving file-relative offsets against
 * the mapped UOF base (encap_uof_obj->beg_uof).
 */
static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
                                    *encap_uof_obj,
                                    struct icp_qat_uof_image *img,
                                    struct icp_qat_uclo_encap_page *page)
{
        struct icp_qat_uof_code_page *code_page;
        struct icp_qat_uof_code_area *code_area;
        struct icp_qat_uof_objtable *uword_block_tab;
        struct icp_qat_uof_uword_block *uwblock;
        int i;

        /* the code page record sits directly after the image header */
        code_page = (struct icp_qat_uof_code_page *)
                        ((char *)img + sizeof(struct icp_qat_uof_image));
        page->def_page = code_page->def_page;
        page->page_region = code_page->page_region;
        page->beg_addr_v = code_page->beg_addr_v;
        page->beg_addr_p = code_page->beg_addr_p;
        code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
                                                     code_page->code_area_offset);
        page->micro_words_num = code_area->micro_words_num;
        uword_block_tab = (struct icp_qat_uof_objtable *)
                          (encap_uof_obj->beg_uof +
                          code_area->uword_block_tab);
        page->uwblock_num = uword_block_tab->entry_num;
        /* block entries follow directly after the objtable header */
        uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
                  sizeof(struct icp_qat_uof_objtable));
        page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
        /* turn each block's file offset into an absolute address */
        for (i = 0; i < uword_block_tab->entry_num; i++)
                page->uwblock[i].micro_words =
                (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}
/*
 * Walk the UOF's ICP_QAT_UOF_IMAG chunks (up to max_image) and fill one
 * icp_qat_uclo_encapme entry per image: register table, init-regsym
 * table, sbreak table and the mapped code page.
 * Returns the number of images mapped, or 0 if any image is
 * incompatible or a page allocation fails (pages mapped so far freed).
 */
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
                               struct icp_qat_uclo_encapme *ae_uimage,
                               int max_image)
{
        int i, j;
        struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
        struct icp_qat_uof_image *image;
        struct icp_qat_uof_objtable *ae_regtab;
        struct icp_qat_uof_objtable *init_reg_sym_tab;
        struct icp_qat_uof_objtable *sbreak_tab;
        struct icp_qat_uof_encap_obj *encap_uof_obj =
                &obj_handle->encap_uof_obj;

        for (j = 0; j < max_image; j++) {
                /* passing the previous header makes find_chunk return the
                 * next IMAG chunk, so this loop visits every image */
                chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
                                                ICP_QAT_UOF_IMAG, chunk_hdr);
                if (!chunk_hdr)
                        break;
                image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
                                                     chunk_hdr->offset);
                /* each objtable is a header followed by its entries */
                ae_regtab = (struct icp_qat_uof_objtable *)
                           (image->reg_tab_offset +
                           obj_handle->obj_hdr->file_buff);
                ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
                ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
                        (((char *)ae_regtab) +
                        sizeof(struct icp_qat_uof_objtable));
                init_reg_sym_tab = (struct icp_qat_uof_objtable *)
                                   (image->init_reg_sym_tab +
                                   obj_handle->obj_hdr->file_buff);
                ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
                ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
                        (((char *)init_reg_sym_tab) +
                        sizeof(struct icp_qat_uof_objtable));
                sbreak_tab = (struct icp_qat_uof_objtable *)
                        (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
                ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
                ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
                                      (((char *)sbreak_tab) +
                                      sizeof(struct icp_qat_uof_objtable));
                ae_uimage[j].img_ptr = image;
                if (qat_uclo_check_image_compat(encap_uof_obj, image))
                        goto out_err;
                ae_uimage[j].page =
                        kzalloc(sizeof(struct icp_qat_uclo_encap_page),
                                GFP_KERNEL);
                if (!ae_uimage[j].page)
                        goto out_err;
                qat_uclo_map_image_page(encap_uof_obj, image,
                                        ae_uimage[j].page);
        }
        return j;
out_err:
        /* free the pages of the images successfully mapped so far */
        for (i = 0; i < j; i++)
                kfree(ae_uimage[i].page);
        return 0;
}
  598. static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
  599. {
  600. int i, ae;
  601. int mflag = 0;
  602. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  603. for (ae = 0; ae <= max_ae; ae++) {
  604. if (!test_bit(ae,
  605. (unsigned long *)&handle->hal_handle->ae_mask))
  606. continue;
  607. for (i = 0; i < obj_handle->uimage_num; i++) {
  608. if (!test_bit(ae, (unsigned long *)
  609. &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
  610. continue;
  611. mflag = 1;
  612. if (qat_uclo_init_ae_data(obj_handle, ae, i))
  613. return -EINVAL;
  614. }
  615. }
  616. if (!mflag) {
  617. pr_err("QAT: uimage uses AE not set");
  618. return -EINVAL;
  619. }
  620. return 0;
  621. }
/*
 * Locate chunk 'tab_name' in obj_hdr and expose it as a string table:
 * table_len is copied out of the chunk header and 'strings' is pointed
 * past that length field into the raw string data.
 * Returns str_table on success, NULL if the chunk is absent.
 */
static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
                       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
        struct icp_qat_uof_chunkhdr *chunk_hdr;

        chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
                                        obj_hdr->file_buff, tab_name, NULL);
        if (chunk_hdr) {
                int hdr_size;

                memcpy(&str_table->table_len, obj_hdr->file_buff +
                       chunk_hdr->offset, sizeof(str_table->table_len));
                /* byte offset of the 'strings' member within the struct,
                 * reused as the size of the on-file table header */
                hdr_size = (char *)&str_table->strings - (char *)str_table;
                str_table->strings = (unsigned long)obj_hdr->file_buff +
                                     chunk_hdr->offset + hdr_size;
                return str_table;
        }
        return NULL;
}
  640. static void
  641. qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
  642. struct icp_qat_uclo_init_mem_table *init_mem_tab)
  643. {
  644. struct icp_qat_uof_chunkhdr *chunk_hdr;
  645. chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
  646. ICP_QAT_UOF_IMEM, NULL);
  647. if (chunk_hdr) {
  648. memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
  649. chunk_hdr->offset, sizeof(unsigned int));
  650. init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
  651. (encap_uof_obj->beg_uof + chunk_hdr->offset +
  652. sizeof(unsigned int));
  653. }
  654. }
  655. static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
  656. {
  657. unsigned int maj_ver, prod_type = obj_handle->prod_type;
  658. if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
  659. pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
  660. obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
  661. return -EINVAL;
  662. }
  663. maj_ver = obj_handle->prod_rev & 0xff;
  664. if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
  665. (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
  666. pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
  667. return -EINVAL;
  668. }
  669. return 0;
  670. }
/*
 * Initialize one register on AE 'ae' according to its UOF register
 * type. For the *_ABS types the context mask is irrelevant, so it is
 * cleared before deliberately falling through to the matching *_REL
 * handler. Returns 0 on success or a negative errno from the HAL.
 */
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
                             unsigned char ae, unsigned char ctx_mask,
                             enum icp_qat_uof_regtype reg_type,
                             unsigned short reg_addr, unsigned int value)
{
        switch (reg_type) {
        case ICP_GPA_ABS:
        case ICP_GPB_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_GPA_REL:
        case ICP_GPB_REL:
                return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
                                        reg_addr, value);
        case ICP_SR_ABS:
        case ICP_DR_ABS:
        case ICP_SR_RD_ABS:
        case ICP_DR_RD_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_SR_REL:
        case ICP_DR_REL:
        case ICP_SR_RD_REL:
        case ICP_DR_RD_REL:
                return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
                                            reg_addr, value);
        case ICP_SR_WR_ABS:
        case ICP_DR_WR_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_SR_WR_REL:
        case ICP_DR_WR_REL:
                return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
                                            reg_addr, value);
        case ICP_NEIGH_REL:
                return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
        default:
                pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
                return -EFAULT;
        }
        return 0;       /* unreachable: every case above returns */
}
/*
 * Apply every init-regsym record of 'encap_ae' to AE 'ae'. The context
 * mask covers all eight contexts (0xff) in 8-context mode, otherwise
 * only the even contexts (0x55). Records with unsupported init types
 * (expressions) fail the whole call. Returns 0 on success, -EINVAL on
 * an invalid context or unsupported init type.
 */
static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
                                 unsigned int ae,
                                 struct icp_qat_uclo_encapme *encap_ae)
{
        unsigned int i;
        unsigned char ctx_mask;
        struct icp_qat_uof_init_regsym *init_regsym;

        if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
            ICP_QAT_UCLO_MAX_CTX)
                ctx_mask = 0xff;
        else
                ctx_mask = 0x55;
        for (i = 0; i < encap_ae->init_regsym_num; i++) {
                unsigned int exp_res;

                init_regsym = &encap_ae->init_regsym[i];
                exp_res = init_regsym->value;
                switch (init_regsym->init_type) {
                case ICP_QAT_UOF_INIT_REG:
                        /* NOTE(review): HAL return value is ignored here
                         * and below — presumably intentional best-effort */
                        qat_uclo_init_reg(handle, ae, ctx_mask,
                                          (enum icp_qat_uof_regtype)
                                          init_regsym->reg_type,
                                          (unsigned short)init_regsym->reg_addr,
                                          exp_res);
                        break;
                case ICP_QAT_UOF_INIT_REG_CTX:
                        /* check if ctx is appropriate for the ctxMode */
                        if (!((1 << init_regsym->ctx) & ctx_mask)) {
                                pr_err("QAT: invalid ctx num = 0x%x\n",
                                       init_regsym->ctx);
                                return -EINVAL;
                        }
                        qat_uclo_init_reg(handle, ae,
                                          (unsigned char)
                                          (1 << init_regsym->ctx),
                                          (enum icp_qat_uof_regtype)
                                          init_regsym->reg_type,
                                          (unsigned short)init_regsym->reg_addr,
                                          exp_res);
                        break;
                case ICP_QAT_UOF_INIT_EXPR:
                        pr_err("QAT: INIT_EXPR feature not supported\n");
                        return -EINVAL;
                case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
                        pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
                        return -EINVAL;
                default:
                        break;
                }
        }
        return 0;
}
  761. static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
  762. {
  763. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  764. unsigned int s, ae;
  765. if (obj_handle->global_inited)
  766. return 0;
  767. if (obj_handle->init_mem_tab.entry_num) {
  768. if (qat_uclo_init_memory(handle)) {
  769. pr_err("QAT: initialize memory failed\n");
  770. return -EINVAL;
  771. }
  772. }
  773. for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
  774. for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
  775. if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
  776. continue;
  777. if (qat_uclo_init_reg_sym(handle, ae,
  778. obj_handle->ae_data[ae].
  779. ae_slices[s].encap_image))
  780. return -EINVAL;
  781. }
  782. }
  783. obj_handle->global_inited = 1;
  784. return 0;
  785. }
  786. static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
  787. {
  788. unsigned char ae, nn_mode, s;
  789. struct icp_qat_uof_image *uof_image;
  790. struct icp_qat_uclo_aedata *ae_data;
  791. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  792. for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
  793. if (!test_bit(ae,
  794. (unsigned long *)&handle->hal_handle->ae_mask))
  795. continue;
  796. ae_data = &obj_handle->ae_data[ae];
  797. for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
  798. ICP_QAT_UCLO_MAX_CTX); s++) {
  799. if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
  800. continue;
  801. uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
  802. if (qat_hal_set_ae_ctx_mode(handle, ae,
  803. (char)ICP_QAT_CTX_MODE
  804. (uof_image->ae_mode))) {
  805. pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
  806. return -EFAULT;
  807. }
  808. nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
  809. if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
  810. pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
  811. return -EFAULT;
  812. }
  813. if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
  814. (char)ICP_QAT_LOC_MEM0_MODE
  815. (uof_image->ae_mode))) {
  816. pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
  817. return -EFAULT;
  818. }
  819. if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
  820. (char)ICP_QAT_LOC_MEM1_MODE
  821. (uof_image->ae_mode))) {
  822. pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
  823. return -EFAULT;
  824. }
  825. }
  826. }
  827. return 0;
  828. }
  829. static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
  830. {
  831. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  832. struct icp_qat_uclo_encapme *image;
  833. int a;
  834. for (a = 0; a < obj_handle->uimage_num; a++) {
  835. image = &obj_handle->ae_uimage[a];
  836. image->uwords_num = image->page->beg_addr_p +
  837. image->page->micro_words_num;
  838. }
  839. }
  840. static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
  841. {
  842. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  843. unsigned int ae;
  844. obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
  845. GFP_KERNEL);
  846. if (!obj_handle->uword_buf)
  847. return -ENOMEM;
  848. obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
  849. obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
  850. obj_handle->obj_hdr->file_buff;
  851. obj_handle->uword_in_bytes = 6;
  852. obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
  853. obj_handle->prod_rev = PID_MAJOR_REV |
  854. (PID_MINOR_REV & handle->hal_handle->revision_id);
  855. if (qat_uclo_check_uof_compat(obj_handle)) {
  856. pr_err("QAT: UOF incompatible\n");
  857. return -EINVAL;
  858. }
  859. obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
  860. if (!obj_handle->obj_hdr->file_buff ||
  861. !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
  862. &obj_handle->str_table)) {
  863. pr_err("QAT: UOF doesn't have effective images\n");
  864. goto out_err;
  865. }
  866. obj_handle->uimage_num =
  867. qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
  868. ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
  869. if (!obj_handle->uimage_num)
  870. goto out_err;
  871. if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
  872. pr_err("QAT: Bad object\n");
  873. goto out_check_uof_aemask_err;
  874. }
  875. qat_uclo_init_uword_num(handle);
  876. qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
  877. &obj_handle->init_mem_tab);
  878. if (qat_uclo_set_ae_mode(handle))
  879. goto out_check_uof_aemask_err;
  880. return 0;
  881. out_check_uof_aemask_err:
  882. for (ae = 0; ae < obj_handle->uimage_num; ae++)
  883. kfree(obj_handle->ae_uimage[ae].page);
  884. out_err:
  885. kfree(obj_handle->uword_buf);
  886. return -EFAULT;
  887. }
/*
 * qat_uclo_wr_mimage() - write a memory image into device SRAM at offset 0.
 * @handle:   firmware loader handle
 * @addr_ptr: source buffer to write
 * @mem_size: size of the buffer in bytes
 *
 * Thin wrapper around qat_uclo_wr_sram_by_words(); the size is rounded up
 * to a multiple of 4 since the SRAM write path is word based.
 */
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
			void *addr_ptr, int mem_size)
{
	qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
}
  893. int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
  894. void *addr_ptr, int mem_size)
  895. {
  896. struct icp_qat_uof_filehdr *filehdr;
  897. struct icp_qat_uclo_objhandle *objhdl;
  898. BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
  899. (sizeof(handle->hal_handle->ae_mask) * 8));
  900. if (!handle || !addr_ptr || mem_size < 24)
  901. return -EINVAL;
  902. objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
  903. if (!objhdl)
  904. return -ENOMEM;
  905. objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
  906. if (!objhdl->obj_buf)
  907. goto out_objbuf_err;
  908. filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
  909. if (qat_uclo_check_format(filehdr))
  910. goto out_objhdr_err;
  911. objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
  912. ICP_QAT_UOF_OBJS);
  913. if (!objhdl->obj_hdr) {
  914. pr_err("QAT: object file chunk is null\n");
  915. goto out_objhdr_err;
  916. }
  917. handle->obj_handle = objhdl;
  918. if (qat_uclo_parse_uof_obj(handle))
  919. goto out_overlay_obj_err;
  920. return 0;
  921. out_overlay_obj_err:
  922. handle->obj_handle = NULL;
  923. kfree(objhdl->obj_hdr);
  924. out_objhdr_err:
  925. kfree(objhdl->obj_buf);
  926. out_objbuf_err:
  927. kfree(objhdl);
  928. return -ENOMEM;
  929. }
  930. void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
  931. {
  932. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  933. unsigned int a;
  934. if (!obj_handle)
  935. return;
  936. kfree(obj_handle->uword_buf);
  937. for (a = 0; a < obj_handle->uimage_num; a++)
  938. kfree(obj_handle->ae_uimage[a].page);
  939. for (a = 0; a < handle->hal_handle->ae_max_num; a++)
  940. qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
  941. kfree(obj_handle->obj_hdr);
  942. kfree(obj_handle->obj_buf);
  943. kfree(obj_handle);
  944. handle->obj_handle = NULL;
  945. }
  946. static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
  947. struct icp_qat_uclo_encap_page *encap_page,
  948. uint64_t *uword, unsigned int addr_p,
  949. unsigned int raddr, uint64_t fill)
  950. {
  951. uint64_t uwrd = 0;
  952. unsigned int i;
  953. if (!encap_page) {
  954. *uword = fill;
  955. return;
  956. }
  957. for (i = 0; i < encap_page->uwblock_num; i++) {
  958. if (raddr >= encap_page->uwblock[i].start_addr &&
  959. raddr <= encap_page->uwblock[i].start_addr +
  960. encap_page->uwblock[i].words_num - 1) {
  961. raddr -= encap_page->uwblock[i].start_addr;
  962. raddr *= obj_handle->uword_in_bytes;
  963. memcpy(&uwrd, (void *)(((unsigned long)
  964. encap_page->uwblock[i].micro_words) + raddr),
  965. obj_handle->uword_in_bytes);
  966. uwrd = uwrd & 0xbffffffffffull;
  967. }
  968. }
  969. *uword = uwrd;
  970. if (*uword == INVLD_UWORD)
  971. *uword = fill;
  972. }
  973. static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
  974. struct icp_qat_uclo_encap_page
  975. *encap_page, unsigned int ae)
  976. {
  977. unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
  978. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  979. uint64_t fill_pat;
  980. /* load the page starting at appropriate ustore address */
  981. /* get fill-pattern from an image -- they are all the same */
  982. memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
  983. sizeof(uint64_t));
  984. uw_physical_addr = encap_page->beg_addr_p;
  985. uw_relative_addr = 0;
  986. words_num = encap_page->micro_words_num;
  987. while (words_num) {
  988. if (words_num < UWORD_CPYBUF_SIZE)
  989. cpylen = words_num;
  990. else
  991. cpylen = UWORD_CPYBUF_SIZE;
  992. /* load the buffer */
  993. for (i = 0; i < cpylen; i++)
  994. qat_uclo_fill_uwords(obj_handle, encap_page,
  995. &obj_handle->uword_buf[i],
  996. uw_physical_addr + i,
  997. uw_relative_addr + i, fill_pat);
  998. /* copy the buffer to ustore */
  999. qat_hal_wr_uwords(handle, (unsigned char)ae,
  1000. uw_physical_addr, cpylen,
  1001. obj_handle->uword_buf);
  1002. uw_physical_addr += cpylen;
  1003. uw_relative_addr += cpylen;
  1004. words_num -= cpylen;
  1005. }
  1006. }
  1007. static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
  1008. struct icp_qat_uof_image *image)
  1009. {
  1010. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  1011. unsigned int ctx_mask, s;
  1012. struct icp_qat_uclo_page *page;
  1013. unsigned char ae;
  1014. int ctx;
  1015. if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
  1016. ctx_mask = 0xff;
  1017. else
  1018. ctx_mask = 0x55;
  1019. /* load the default page and set assigned CTX PC
  1020. * to the entrypoint address */
  1021. for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
  1022. if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
  1023. continue;
  1024. /* find the slice to which this image is assigned */
  1025. for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
  1026. if (image->ctx_assigned & obj_handle->ae_data[ae].
  1027. ae_slices[s].ctx_mask_assigned)
  1028. break;
  1029. }
  1030. if (s >= obj_handle->ae_data[ae].slice_num)
  1031. continue;
  1032. page = obj_handle->ae_data[ae].ae_slices[s].page;
  1033. if (!page->encap_page->def_page)
  1034. continue;
  1035. qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
  1036. page = obj_handle->ae_data[ae].ae_slices[s].page;
  1037. for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
  1038. obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
  1039. (ctx_mask & (1 << ctx)) ? page : NULL;
  1040. qat_hal_set_live_ctx(handle, (unsigned char)ae,
  1041. image->ctx_assigned);
  1042. qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
  1043. image->entry_address);
  1044. }
  1045. }
  1046. int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
  1047. {
  1048. struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
  1049. unsigned int i;
  1050. if (qat_uclo_init_globals(handle))
  1051. return -EINVAL;
  1052. for (i = 0; i < obj_handle->uimage_num; i++) {
  1053. if (!obj_handle->ae_uimage[i].img_ptr)
  1054. return -EINVAL;
  1055. if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
  1056. return -EINVAL;
  1057. qat_uclo_wr_uimage_page(handle,
  1058. obj_handle->ae_uimage[i].img_ptr);
  1059. }
  1060. return 0;
  1061. }