caamalg.c

/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
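/*
 * Illustrative sketch only (an assumption for readability, not a structure
 * used by this driver): viewed as words, the job descriptor drawn above is
 * roughly
 *
 *	struct caam_jobdesc_sketch {
 *		u32 header;        - descriptor header
 *		u32 sh_desc_ptr;   - pointer to the shared descriptor
 *		u32 seq_out_ptr;   - SEQ_OUT_PTR command word
 *		u32 out_buf;       - output buffer address
 *		u32 out_len;       - output length
 *		u32 seq_in_ptr;    - SEQ_IN_PTR command word
 *		u32 in_buf;        - input buffer address
 *		u32 in_len;        - input length
 *	};
 *
 * In practice the descriptor words are built with the append_*() helpers
 * from desc_constr.h rather than through a fixed struct.
 */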
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
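/*
 * Sizing note (summary of the checks used throughout this file): a shared
 * descriptor's text, the job descriptor I/O overhead and any key material
 * must together fit in CAAM_DESC_BYTES_MAX (the 64-word descriptor buffer).
 * When they fit, "keys_fit_inline" is set and keys are embedded immediately
 * via append_key_as_imm(); otherwise they are referenced through
 * ctx->key_dma with append_key().
 */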
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
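/*
 * Usage note: append_dec_op1() is only used on the decrypt path. For AES it
 * emits both OPERATION variants, with and without the DK (Decrypt Key) bit,
 * and the JUMP on the shared condition selects the right one at run time;
 * for non-AES algorithms a plain decrypt OPERATION is sufficient.
 */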
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
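/*
 * Both variable sequence lengths are taken from SEQINLEN above, so the
 * descriptor reads the whole input through class 1 and writes back the
 * same number of bytes; for a plain block cipher the input and output
 * lengths are identical.
 */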
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
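/*
 * Layout of ctx->key as consumed below (see append_key_aead()):
 * the MDHA split authentication key, padded to split_key_pad_len, followed
 * by the encryption key and, for RFC3686, the nonce; key_dma maps the same
 * buffer for the non-inline case.
 */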
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}
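/*
 * For RFC3686 the nonce is staged through the output FIFO and MOVEd into
 * CONTEXT1 at byte offset 16, i.e. the start of the {NONCE, IV, COUNTER}
 * area described in aead_set_sh_desc() below.
 */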
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}
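/*
 * Note: HDR_SAVECTX corresponds to the "Context registers are saved"
 * comment above; with serial sharing, the key-load commands added by
 * append_key_aead() are skipped (JUMP_COND_SHRD) whenever a job finds the
 * shared descriptor already loaded.
 */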
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
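	/*
	 * The RFC3686 initial block counter is loaded further below just
	 * past the IV, i.e. at byte offset ctx1_iv_off + CTR_RFC3686_IV_SIZE
	 * of CONTEXT1, completing the {NONCE, IV, COUNTER} layout above.
	 */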
	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);
	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);
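	/*
	 * The value OR'ed into this JUMP is its local (relative) offset,
	 * used here to hop over the zero-payload commands that follow.
	 */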
	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
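	/*
	 * For RFC4106 the 8-byte IV is carried with the associated data, so
	 * 8 is subtracted from the AAD length here and the IV itself is
	 * skipped with a separate SEQ FIFO LOAD below.
	 */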
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
  1031. set_move_tgt_here(desc, read_move_cmd);
  1032. set_move_tgt_here(desc, write_move_cmd);
  1033. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  1034. /* Move payload data to OFIFO */
  1035. append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
  1036. append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
  1037. /* Read ICV */
  1038. append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
  1039. FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
  1040. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  1041. desc_bytes(desc),
  1042. DMA_TO_DEVICE);
  1043. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1044. dev_err(jrdev, "unable to map shared descriptor\n");
  1045. return -ENOMEM;
  1046. }
  1047. #ifdef DEBUG
  1048. print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
  1049. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1050. desc_bytes(desc), 1);
  1051. #endif
  1052. return 0;
  1053. }
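/*
 * Note: rfc4543 is AES-GMAC, i.e. the payload is authenticated but not
 * encrypted. That is why the descriptors above feed both the assoc data and
 * the payload to the class 1 engine as AAD and simply move the payload
 * through to the output FIFO unchanged.
 */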
  1054. static int rfc4543_setauthsize(struct crypto_aead *authenc,
  1055. unsigned int authsize)
  1056. {
  1057. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  1058. ctx->authsize = authsize;
  1059. rfc4543_set_sh_desc(authenc);
  1060. return 0;
  1061. }
  1062. static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
  1063. u32 authkeylen)
  1064. {
  1065. return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
  1066. ctx->split_key_pad_len, key_in, authkeylen,
  1067. ctx->alg_op);
  1068. }
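/*
 * A "split key" is the pair of MDHA intermediate digest states obtained by
 * hashing (key XOR ipad) and (key XOR opad); gen_split_key() runs a small
 * CAAM job (selected by ctx->alg_op) to compute it, so later HMAC operations
 * can skip the HMAC key schedule. split_key_len is therefore twice the
 * digest state size, and split_key_pad_len rounds it up to a 16-byte
 * multiple.
 */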
  1069. static int aead_setkey(struct crypto_aead *aead,
  1070. const u8 *key, unsigned int keylen)
  1071. {
  1072. /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
  1073. static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
  1074. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1075. struct device *jrdev = ctx->jrdev;
  1076. struct crypto_authenc_keys keys;
  1077. int ret = 0;
  1078. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  1079. goto badkey;
  1080. /* Pick class 2 key length from algorithm submask */
  1081. ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
  1082. OP_ALG_ALGSEL_SHIFT] * 2;
  1083. ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
  1084. if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  1085. goto badkey;
  1086. #ifdef DEBUG
  1087. printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
  1088. keys.authkeylen + keys.enckeylen, keys.enckeylen,
  1089. keys.authkeylen);
  1090. printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
  1091. ctx->split_key_len, ctx->split_key_pad_len);
  1092. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1093. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1094. #endif
  1095. ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
  1096. if (ret) {
  1097. goto badkey;
  1098. }
1099. /* append the encryption key after the auth split key */
  1100. memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
  1101. ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
  1102. keys.enckeylen, DMA_TO_DEVICE);
  1103. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1104. dev_err(jrdev, "unable to map key i/o memory\n");
  1105. return -ENOMEM;
  1106. }
  1107. #ifdef DEBUG
  1108. print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
  1109. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  1110. ctx->split_key_pad_len + keys.enckeylen, 1);
  1111. #endif
  1112. ctx->enckeylen = keys.enckeylen;
  1113. ret = aead_set_sh_desc(aead);
  1114. if (ret) {
  1115. dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
  1116. keys.enckeylen, DMA_TO_DEVICE);
  1117. }
  1118. return ret;
  1119. badkey:
  1120. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1121. return -EINVAL;
  1122. }
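/*
 * After a successful aead_setkey() the ctx->key buffer is laid out as
 * follows (matching the memcpy above):
 *
 *	[ MDHA split key, padded to split_key_pad_len | encryption key ]
 *
 * and ctx->key_dma maps split_key_pad_len + enckeylen bytes of it.
 */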
  1123. static int gcm_setkey(struct crypto_aead *aead,
  1124. const u8 *key, unsigned int keylen)
  1125. {
  1126. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1127. struct device *jrdev = ctx->jrdev;
  1128. int ret = 0;
  1129. #ifdef DEBUG
  1130. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1131. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1132. #endif
  1133. memcpy(ctx->key, key, keylen);
  1134. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
  1135. DMA_TO_DEVICE);
  1136. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1137. dev_err(jrdev, "unable to map key i/o memory\n");
  1138. return -ENOMEM;
  1139. }
  1140. ctx->enckeylen = keylen;
  1141. ret = gcm_set_sh_desc(aead);
  1142. if (ret) {
  1143. dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
  1144. DMA_TO_DEVICE);
  1145. }
  1146. return ret;
  1147. }
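/*
 * Illustrative usage sketch (not part of this driver): gcm_setkey() receives
 * the raw AES key through the generic AEAD API, e.g.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	crypto_aead_setkey(tfm, key, 16);	   (16, 24 or 32 byte AES key)
 *	crypto_aead_setauthsize(tfm, 16);
 */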
  1148. static int rfc4106_setkey(struct crypto_aead *aead,
  1149. const u8 *key, unsigned int keylen)
  1150. {
  1151. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1152. struct device *jrdev = ctx->jrdev;
  1153. int ret = 0;
  1154. if (keylen < 4)
  1155. return -EINVAL;
  1156. #ifdef DEBUG
  1157. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1158. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1159. #endif
  1160. memcpy(ctx->key, key, keylen);
  1161. /*
  1162. * The last four bytes of the key material are used as the salt value
  1163. * in the nonce. Update the AES key length.
  1164. */
  1165. ctx->enckeylen = keylen - 4;
  1166. ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
  1167. DMA_TO_DEVICE);
  1168. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1169. dev_err(jrdev, "unable to map key i/o memory\n");
  1170. return -ENOMEM;
  1171. }
  1172. ret = rfc4106_set_sh_desc(aead);
  1173. if (ret) {
  1174. dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
  1175. DMA_TO_DEVICE);
  1176. }
  1177. return ret;
  1178. }
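/*
 * Illustrative key layout for rfc4106(gcm(aes)), matching the comment above:
 * the setkey blob is the AES key followed by the 4-byte salt, so keylen is
 * 20, 28 or 36 bytes, e.g. (sketch only):
 *
 *	u8 blob[20];
 *	memcpy(blob, aes_key, 16);	   (AES-128 key)
 *	memcpy(blob + 16, salt, 4);	   (salt, becomes part of the nonce)
 *	crypto_aead_setkey(tfm, blob, sizeof(blob));
 */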
  1179. static int rfc4543_setkey(struct crypto_aead *aead,
  1180. const u8 *key, unsigned int keylen)
  1181. {
  1182. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1183. struct device *jrdev = ctx->jrdev;
  1184. int ret = 0;
  1185. if (keylen < 4)
  1186. return -EINVAL;
  1187. #ifdef DEBUG
  1188. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1189. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1190. #endif
  1191. memcpy(ctx->key, key, keylen);
  1192. /*
  1193. * The last four bytes of the key material are used as the salt value
  1194. * in the nonce. Update the AES key length.
  1195. */
  1196. ctx->enckeylen = keylen - 4;
  1197. ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
  1198. DMA_TO_DEVICE);
  1199. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1200. dev_err(jrdev, "unable to map key i/o memory\n");
  1201. return -ENOMEM;
  1202. }
  1203. ret = rfc4543_set_sh_desc(aead);
  1204. if (ret) {
  1205. dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
  1206. DMA_TO_DEVICE);
  1207. }
  1208. return ret;
  1209. }
  1210. static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  1211. const u8 *key, unsigned int keylen)
  1212. {
  1213. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  1214. struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
  1215. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
  1216. const char *alg_name = crypto_tfm_alg_name(tfm);
  1217. struct device *jrdev = ctx->jrdev;
  1218. int ret = 0;
  1219. u32 *key_jump_cmd;
  1220. u32 *desc;
  1221. u32 *nonce;
  1222. u32 geniv;
  1223. u32 ctx1_iv_off = 0;
  1224. const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
  1225. OP_ALG_AAI_CTR_MOD128);
  1226. const bool is_rfc3686 = (ctr_mode &&
  1227. (strstr(alg_name, "rfc3686") != NULL));
  1228. #ifdef DEBUG
  1229. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1230. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1231. #endif
  1232. /*
  1233. * AES-CTR needs to load IV in CONTEXT1 reg
  1234. * at an offset of 128bits (16bytes)
  1235. * CONTEXT1[255:128] = IV
  1236. */
  1237. if (ctr_mode)
  1238. ctx1_iv_off = 16;
  1239. /*
  1240. * RFC3686 specific:
  1241. * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  1242. * | *key = {KEY, NONCE}
  1243. */
  1244. if (is_rfc3686) {
  1245. ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  1246. keylen -= CTR_RFC3686_NONCE_SIZE;
  1247. }
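/*
 * Resulting CONTEXT1 byte layout for rfc3686(ctr(aes)), derived from the
 * offsets used below: bytes 16-19 hold the nonce, 20-27 the per-request IV
 * and 28-31 the block counter, which RFC 3686 requires to start at 1.
 */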
  1248. memcpy(ctx->key, key, keylen);
  1249. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
  1250. DMA_TO_DEVICE);
  1251. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1252. dev_err(jrdev, "unable to map key i/o memory\n");
  1253. return -ENOMEM;
  1254. }
  1255. ctx->enckeylen = keylen;
  1256. /* ablkcipher_encrypt shared descriptor */
  1257. desc = ctx->sh_desc_enc;
  1258. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1259. /* Skip if already shared */
  1260. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1261. JUMP_COND_SHRD);
  1262. /* Load class1 key only */
  1263. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1264. ctx->enckeylen, CLASS_1 |
  1265. KEY_DEST_CLASS_REG);
  1266. /* Load nonce into CONTEXT1 reg */
  1267. if (is_rfc3686) {
  1268. nonce = (u32 *)(key + keylen);
  1269. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1270. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1271. append_move(desc, MOVE_WAITCOMP |
  1272. MOVE_SRC_OUTFIFO |
  1273. MOVE_DEST_CLASS1CTX |
  1274. (16 << MOVE_OFFSET_SHIFT) |
  1275. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1276. }
  1277. set_jump_tgt_here(desc, key_jump_cmd);
  1278. /* Load iv */
  1279. append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
  1280. LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1281. /* Load counter into CONTEXT1 reg */
  1282. if (is_rfc3686)
  1283. append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
  1284. LDST_CLASS_1_CCB |
  1285. LDST_SRCDST_BYTE_CONTEXT |
  1286. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1287. LDST_OFFSET_SHIFT));
  1288. /* Load operation */
  1289. append_operation(desc, ctx->class1_alg_type |
  1290. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  1291. /* Perform operation */
  1292. ablkcipher_append_src_dst(desc);
  1293. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
  1294. desc_bytes(desc),
  1295. DMA_TO_DEVICE);
  1296. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  1297. dev_err(jrdev, "unable to map shared descriptor\n");
  1298. return -ENOMEM;
  1299. }
  1300. #ifdef DEBUG
  1301. print_hex_dump(KERN_ERR,
  1302. "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
  1303. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1304. desc_bytes(desc), 1);
  1305. #endif
  1306. /* ablkcipher_decrypt shared descriptor */
  1307. desc = ctx->sh_desc_dec;
  1308. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1309. /* Skip if already shared */
  1310. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1311. JUMP_COND_SHRD);
  1312. /* Load class1 key only */
  1313. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1314. ctx->enckeylen, CLASS_1 |
  1315. KEY_DEST_CLASS_REG);
  1316. /* Load nonce into CONTEXT1 reg */
  1317. if (is_rfc3686) {
  1318. nonce = (u32 *)(key + keylen);
  1319. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1320. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1321. append_move(desc, MOVE_WAITCOMP |
  1322. MOVE_SRC_OUTFIFO |
  1323. MOVE_DEST_CLASS1CTX |
  1324. (16 << MOVE_OFFSET_SHIFT) |
  1325. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1326. }
  1327. set_jump_tgt_here(desc, key_jump_cmd);
  1328. /* load IV */
  1329. append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
  1330. LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1331. /* Load counter into CONTEXT1 reg */
  1332. if (is_rfc3686)
  1333. append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
  1334. LDST_CLASS_1_CCB |
  1335. LDST_SRCDST_BYTE_CONTEXT |
  1336. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1337. LDST_OFFSET_SHIFT));
  1338. /* Choose operation */
  1339. if (ctr_mode)
  1340. append_operation(desc, ctx->class1_alg_type |
  1341. OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
  1342. else
  1343. append_dec_op1(desc, ctx->class1_alg_type);
  1344. /* Perform operation */
  1345. ablkcipher_append_src_dst(desc);
  1346. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  1347. desc_bytes(desc),
  1348. DMA_TO_DEVICE);
  1349. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1350. dev_err(jrdev, "unable to map shared descriptor\n");
  1351. return -ENOMEM;
  1352. }
  1353. #ifdef DEBUG
  1354. print_hex_dump(KERN_ERR,
  1355. "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
  1356. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1357. desc_bytes(desc), 1);
  1358. #endif
  1359. /* ablkcipher_givencrypt shared descriptor */
  1360. desc = ctx->sh_desc_givenc;
  1361. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1362. /* Skip if already shared */
  1363. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1364. JUMP_COND_SHRD);
  1365. /* Load class1 key only */
  1366. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1367. ctx->enckeylen, CLASS_1 |
  1368. KEY_DEST_CLASS_REG);
  1369. /* Load Nonce into CONTEXT1 reg */
  1370. if (is_rfc3686) {
  1371. nonce = (u32 *)(key + keylen);
  1372. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1373. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1374. append_move(desc, MOVE_WAITCOMP |
  1375. MOVE_SRC_OUTFIFO |
  1376. MOVE_DEST_CLASS1CTX |
  1377. (16 << MOVE_OFFSET_SHIFT) |
  1378. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1379. }
  1380. set_jump_tgt_here(desc, key_jump_cmd);
  1381. /* Generate IV */
  1382. geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
  1383. NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
  1384. NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
  1385. append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
  1386. LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
  1387. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  1388. append_move(desc, MOVE_WAITCOMP |
  1389. MOVE_SRC_INFIFO |
  1390. MOVE_DEST_CLASS1CTX |
  1391. (crt->ivsize << MOVE_LEN_SHIFT) |
  1392. (ctx1_iv_off << MOVE_OFFSET_SHIFT));
  1393. append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
  1394. /* Copy generated IV to memory */
  1395. append_seq_store(desc, crt->ivsize,
  1396. LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
  1397. (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1398. /* Load Counter into CONTEXT1 reg */
  1399. if (is_rfc3686)
  1400. append_load_imm_u32(desc, (u32)1, LDST_IMM |
  1401. LDST_CLASS_1_CCB |
  1402. LDST_SRCDST_BYTE_CONTEXT |
  1403. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1404. LDST_OFFSET_SHIFT));
  1405. if (ctx1_iv_off)
  1406. append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
  1407. (1 << JUMP_OFFSET_SHIFT));
  1408. /* Load operation */
  1409. append_operation(desc, ctx->class1_alg_type |
  1410. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  1411. /* Perform operation */
  1412. ablkcipher_append_src_dst(desc);
  1413. ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
  1414. desc_bytes(desc),
  1415. DMA_TO_DEVICE);
  1416. if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
  1417. dev_err(jrdev, "unable to map shared descriptor\n");
  1418. return -ENOMEM;
  1419. }
  1420. #ifdef DEBUG
  1421. print_hex_dump(KERN_ERR,
  1422. "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
  1423. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1424. desc_bytes(desc), 1);
  1425. #endif
  1426. return ret;
  1427. }
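/*
 * Illustrative usage sketch (not part of this driver): ablkcipher_setkey()
 * is reached through the (legacy) ablkcipher API, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *
 * For "rfc3686(ctr(aes))" the key blob additionally carries the 4-byte nonce
 * after the AES key, as handled above.
 */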
  1428. static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  1429. const u8 *key, unsigned int keylen)
  1430. {
  1431. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  1432. struct device *jrdev = ctx->jrdev;
  1433. u32 *key_jump_cmd, *desc;
  1434. __be64 sector_size = cpu_to_be64(512);
  1435. if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
  1436. crypto_ablkcipher_set_flags(ablkcipher,
  1437. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1438. dev_err(jrdev, "key size mismatch\n");
  1439. return -EINVAL;
  1440. }
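/*
 * XTS uses two concatenated AES keys of equal size (data key + tweak key),
 * so only 32-byte (2 x AES-128) and 64-byte (2 x AES-256) blobs pass the
 * check above; 48 bytes (2 x AES-192) is not accepted here.
 */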
  1441. memcpy(ctx->key, key, keylen);
  1442. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
  1443. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1444. dev_err(jrdev, "unable to map key i/o memory\n");
  1445. return -ENOMEM;
  1446. }
  1447. ctx->enckeylen = keylen;
  1448. /* xts_ablkcipher_encrypt shared descriptor */
  1449. desc = ctx->sh_desc_enc;
  1450. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1451. /* Skip if already shared */
  1452. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1453. JUMP_COND_SHRD);
  1454. /* Load class1 keys only */
  1455. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1456. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  1457. /* Load sector size with index 40 bytes (0x28) */
  1458. append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
  1459. LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
  1460. append_data(desc, (void *)&sector_size, 8);
  1461. set_jump_tgt_here(desc, key_jump_cmd);
  1462. /*
  1463. * create sequence for loading the sector index
  1464. * Upper 8B of IV - will be used as sector index
  1465. * Lower 8B of IV - will be discarded
  1466. */
  1467. append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
  1468. LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
  1469. append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
  1470. /* Load operation */
  1471. append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
  1472. OP_ALG_ENCRYPT);
  1473. /* Perform operation */
  1474. ablkcipher_append_src_dst(desc);
  1475. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
  1476. DMA_TO_DEVICE);
  1477. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  1478. dev_err(jrdev, "unable to map shared descriptor\n");
  1479. return -ENOMEM;
  1480. }
  1481. #ifdef DEBUG
  1482. print_hex_dump(KERN_ERR,
  1483. "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
  1484. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  1485. #endif
  1486. /* xts_ablkcipher_decrypt shared descriptor */
  1487. desc = ctx->sh_desc_dec;
  1488. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1489. /* Skip if already shared */
  1490. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1491. JUMP_COND_SHRD);
  1492. /* Load class1 key only */
  1493. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1494. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  1495. /* Load sector size with index 40 bytes (0x28) */
  1496. append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
  1497. LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
  1498. append_data(desc, (void *)&sector_size, 8);
  1499. set_jump_tgt_here(desc, key_jump_cmd);
  1500. /*
  1501. * create sequence for loading the sector index
  1502. * Upper 8B of IV - will be used as sector index
  1503. * Lower 8B of IV - will be discarded
  1504. */
  1505. append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
  1506. LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
  1507. append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
  1508. /* Load operation */
  1509. append_dec_op1(desc, ctx->class1_alg_type);
  1510. /* Perform operation */
  1511. ablkcipher_append_src_dst(desc);
  1512. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
  1513. DMA_TO_DEVICE);
  1514. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1515. dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
  1516. desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
  1517. dev_err(jrdev, "unable to map shared descriptor\n");
  1518. return -ENOMEM;
  1519. }
  1520. #ifdef DEBUG
  1521. print_hex_dump(KERN_ERR,
  1522. "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
  1523. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  1524. #endif
  1525. return 0;
  1526. }
  1527. /*
  1528. * aead_edesc - s/w-extended aead descriptor
  1529. * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
  1530. * @src_nents: number of segments in input scatterlist
  1531. * @dst_nents: number of segments in output scatterlist
  1532. * @iv_dma: dma address of iv for checking continuity and link table
1533. * @sec4_sg: pointer to the h/w link table
  1534. * @sec4_sg_bytes: length of dma mapped sec4_sg space
  1535. * @sec4_sg_dma: bus physical mapped address of h/w link table
  1536. * @hw_desc: the h/w job descriptor followed by any referenced link tables
  1537. */
  1538. struct aead_edesc {
  1539. int assoc_nents;
  1540. int src_nents;
  1541. int dst_nents;
  1542. dma_addr_t iv_dma;
  1543. int sec4_sg_bytes;
  1544. dma_addr_t sec4_sg_dma;
  1545. struct sec4_sg_entry *sec4_sg;
  1546. u32 hw_desc[];
  1547. };
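/*
 * The extended descriptor is allocated as a single contiguous block (see
 * aead_edesc_alloc() below):
 *
 *	[ struct aead_edesc | hw_desc (desc_bytes) | sec4_sg link table ]
 */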
  1548. /*
  1549. * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  1550. * @src_nents: number of segments in input scatterlist
  1551. * @dst_nents: number of segments in output scatterlist
  1552. * @iv_dma: dma address of iv for checking continuity and link table
1553. * @sec4_sg: pointer to the h/w link table
  1554. * @sec4_sg_bytes: length of dma mapped sec4_sg space
  1555. * @sec4_sg_dma: bus physical mapped address of h/w link table
  1556. * @hw_desc: the h/w job descriptor followed by any referenced link tables
  1557. */
  1558. struct ablkcipher_edesc {
  1559. int src_nents;
  1560. int dst_nents;
  1561. dma_addr_t iv_dma;
  1562. int sec4_sg_bytes;
  1563. dma_addr_t sec4_sg_dma;
  1564. struct sec4_sg_entry *sec4_sg;
1565. u32 hw_desc[];
  1566. };
  1567. static void caam_unmap(struct device *dev, struct scatterlist *src,
  1568. struct scatterlist *dst, int src_nents,
  1569. int dst_nents,
  1570. dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
  1571. int sec4_sg_bytes)
  1572. {
  1573. if (dst != src) {
  1574. dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
  1575. dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
  1576. } else {
  1577. dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
  1578. }
  1579. if (iv_dma)
  1580. dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
  1581. if (sec4_sg_bytes)
  1582. dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
  1583. DMA_TO_DEVICE);
  1584. }
  1585. static void aead_unmap(struct device *dev,
  1586. struct aead_edesc *edesc,
  1587. struct aead_request *req)
  1588. {
  1589. caam_unmap(dev, req->src, req->dst,
  1590. edesc->src_nents, edesc->dst_nents, 0, 0,
  1591. edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
  1592. }
  1593. static void ablkcipher_unmap(struct device *dev,
  1594. struct ablkcipher_edesc *edesc,
  1595. struct ablkcipher_request *req)
  1596. {
  1597. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1598. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1599. caam_unmap(dev, req->src, req->dst,
  1600. edesc->src_nents, edesc->dst_nents,
  1601. edesc->iv_dma, ivsize,
  1602. edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
  1603. }
  1604. static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1605. void *context)
  1606. {
  1607. struct aead_request *req = context;
  1608. struct aead_edesc *edesc;
  1609. #ifdef DEBUG
  1610. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1611. #endif
  1612. edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
  1613. if (err)
  1614. caam_jr_strstatus(jrdev, err);
  1615. aead_unmap(jrdev, edesc, req);
  1616. kfree(edesc);
  1617. aead_request_complete(req, err);
  1618. }
  1619. static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1620. void *context)
  1621. {
  1622. struct aead_request *req = context;
  1623. struct aead_edesc *edesc;
  1624. #ifdef DEBUG
  1625. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1626. #endif
  1627. edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
  1628. if (err)
  1629. caam_jr_strstatus(jrdev, err);
  1630. aead_unmap(jrdev, edesc, req);
  1631. /*
1632. * If the h/w authentication (ICV) check failed, return -EBADMSG
  1633. */
  1634. if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
  1635. err = -EBADMSG;
  1636. kfree(edesc);
  1637. aead_request_complete(req, err);
  1638. }
  1639. static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1640. void *context)
  1641. {
  1642. struct ablkcipher_request *req = context;
  1643. struct ablkcipher_edesc *edesc;
  1644. #ifdef DEBUG
  1645. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1646. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1647. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1648. #endif
  1649. edesc = (struct ablkcipher_edesc *)((char *)desc -
  1650. offsetof(struct ablkcipher_edesc, hw_desc));
  1651. if (err)
  1652. caam_jr_strstatus(jrdev, err);
  1653. #ifdef DEBUG
  1654. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1655. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1656. edesc->src_nents > 1 ? 100 : ivsize, 1);
  1657. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1658. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1659. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  1660. #endif
  1661. ablkcipher_unmap(jrdev, edesc, req);
  1662. kfree(edesc);
  1663. ablkcipher_request_complete(req, err);
  1664. }
  1665. static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1666. void *context)
  1667. {
  1668. struct ablkcipher_request *req = context;
  1669. struct ablkcipher_edesc *edesc;
  1670. #ifdef DEBUG
  1671. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1672. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1673. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1674. #endif
  1675. edesc = (struct ablkcipher_edesc *)((char *)desc -
  1676. offsetof(struct ablkcipher_edesc, hw_desc));
  1677. if (err)
  1678. caam_jr_strstatus(jrdev, err);
  1679. #ifdef DEBUG
  1680. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1681. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1682. ivsize, 1);
  1683. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1684. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1685. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  1686. #endif
  1687. ablkcipher_unmap(jrdev, edesc, req);
  1688. kfree(edesc);
  1689. ablkcipher_request_complete(req, err);
  1690. }
  1691. /*
  1692. * Fill in aead job descriptor
  1693. */
  1694. static void init_aead_job(struct aead_request *req,
  1695. struct aead_edesc *edesc,
  1696. bool all_contig, bool encrypt)
  1697. {
  1698. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1699. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1700. int authsize = ctx->authsize;
  1701. u32 *desc = edesc->hw_desc;
  1702. u32 out_options, in_options;
  1703. dma_addr_t dst_dma, src_dma;
  1704. int len, sec4_sg_index = 0;
  1705. dma_addr_t ptr;
  1706. u32 *sh_desc;
  1707. sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
  1708. ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
  1709. len = desc_len(sh_desc);
  1710. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1711. if (all_contig) {
  1712. src_dma = sg_dma_address(req->src);
  1713. in_options = 0;
  1714. } else {
  1715. src_dma = edesc->sec4_sg_dma;
  1716. sec4_sg_index += edesc->src_nents;
  1717. in_options = LDST_SGF;
  1718. }
  1719. append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
  1720. in_options);
  1721. dst_dma = src_dma;
  1722. out_options = in_options;
  1723. if (unlikely(req->src != req->dst)) {
  1724. if (!edesc->dst_nents) {
  1725. dst_dma = sg_dma_address(req->dst);
  1726. out_options = 0;
  1727. } else {
  1728. dst_dma = edesc->sec4_sg_dma +
  1729. sec4_sg_index *
  1730. sizeof(struct sec4_sg_entry);
  1731. out_options = LDST_SGF;
  1732. }
  1733. }
  1734. if (encrypt)
  1735. append_seq_out_ptr(desc, dst_dma,
  1736. req->assoclen + req->cryptlen + authsize,
  1737. out_options);
  1738. else
  1739. append_seq_out_ptr(desc, dst_dma,
  1740. req->assoclen + req->cryptlen - authsize,
  1741. out_options);
  1742. /* REG3 = assoclen */
  1743. append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
  1744. }
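/*
 * The job descriptor built above consists of: a header that points at the
 * shared descriptor (HDR_SHARE_DEFER | HDR_REVERSE), a SEQ IN PTR covering
 * assoclen + cryptlen bytes, a SEQ OUT PTR sized plus or minus authsize
 * depending on direction, and a MATH command that leaves assoclen in REG3
 * for the shared descriptor to consume.
 */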
  1745. static void init_gcm_job(struct aead_request *req,
  1746. struct aead_edesc *edesc,
  1747. bool all_contig, bool encrypt)
  1748. {
  1749. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1750. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1751. unsigned int ivsize = crypto_aead_ivsize(aead);
  1752. u32 *desc = edesc->hw_desc;
  1753. bool generic_gcm = (ivsize == 12);
  1754. unsigned int last;
  1755. init_aead_job(req, edesc, all_contig, encrypt);
  1756. /* BUG This should not be specific to generic GCM. */
  1757. last = 0;
  1758. if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
  1759. last = FIFOLD_TYPE_LAST1;
  1760. /* Read GCM IV */
  1761. append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
  1762. FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
  1763. /* Append Salt */
  1764. if (!generic_gcm)
  1765. append_data(desc, ctx->key + ctx->enckeylen, 4);
  1766. /* Append IV */
  1767. append_data(desc, req->iv, ivsize);
  1768. /* End of blank commands */
  1769. }
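/*
 * Note on init_gcm_job(): the IV FIFO LOAD always consumes 12 immediate
 * bytes - either the 12-byte IV of generic gcm(aes), or the 4-byte salt
 * stored after the AES key followed by the 8-byte rfc4106/rfc4543 IV.
 */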
  1770. static void init_authenc_job(struct aead_request *req,
  1771. struct aead_edesc *edesc,
  1772. bool all_contig, bool encrypt)
  1773. {
  1774. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1775. struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  1776. struct caam_aead_alg, aead);
  1777. unsigned int ivsize = crypto_aead_ivsize(aead);
  1778. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1779. const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
  1780. OP_ALG_AAI_CTR_MOD128);
  1781. const bool is_rfc3686 = alg->caam.rfc3686;
  1782. u32 *desc = edesc->hw_desc;
  1783. u32 ivoffset = 0;
  1784. /*
  1785. * AES-CTR needs to load IV in CONTEXT1 reg
  1786. * at an offset of 128bits (16bytes)
  1787. * CONTEXT1[255:128] = IV
  1788. */
  1789. if (ctr_mode)
  1790. ivoffset = 16;
  1791. /*
  1792. * RFC3686 specific:
  1793. * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  1794. */
  1795. if (is_rfc3686)
  1796. ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
  1797. init_aead_job(req, edesc, all_contig, encrypt);
  1798. if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
  1799. append_load_as_imm(desc, req->iv, ivsize,
  1800. LDST_CLASS_1_CCB |
  1801. LDST_SRCDST_BYTE_CONTEXT |
  1802. (ivoffset << LDST_OFFSET_SHIFT));
  1803. }
  1804. /*
  1805. * Fill in ablkcipher job descriptor
  1806. */
  1807. static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  1808. struct ablkcipher_edesc *edesc,
  1809. struct ablkcipher_request *req,
  1810. bool iv_contig)
  1811. {
  1812. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1813. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1814. u32 *desc = edesc->hw_desc;
  1815. u32 out_options = 0, in_options;
  1816. dma_addr_t dst_dma, src_dma;
  1817. int len, sec4_sg_index = 0;
  1818. #ifdef DEBUG
  1819. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  1820. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1821. ivsize, 1);
  1822. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  1823. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1824. edesc->src_nents ? 100 : req->nbytes, 1);
  1825. #endif
  1826. len = desc_len(sh_desc);
  1827. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1828. if (iv_contig) {
  1829. src_dma = edesc->iv_dma;
  1830. in_options = 0;
  1831. } else {
  1832. src_dma = edesc->sec4_sg_dma;
  1833. sec4_sg_index += edesc->src_nents + 1;
  1834. in_options = LDST_SGF;
  1835. }
  1836. append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
  1837. if (likely(req->src == req->dst)) {
  1838. if (!edesc->src_nents && iv_contig) {
  1839. dst_dma = sg_dma_address(req->src);
  1840. } else {
  1841. dst_dma = edesc->sec4_sg_dma +
  1842. sizeof(struct sec4_sg_entry);
  1843. out_options = LDST_SGF;
  1844. }
  1845. } else {
  1846. if (!edesc->dst_nents) {
  1847. dst_dma = sg_dma_address(req->dst);
  1848. } else {
  1849. dst_dma = edesc->sec4_sg_dma +
  1850. sec4_sg_index * sizeof(struct sec4_sg_entry);
  1851. out_options = LDST_SGF;
  1852. }
  1853. }
  1854. append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
  1855. }
  1856. /*
  1857. * Fill in ablkcipher givencrypt job descriptor
  1858. */
  1859. static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
  1860. struct ablkcipher_edesc *edesc,
  1861. struct ablkcipher_request *req,
  1862. bool iv_contig)
  1863. {
  1864. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1865. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1866. u32 *desc = edesc->hw_desc;
  1867. u32 out_options, in_options;
  1868. dma_addr_t dst_dma, src_dma;
  1869. int len, sec4_sg_index = 0;
  1870. #ifdef DEBUG
  1871. print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
  1872. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1873. ivsize, 1);
  1874. print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
  1875. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1876. edesc->src_nents ? 100 : req->nbytes, 1);
  1877. #endif
  1878. len = desc_len(sh_desc);
  1879. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1880. if (!edesc->src_nents) {
  1881. src_dma = sg_dma_address(req->src);
  1882. in_options = 0;
  1883. } else {
  1884. src_dma = edesc->sec4_sg_dma;
  1885. sec4_sg_index += edesc->src_nents;
  1886. in_options = LDST_SGF;
  1887. }
  1888. append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
  1889. if (iv_contig) {
  1890. dst_dma = edesc->iv_dma;
  1891. out_options = 0;
  1892. } else {
  1893. dst_dma = edesc->sec4_sg_dma +
  1894. sec4_sg_index * sizeof(struct sec4_sg_entry);
  1895. out_options = LDST_SGF;
  1896. }
  1897. append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
  1898. }
  1899. /*
  1900. * allocate and map the aead extended descriptor
  1901. */
  1902. static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
  1903. int desc_bytes, bool *all_contig_ptr,
  1904. bool encrypt)
  1905. {
  1906. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1907. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1908. struct device *jrdev = ctx->jrdev;
  1909. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  1910. CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
  1911. int src_nents, dst_nents = 0;
  1912. struct aead_edesc *edesc;
  1913. int sgc;
  1914. bool all_contig = true;
  1915. int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
  1916. unsigned int authsize = ctx->authsize;
  1917. if (unlikely(req->dst != req->src)) {
  1918. src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
  1919. dst_nents = sg_count(req->dst,
  1920. req->assoclen + req->cryptlen +
  1921. (encrypt ? authsize : (-authsize)));
  1922. } else {
  1923. src_nents = sg_count(req->src,
  1924. req->assoclen + req->cryptlen +
  1925. (encrypt ? authsize : 0));
  1926. }
  1927. /* Check if data are contiguous. */
  1928. all_contig = !src_nents;
  1929. if (!all_contig) {
  1930. src_nents = src_nents ? : 1;
  1931. sec4_sg_len = src_nents;
  1932. }
  1933. sec4_sg_len += dst_nents;
  1934. sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
  1935. /* allocate space for base edesc and hw desc commands, link tables */
  1936. edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
  1937. GFP_DMA | flags);
  1938. if (!edesc) {
  1939. dev_err(jrdev, "could not allocate extended descriptor\n");
  1940. return ERR_PTR(-ENOMEM);
  1941. }
  1942. if (likely(req->src == req->dst)) {
  1943. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  1944. DMA_BIDIRECTIONAL);
  1945. if (unlikely(!sgc)) {
  1946. dev_err(jrdev, "unable to map source\n");
  1947. kfree(edesc);
  1948. return ERR_PTR(-ENOMEM);
  1949. }
  1950. } else {
  1951. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  1952. DMA_TO_DEVICE);
  1953. if (unlikely(!sgc)) {
  1954. dev_err(jrdev, "unable to map source\n");
  1955. kfree(edesc);
  1956. return ERR_PTR(-ENOMEM);
  1957. }
  1958. sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
  1959. DMA_FROM_DEVICE);
  1960. if (unlikely(!sgc)) {
  1961. dev_err(jrdev, "unable to map destination\n");
  1962. dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
  1963. DMA_TO_DEVICE);
  1964. kfree(edesc);
  1965. return ERR_PTR(-ENOMEM);
  1966. }
  1967. }
  1968. edesc->src_nents = src_nents;
  1969. edesc->dst_nents = dst_nents;
  1970. edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
  1971. desc_bytes;
  1972. *all_contig_ptr = all_contig;
  1973. sec4_sg_index = 0;
  1974. if (!all_contig) {
  1975. sg_to_sec4_sg_last(req->src, src_nents,
  1976. edesc->sec4_sg + sec4_sg_index, 0);
  1977. sec4_sg_index += src_nents;
  1978. }
  1979. if (dst_nents) {
  1980. sg_to_sec4_sg_last(req->dst, dst_nents,
  1981. edesc->sec4_sg + sec4_sg_index, 0);
  1982. }
  1983. if (!sec4_sg_bytes)
  1984. return edesc;
  1985. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  1986. sec4_sg_bytes, DMA_TO_DEVICE);
  1987. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  1988. dev_err(jrdev, "unable to map S/G table\n");
  1989. aead_unmap(jrdev, edesc, req);
  1990. kfree(edesc);
  1991. return ERR_PTR(-ENOMEM);
  1992. }
  1993. edesc->sec4_sg_bytes = sec4_sg_bytes;
  1994. return edesc;
  1995. }
  1996. static int gcm_encrypt(struct aead_request *req)
  1997. {
  1998. struct aead_edesc *edesc;
  1999. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2000. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2001. struct device *jrdev = ctx->jrdev;
  2002. bool all_contig;
  2003. u32 *desc;
  2004. int ret = 0;
  2005. /* allocate extended descriptor */
  2006. edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
  2007. if (IS_ERR(edesc))
  2008. return PTR_ERR(edesc);
  2009. /* Create and submit job descriptor */
  2010. init_gcm_job(req, edesc, all_contig, true);
  2011. #ifdef DEBUG
  2012. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2013. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2014. desc_bytes(edesc->hw_desc), 1);
  2015. #endif
  2016. desc = edesc->hw_desc;
  2017. ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
  2018. if (!ret) {
  2019. ret = -EINPROGRESS;
  2020. } else {
  2021. aead_unmap(jrdev, edesc, req);
  2022. kfree(edesc);
  2023. }
  2024. return ret;
  2025. }
  2026. static int ipsec_gcm_encrypt(struct aead_request *req)
  2027. {
  2028. if (req->assoclen < 8)
  2029. return -EINVAL;
  2030. return gcm_encrypt(req);
  2031. }
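/*
 * The assoclen < 8 check reflects IPsec usage of rfc4106/rfc4543, where the
 * associated data is at least the 4-byte SPI plus the 4-byte sequence number
 * (12 bytes with extended sequence numbers).
 */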
  2032. static int aead_encrypt(struct aead_request *req)
  2033. {
  2034. struct aead_edesc *edesc;
  2035. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2036. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2037. struct device *jrdev = ctx->jrdev;
  2038. bool all_contig;
  2039. u32 *desc;
  2040. int ret = 0;
  2041. /* allocate extended descriptor */
  2042. edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
  2043. &all_contig, true);
  2044. if (IS_ERR(edesc))
  2045. return PTR_ERR(edesc);
  2046. /* Create and submit job descriptor */
  2047. init_authenc_job(req, edesc, all_contig, true);
  2048. #ifdef DEBUG
  2049. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2050. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2051. desc_bytes(edesc->hw_desc), 1);
  2052. #endif
  2053. desc = edesc->hw_desc;
  2054. ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
  2055. if (!ret) {
  2056. ret = -EINPROGRESS;
  2057. } else {
  2058. aead_unmap(jrdev, edesc, req);
  2059. kfree(edesc);
  2060. }
  2061. return ret;
  2062. }
  2063. static int gcm_decrypt(struct aead_request *req)
  2064. {
  2065. struct aead_edesc *edesc;
  2066. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2067. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2068. struct device *jrdev = ctx->jrdev;
  2069. bool all_contig;
  2070. u32 *desc;
  2071. int ret = 0;
  2072. /* allocate extended descriptor */
  2073. edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
  2074. if (IS_ERR(edesc))
  2075. return PTR_ERR(edesc);
2076. /* Create and submit job descriptor */
  2077. init_gcm_job(req, edesc, all_contig, false);
  2078. #ifdef DEBUG
  2079. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2080. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2081. desc_bytes(edesc->hw_desc), 1);
  2082. #endif
  2083. desc = edesc->hw_desc;
  2084. ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
  2085. if (!ret) {
  2086. ret = -EINPROGRESS;
  2087. } else {
  2088. aead_unmap(jrdev, edesc, req);
  2089. kfree(edesc);
  2090. }
  2091. return ret;
  2092. }
  2093. static int ipsec_gcm_decrypt(struct aead_request *req)
  2094. {
  2095. if (req->assoclen < 8)
  2096. return -EINVAL;
  2097. return gcm_decrypt(req);
  2098. }
  2099. static int aead_decrypt(struct aead_request *req)
  2100. {
  2101. struct aead_edesc *edesc;
  2102. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2103. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2104. struct device *jrdev = ctx->jrdev;
  2105. bool all_contig;
  2106. u32 *desc;
  2107. int ret = 0;
  2108. /* allocate extended descriptor */
  2109. edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
  2110. &all_contig, false);
  2111. if (IS_ERR(edesc))
  2112. return PTR_ERR(edesc);
  2113. #ifdef DEBUG
  2114. print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
  2115. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  2116. req->assoclen + req->cryptlen, 1);
  2117. #endif
2118. /* Create and submit job descriptor */
  2119. init_authenc_job(req, edesc, all_contig, false);
  2120. #ifdef DEBUG
  2121. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2122. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2123. desc_bytes(edesc->hw_desc), 1);
  2124. #endif
  2125. desc = edesc->hw_desc;
  2126. ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
  2127. if (!ret) {
  2128. ret = -EINPROGRESS;
  2129. } else {
  2130. aead_unmap(jrdev, edesc, req);
  2131. kfree(edesc);
  2132. }
  2133. return ret;
  2134. }
  2135. /*
2136. * allocate and map the ablkcipher extended descriptor
  2137. */
  2138. static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
  2139. *req, int desc_bytes,
  2140. bool *iv_contig_out)
  2141. {
  2142. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2143. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2144. struct device *jrdev = ctx->jrdev;
  2145. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2146. CRYPTO_TFM_REQ_MAY_SLEEP)) ?
  2147. GFP_KERNEL : GFP_ATOMIC;
  2148. int src_nents, dst_nents = 0, sec4_sg_bytes;
  2149. struct ablkcipher_edesc *edesc;
  2150. dma_addr_t iv_dma = 0;
  2151. bool iv_contig = false;
  2152. int sgc;
  2153. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  2154. int sec4_sg_index;
  2155. src_nents = sg_count(req->src, req->nbytes);
  2156. if (req->dst != req->src)
  2157. dst_nents = sg_count(req->dst, req->nbytes);
  2158. if (likely(req->src == req->dst)) {
  2159. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2160. DMA_BIDIRECTIONAL);
  2161. } else {
  2162. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2163. DMA_TO_DEVICE);
  2164. sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
  2165. DMA_FROM_DEVICE);
  2166. }
  2167. iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
  2168. if (dma_mapping_error(jrdev, iv_dma)) {
  2169. dev_err(jrdev, "unable to map IV\n");
  2170. return ERR_PTR(-ENOMEM);
  2171. }
  2172. /*
  2173. * Check if iv can be contiguous with source and destination.
  2174. * If so, include it. If not, create scatterlist.
  2175. */
  2176. if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
  2177. iv_contig = true;
  2178. else
  2179. src_nents = src_nents ? : 1;
  2180. sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
  2181. sizeof(struct sec4_sg_entry);
  2182. /* allocate space for base edesc and hw desc commands, link tables */
  2183. edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
  2184. GFP_DMA | flags);
  2185. if (!edesc) {
  2186. dev_err(jrdev, "could not allocate extended descriptor\n");
  2187. return ERR_PTR(-ENOMEM);
  2188. }
  2189. edesc->src_nents = src_nents;
  2190. edesc->dst_nents = dst_nents;
  2191. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2192. edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
  2193. desc_bytes;
  2194. sec4_sg_index = 0;
  2195. if (!iv_contig) {
  2196. dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
  2197. sg_to_sec4_sg_last(req->src, src_nents,
  2198. edesc->sec4_sg + 1, 0);
  2199. sec4_sg_index += 1 + src_nents;
  2200. }
  2201. if (dst_nents) {
  2202. sg_to_sec4_sg_last(req->dst, dst_nents,
  2203. edesc->sec4_sg + sec4_sg_index, 0);
  2204. }
  2205. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2206. sec4_sg_bytes, DMA_TO_DEVICE);
  2207. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  2208. dev_err(jrdev, "unable to map S/G table\n");
  2209. return ERR_PTR(-ENOMEM);
  2210. }
  2211. edesc->iv_dma = iv_dma;
  2212. #ifdef DEBUG
  2213. print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
  2214. DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
  2215. sec4_sg_bytes, 1);
  2216. #endif
  2217. *iv_contig_out = iv_contig;
  2218. return edesc;
  2219. }
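/*
 * When the IV is not contiguous with the source, the sec4_sg table built
 * above is laid out as [ IV entry | src entries | dst entries ], and the
 * job descriptor's sequence-in pointer covers IV + payload through that
 * table (see init_ablkcipher_job()).
 */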
  2220. static int ablkcipher_encrypt(struct ablkcipher_request *req)
  2221. {
  2222. struct ablkcipher_edesc *edesc;
  2223. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2224. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2225. struct device *jrdev = ctx->jrdev;
  2226. bool iv_contig;
  2227. u32 *desc;
  2228. int ret = 0;
  2229. /* allocate extended descriptor */
  2230. edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
  2231. CAAM_CMD_SZ, &iv_contig);
  2232. if (IS_ERR(edesc))
  2233. return PTR_ERR(edesc);
2234. /* Create and submit job descriptor */
  2235. init_ablkcipher_job(ctx->sh_desc_enc,
  2236. ctx->sh_desc_enc_dma, edesc, req, iv_contig);
  2237. #ifdef DEBUG
  2238. print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
  2239. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2240. desc_bytes(edesc->hw_desc), 1);
  2241. #endif
  2242. desc = edesc->hw_desc;
  2243. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
  2244. if (!ret) {
  2245. ret = -EINPROGRESS;
  2246. } else {
  2247. ablkcipher_unmap(jrdev, edesc, req);
  2248. kfree(edesc);
  2249. }
  2250. return ret;
  2251. }
  2252. static int ablkcipher_decrypt(struct ablkcipher_request *req)
  2253. {
  2254. struct ablkcipher_edesc *edesc;
  2255. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2256. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2257. struct device *jrdev = ctx->jrdev;
  2258. bool iv_contig;
  2259. u32 *desc;
  2260. int ret = 0;
  2261. /* allocate extended descriptor */
  2262. edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
  2263. CAAM_CMD_SZ, &iv_contig);
  2264. if (IS_ERR(edesc))
  2265. return PTR_ERR(edesc);
2266. /* Create and submit job descriptor */
  2267. init_ablkcipher_job(ctx->sh_desc_dec,
  2268. ctx->sh_desc_dec_dma, edesc, req, iv_contig);
  2269. desc = edesc->hw_desc;
  2270. #ifdef DEBUG
  2271. print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
  2272. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2273. desc_bytes(edesc->hw_desc), 1);
  2274. #endif
  2275. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
  2276. if (!ret) {
  2277. ret = -EINPROGRESS;
  2278. } else {
  2279. ablkcipher_unmap(jrdev, edesc, req);
  2280. kfree(edesc);
  2281. }
  2282. return ret;
  2283. }
  2284. /*
  2285. * allocate and map the ablkcipher extended descriptor
  2286. * for ablkcipher givencrypt
  2287. */
  2288. static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
  2289. struct skcipher_givcrypt_request *greq,
  2290. int desc_bytes,
  2291. bool *iv_contig_out)
  2292. {
  2293. struct ablkcipher_request *req = &greq->creq;
  2294. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2295. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2296. struct device *jrdev = ctx->jrdev;
  2297. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2298. CRYPTO_TFM_REQ_MAY_SLEEP)) ?
  2299. GFP_KERNEL : GFP_ATOMIC;
  2300. int src_nents, dst_nents = 0, sec4_sg_bytes;
  2301. struct ablkcipher_edesc *edesc;
  2302. dma_addr_t iv_dma = 0;
  2303. bool iv_contig = false;
  2304. int sgc;
  2305. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  2306. int sec4_sg_index;
  2307. src_nents = sg_count(req->src, req->nbytes);
  2308. if (unlikely(req->dst != req->src))
  2309. dst_nents = sg_count(req->dst, req->nbytes);
  2310. if (likely(req->src == req->dst)) {
  2311. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2312. DMA_BIDIRECTIONAL);
  2313. } else {
  2314. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2315. DMA_TO_DEVICE);
  2316. sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
  2317. DMA_FROM_DEVICE);
  2318. }
  2319. /*
  2320. * Check if iv can be contiguous with source and destination.
  2321. * If so, include it. If not, create scatterlist.
  2322. */
  2323. iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
  2324. if (dma_mapping_error(jrdev, iv_dma)) {
  2325. dev_err(jrdev, "unable to map IV\n");
  2326. return ERR_PTR(-ENOMEM);
  2327. }
  2328. if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
  2329. iv_contig = true;
  2330. else
  2331. dst_nents = dst_nents ? : 1;
  2332. sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
  2333. sizeof(struct sec4_sg_entry);
  2334. /* allocate space for base edesc and hw desc commands, link tables */
  2335. edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
  2336. GFP_DMA | flags);
  2337. if (!edesc) {
  2338. dev_err(jrdev, "could not allocate extended descriptor\n");
  2339. return ERR_PTR(-ENOMEM);
  2340. }
  2341. edesc->src_nents = src_nents;
  2342. edesc->dst_nents = dst_nents;
  2343. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2344. edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
  2345. desc_bytes;
  2346. sec4_sg_index = 0;
  2347. if (src_nents) {
  2348. sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
  2349. sec4_sg_index += src_nents;
  2350. }
  2351. if (!iv_contig) {
  2352. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2353. iv_dma, ivsize, 0);
  2354. sec4_sg_index += 1;
  2355. sg_to_sec4_sg_last(req->dst, dst_nents,
  2356. edesc->sec4_sg + sec4_sg_index, 0);
  2357. }
  2358. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2359. sec4_sg_bytes, DMA_TO_DEVICE);
  2360. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  2361. dev_err(jrdev, "unable to map S/G table\n");
  2362. return ERR_PTR(-ENOMEM);
  2363. }
  2364. edesc->iv_dma = iv_dma;
  2365. #ifdef DEBUG
  2366. print_hex_dump(KERN_ERR,
  2367. "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
  2368. DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
  2369. sec4_sg_bytes, 1);
  2370. #endif
  2371. *iv_contig_out = iv_contig;
  2372. return edesc;
  2373. }
  2374. static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
  2375. {
  2376. struct ablkcipher_request *req = &creq->creq;
  2377. struct ablkcipher_edesc *edesc;
  2378. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2379. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2380. struct device *jrdev = ctx->jrdev;
  2381. bool iv_contig;
  2382. u32 *desc;
  2383. int ret = 0;
  2384. /* allocate extended descriptor */
  2385. edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
  2386. CAAM_CMD_SZ, &iv_contig);
  2387. if (IS_ERR(edesc))
  2388. return PTR_ERR(edesc);
2389. /* Create and submit job descriptor */
  2390. init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
  2391. edesc, req, iv_contig);
  2392. #ifdef DEBUG
  2393. print_hex_dump(KERN_ERR,
  2394. "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
  2395. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2396. desc_bytes(edesc->hw_desc), 1);
  2397. #endif
  2398. desc = edesc->hw_desc;
  2399. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
  2400. if (!ret) {
  2401. ret = -EINPROGRESS;
  2402. } else {
  2403. ablkcipher_unmap(jrdev, edesc, req);
  2404. kfree(edesc);
  2405. }
  2406. return ret;
  2407. }
  2408. #define template_aead template_u.aead
  2409. #define template_ablkcipher template_u.ablkcipher
  2410. struct caam_alg_template {
  2411. char name[CRYPTO_MAX_ALG_NAME];
  2412. char driver_name[CRYPTO_MAX_ALG_NAME];
  2413. unsigned int blocksize;
  2414. u32 type;
  2415. union {
  2416. struct ablkcipher_alg ablkcipher;
  2417. } template_u;
  2418. u32 class1_alg_type;
  2419. u32 class2_alg_type;
  2420. u32 alg_op;
  2421. };
  2422. static struct caam_alg_template driver_algs[] = {
  2423. /* ablkcipher descriptor */
  2424. {
  2425. .name = "cbc(aes)",
  2426. .driver_name = "cbc-aes-caam",
  2427. .blocksize = AES_BLOCK_SIZE,
  2428. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2429. .template_ablkcipher = {
  2430. .setkey = ablkcipher_setkey,
  2431. .encrypt = ablkcipher_encrypt,
  2432. .decrypt = ablkcipher_decrypt,
  2433. .givencrypt = ablkcipher_givencrypt,
  2434. .geniv = "<built-in>",
  2435. .min_keysize = AES_MIN_KEY_SIZE,
  2436. .max_keysize = AES_MAX_KEY_SIZE,
  2437. .ivsize = AES_BLOCK_SIZE,
  2438. },
  2439. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2440. },
  2441. {
  2442. .name = "cbc(des3_ede)",
  2443. .driver_name = "cbc-3des-caam",
  2444. .blocksize = DES3_EDE_BLOCK_SIZE,
  2445. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2446. .template_ablkcipher = {
  2447. .setkey = ablkcipher_setkey,
  2448. .encrypt = ablkcipher_encrypt,
  2449. .decrypt = ablkcipher_decrypt,
  2450. .givencrypt = ablkcipher_givencrypt,
  2451. .geniv = "<built-in>",
  2452. .min_keysize = DES3_EDE_KEY_SIZE,
  2453. .max_keysize = DES3_EDE_KEY_SIZE,
  2454. .ivsize = DES3_EDE_BLOCK_SIZE,
  2455. },
  2456. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2457. },
  2458. {
  2459. .name = "cbc(des)",
  2460. .driver_name = "cbc-des-caam",
  2461. .blocksize = DES_BLOCK_SIZE,
  2462. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2463. .template_ablkcipher = {
  2464. .setkey = ablkcipher_setkey,
  2465. .encrypt = ablkcipher_encrypt,
  2466. .decrypt = ablkcipher_decrypt,
  2467. .givencrypt = ablkcipher_givencrypt,
  2468. .geniv = "<built-in>",
  2469. .min_keysize = DES_KEY_SIZE,
  2470. .max_keysize = DES_KEY_SIZE,
  2471. .ivsize = DES_BLOCK_SIZE,
  2472. },
  2473. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2474. },
  2475. {
  2476. .name = "ctr(aes)",
  2477. .driver_name = "ctr-aes-caam",
  2478. .blocksize = 1,
  2479. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2480. .template_ablkcipher = {
  2481. .setkey = ablkcipher_setkey,
  2482. .encrypt = ablkcipher_encrypt,
  2483. .decrypt = ablkcipher_decrypt,
  2484. .geniv = "chainiv",
  2485. .min_keysize = AES_MIN_KEY_SIZE,
  2486. .max_keysize = AES_MAX_KEY_SIZE,
  2487. .ivsize = AES_BLOCK_SIZE,
  2488. },
  2489. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  2490. },
  2491. {
  2492. .name = "rfc3686(ctr(aes))",
  2493. .driver_name = "rfc3686-ctr-aes-caam",
  2494. .blocksize = 1,
  2495. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2496. .template_ablkcipher = {
  2497. .setkey = ablkcipher_setkey,
  2498. .encrypt = ablkcipher_encrypt,
  2499. .decrypt = ablkcipher_decrypt,
  2500. .givencrypt = ablkcipher_givencrypt,
  2501. .geniv = "<built-in>",
  2502. .min_keysize = AES_MIN_KEY_SIZE +
  2503. CTR_RFC3686_NONCE_SIZE,
  2504. .max_keysize = AES_MAX_KEY_SIZE +
  2505. CTR_RFC3686_NONCE_SIZE,
  2506. .ivsize = CTR_RFC3686_IV_SIZE,
  2507. },
  2508. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  2509. },
  2510. {
  2511. .name = "xts(aes)",
  2512. .driver_name = "xts-aes-caam",
  2513. .blocksize = AES_BLOCK_SIZE,
  2514. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2515. .template_ablkcipher = {
  2516. .setkey = xts_ablkcipher_setkey,
  2517. .encrypt = ablkcipher_encrypt,
  2518. .decrypt = ablkcipher_decrypt,
  2519. .geniv = "eseqiv",
  2520. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  2521. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  2522. .ivsize = AES_BLOCK_SIZE,
  2523. },
  2524. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
  2525. },
  2526. };
  2527. static struct caam_aead_alg driver_aeads[] = {
  2528. {
  2529. .aead = {
  2530. .base = {
  2531. .cra_name = "rfc4106(gcm(aes))",
  2532. .cra_driver_name = "rfc4106-gcm-aes-caam",
  2533. .cra_blocksize = 1,
  2534. },
  2535. .setkey = rfc4106_setkey,
  2536. .setauthsize = rfc4106_setauthsize,
  2537. .encrypt = ipsec_gcm_encrypt,
  2538. .decrypt = ipsec_gcm_decrypt,
  2539. .ivsize = 8,
  2540. .maxauthsize = AES_BLOCK_SIZE,
  2541. },
  2542. .caam = {
  2543. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2544. },
  2545. },
  2546. {
  2547. .aead = {
  2548. .base = {
  2549. .cra_name = "rfc4543(gcm(aes))",
  2550. .cra_driver_name = "rfc4543-gcm-aes-caam",
  2551. .cra_blocksize = 1,
  2552. },
  2553. .setkey = rfc4543_setkey,
  2554. .setauthsize = rfc4543_setauthsize,
  2555. .encrypt = ipsec_gcm_encrypt,
  2556. .decrypt = ipsec_gcm_decrypt,
  2557. .ivsize = 8,
  2558. .maxauthsize = AES_BLOCK_SIZE,
  2559. },
  2560. .caam = {
  2561. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2562. },
  2563. },
  2564. /* Galois Counter Mode */
  2565. {
  2566. .aead = {
  2567. .base = {
  2568. .cra_name = "gcm(aes)",
  2569. .cra_driver_name = "gcm-aes-caam",
  2570. .cra_blocksize = 1,
  2571. },
  2572. .setkey = gcm_setkey,
  2573. .setauthsize = gcm_setauthsize,
  2574. .encrypt = gcm_encrypt,
  2575. .decrypt = gcm_decrypt,
  2576. .ivsize = 12,
  2577. .maxauthsize = AES_BLOCK_SIZE,
  2578. },
  2579. .caam = {
  2580. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2581. },
  2582. },
  2583. /* single-pass ipsec_esp descriptor */
  2584. {
  2585. .aead = {
  2586. .base = {
  2587. .cra_name = "authenc(hmac(md5),"
  2588. "ecb(cipher_null))",
  2589. .cra_driver_name = "authenc-hmac-md5-"
  2590. "ecb-cipher_null-caam",
  2591. .cra_blocksize = NULL_BLOCK_SIZE,
  2592. },
  2593. .setkey = aead_setkey,
  2594. .setauthsize = aead_setauthsize,
  2595. .encrypt = aead_encrypt,
  2596. .decrypt = aead_decrypt,
  2597. .ivsize = NULL_IV_SIZE,
  2598. .maxauthsize = MD5_DIGEST_SIZE,
  2599. },
  2600. .caam = {
  2601. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2602. OP_ALG_AAI_HMAC_PRECOMP,
  2603. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2604. },
  2605. },
  2606. {
  2607. .aead = {
  2608. .base = {
  2609. .cra_name = "authenc(hmac(sha1),"
  2610. "ecb(cipher_null))",
  2611. .cra_driver_name = "authenc-hmac-sha1-"
  2612. "ecb-cipher_null-caam",
  2613. .cra_blocksize = NULL_BLOCK_SIZE,
  2614. },
  2615. .setkey = aead_setkey,
  2616. .setauthsize = aead_setauthsize,
  2617. .encrypt = aead_encrypt,
  2618. .decrypt = aead_decrypt,
  2619. .ivsize = NULL_IV_SIZE,
  2620. .maxauthsize = SHA1_DIGEST_SIZE,
  2621. },
  2622. .caam = {
  2623. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2624. OP_ALG_AAI_HMAC_PRECOMP,
  2625. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2626. },
  2627. },
  2628. {
  2629. .aead = {
  2630. .base = {
  2631. .cra_name = "authenc(hmac(sha224),"
  2632. "ecb(cipher_null))",
  2633. .cra_driver_name = "authenc-hmac-sha224-"
  2634. "ecb-cipher_null-caam",
  2635. .cra_blocksize = NULL_BLOCK_SIZE,
  2636. },
  2637. .setkey = aead_setkey,
  2638. .setauthsize = aead_setauthsize,
  2639. .encrypt = aead_encrypt,
  2640. .decrypt = aead_decrypt,
  2641. .ivsize = NULL_IV_SIZE,
  2642. .maxauthsize = SHA224_DIGEST_SIZE,
  2643. },
  2644. .caam = {
  2645. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2646. OP_ALG_AAI_HMAC_PRECOMP,
  2647. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2648. },
  2649. },
  2650. {
  2651. .aead = {
  2652. .base = {
  2653. .cra_name = "authenc(hmac(sha256),"
  2654. "ecb(cipher_null))",
  2655. .cra_driver_name = "authenc-hmac-sha256-"
  2656. "ecb-cipher_null-caam",
  2657. .cra_blocksize = NULL_BLOCK_SIZE,
  2658. },
  2659. .setkey = aead_setkey,
  2660. .setauthsize = aead_setauthsize,
  2661. .encrypt = aead_encrypt,
  2662. .decrypt = aead_decrypt,
  2663. .ivsize = NULL_IV_SIZE,
  2664. .maxauthsize = SHA256_DIGEST_SIZE,
  2665. },
  2666. .caam = {
  2667. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2668. OP_ALG_AAI_HMAC_PRECOMP,
  2669. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2670. },
  2671. },
  2672. {
  2673. .aead = {
  2674. .base = {
  2675. .cra_name = "authenc(hmac(sha384),"
  2676. "ecb(cipher_null))",
  2677. .cra_driver_name = "authenc-hmac-sha384-"
  2678. "ecb-cipher_null-caam",
  2679. .cra_blocksize = NULL_BLOCK_SIZE,
  2680. },
  2681. .setkey = aead_setkey,
  2682. .setauthsize = aead_setauthsize,
  2683. .encrypt = aead_encrypt,
  2684. .decrypt = aead_decrypt,
  2685. .ivsize = NULL_IV_SIZE,
  2686. .maxauthsize = SHA384_DIGEST_SIZE,
  2687. },
  2688. .caam = {
  2689. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2690. OP_ALG_AAI_HMAC_PRECOMP,
  2691. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2692. },
  2693. },
  2694. {
  2695. .aead = {
  2696. .base = {
  2697. .cra_name = "authenc(hmac(sha512),"
  2698. "ecb(cipher_null))",
  2699. .cra_driver_name = "authenc-hmac-sha512-"
  2700. "ecb-cipher_null-caam",
  2701. .cra_blocksize = NULL_BLOCK_SIZE,
  2702. },
  2703. .setkey = aead_setkey,
  2704. .setauthsize = aead_setauthsize,
  2705. .encrypt = aead_encrypt,
  2706. .decrypt = aead_decrypt,
  2707. .ivsize = NULL_IV_SIZE,
  2708. .maxauthsize = SHA512_DIGEST_SIZE,
  2709. },
  2710. .caam = {
  2711. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2712. OP_ALG_AAI_HMAC_PRECOMP,
  2713. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2714. },
  2715. },
  2716. {
  2717. .aead = {
  2718. .base = {
  2719. .cra_name = "authenc(hmac(md5),cbc(aes))",
  2720. .cra_driver_name = "authenc-hmac-md5-"
  2721. "cbc-aes-caam",
  2722. .cra_blocksize = AES_BLOCK_SIZE,
  2723. },
  2724. .setkey = aead_setkey,
  2725. .setauthsize = aead_setauthsize,
  2726. .encrypt = aead_encrypt,
  2727. .decrypt = aead_decrypt,
  2728. .ivsize = AES_BLOCK_SIZE,
  2729. .maxauthsize = MD5_DIGEST_SIZE,
  2730. },
  2731. .caam = {
  2732. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2733. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2734. OP_ALG_AAI_HMAC_PRECOMP,
  2735. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2736. },
  2737. },
  2738. {
  2739. .aead = {
  2740. .base = {
  2741. .cra_name = "echainiv(authenc(hmac(md5),"
  2742. "cbc(aes)))",
  2743. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  2744. "cbc-aes-caam",
  2745. .cra_blocksize = AES_BLOCK_SIZE,
  2746. },
  2747. .setkey = aead_setkey,
  2748. .setauthsize = aead_setauthsize,
  2749. .encrypt = aead_encrypt,
  2750. .decrypt = aead_decrypt,
  2751. .ivsize = AES_BLOCK_SIZE,
  2752. .maxauthsize = MD5_DIGEST_SIZE,
  2753. },
  2754. .caam = {
  2755. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2756. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2757. OP_ALG_AAI_HMAC_PRECOMP,
  2758. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2759. .geniv = true,
  2760. },
  2761. },
  2762. {
  2763. .aead = {
  2764. .base = {
  2765. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  2766. .cra_driver_name = "authenc-hmac-sha1-"
  2767. "cbc-aes-caam",
  2768. .cra_blocksize = AES_BLOCK_SIZE,
  2769. },
  2770. .setkey = aead_setkey,
  2771. .setauthsize = aead_setauthsize,
  2772. .encrypt = aead_encrypt,
  2773. .decrypt = aead_decrypt,
  2774. .ivsize = AES_BLOCK_SIZE,
  2775. .maxauthsize = SHA1_DIGEST_SIZE,
  2776. },
  2777. .caam = {
  2778. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2779. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2780. OP_ALG_AAI_HMAC_PRECOMP,
  2781. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2782. },
  2783. },
  2784. {
  2785. .aead = {
  2786. .base = {
  2787. .cra_name = "echainiv(authenc(hmac(sha1),"
  2788. "cbc(aes)))",
  2789. .cra_driver_name = "echainiv-authenc-"
  2790. "hmac-sha1-cbc-aes-caam",
  2791. .cra_blocksize = AES_BLOCK_SIZE,
  2792. },
  2793. .setkey = aead_setkey,
  2794. .setauthsize = aead_setauthsize,
  2795. .encrypt = aead_encrypt,
  2796. .decrypt = aead_decrypt,
  2797. .ivsize = AES_BLOCK_SIZE,
  2798. .maxauthsize = SHA1_DIGEST_SIZE,
  2799. },
  2800. .caam = {
  2801. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2802. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2803. OP_ALG_AAI_HMAC_PRECOMP,
  2804. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2805. .geniv = true,
  2806. },
  2807. },
  2808. {
  2809. .aead = {
  2810. .base = {
  2811. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  2812. .cra_driver_name = "authenc-hmac-sha224-"
  2813. "cbc-aes-caam",
  2814. .cra_blocksize = AES_BLOCK_SIZE,
  2815. },
  2816. .setkey = aead_setkey,
  2817. .setauthsize = aead_setauthsize,
  2818. .encrypt = aead_encrypt,
  2819. .decrypt = aead_decrypt,
  2820. .ivsize = AES_BLOCK_SIZE,
  2821. .maxauthsize = SHA224_DIGEST_SIZE,
  2822. },
  2823. .caam = {
  2824. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2825. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2826. OP_ALG_AAI_HMAC_PRECOMP,
  2827. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2828. },
  2829. },
  2830. {
  2831. .aead = {
  2832. .base = {
  2833. .cra_name = "echainiv(authenc(hmac(sha224),"
  2834. "cbc(aes)))",
  2835. .cra_driver_name = "echainiv-authenc-"
  2836. "hmac-sha224-cbc-aes-caam",
  2837. .cra_blocksize = AES_BLOCK_SIZE,
  2838. },
  2839. .setkey = aead_setkey,
  2840. .setauthsize = aead_setauthsize,
  2841. .encrypt = aead_encrypt,
  2842. .decrypt = aead_decrypt,
  2843. .ivsize = AES_BLOCK_SIZE,
  2844. .maxauthsize = SHA224_DIGEST_SIZE,
  2845. },
  2846. .caam = {
  2847. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2848. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2849. OP_ALG_AAI_HMAC_PRECOMP,
  2850. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2851. .geniv = true,
  2852. },
  2853. },
  2854. {
  2855. .aead = {
  2856. .base = {
  2857. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  2858. .cra_driver_name = "authenc-hmac-sha256-"
  2859. "cbc-aes-caam",
  2860. .cra_blocksize = AES_BLOCK_SIZE,
  2861. },
  2862. .setkey = aead_setkey,
  2863. .setauthsize = aead_setauthsize,
  2864. .encrypt = aead_encrypt,
  2865. .decrypt = aead_decrypt,
  2866. .ivsize = AES_BLOCK_SIZE,
  2867. .maxauthsize = SHA256_DIGEST_SIZE,
  2868. },
  2869. .caam = {
  2870. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2871. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2872. OP_ALG_AAI_HMAC_PRECOMP,
  2873. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2874. },
  2875. },
  2876. {
  2877. .aead = {
  2878. .base = {
  2879. .cra_name = "echainiv(authenc(hmac(sha256),"
  2880. "cbc(aes)))",
  2881. .cra_driver_name = "echainiv-authenc-"
  2882. "hmac-sha256-cbc-aes-caam",
  2883. .cra_blocksize = AES_BLOCK_SIZE,
  2884. },
  2885. .setkey = aead_setkey,
  2886. .setauthsize = aead_setauthsize,
  2887. .encrypt = aead_encrypt,
  2888. .decrypt = aead_decrypt,
  2889. .ivsize = AES_BLOCK_SIZE,
  2890. .maxauthsize = SHA256_DIGEST_SIZE,
  2891. },
  2892. .caam = {
  2893. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2894. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2895. OP_ALG_AAI_HMAC_PRECOMP,
  2896. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2897. .geniv = true,
  2898. },
  2899. },
  2900. {
  2901. .aead = {
  2902. .base = {
  2903. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  2904. .cra_driver_name = "authenc-hmac-sha384-"
  2905. "cbc-aes-caam",
  2906. .cra_blocksize = AES_BLOCK_SIZE,
  2907. },
  2908. .setkey = aead_setkey,
  2909. .setauthsize = aead_setauthsize,
  2910. .encrypt = aead_encrypt,
  2911. .decrypt = aead_decrypt,
  2912. .ivsize = AES_BLOCK_SIZE,
  2913. .maxauthsize = SHA384_DIGEST_SIZE,
  2914. },
  2915. .caam = {
  2916. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2917. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2918. OP_ALG_AAI_HMAC_PRECOMP,
  2919. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2920. },
  2921. },
  2922. {
  2923. .aead = {
  2924. .base = {
  2925. .cra_name = "echainiv(authenc(hmac(sha384),"
  2926. "cbc(aes)))",
  2927. .cra_driver_name = "echainiv-authenc-"
  2928. "hmac-sha384-cbc-aes-caam",
  2929. .cra_blocksize = AES_BLOCK_SIZE,
  2930. },
  2931. .setkey = aead_setkey,
  2932. .setauthsize = aead_setauthsize,
  2933. .encrypt = aead_encrypt,
  2934. .decrypt = aead_decrypt,
  2935. .ivsize = AES_BLOCK_SIZE,
  2936. .maxauthsize = SHA384_DIGEST_SIZE,
  2937. },
  2938. .caam = {
  2939. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2940. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2941. OP_ALG_AAI_HMAC_PRECOMP,
  2942. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2943. .geniv = true,
  2944. },
  2945. },
  2946. {
  2947. .aead = {
  2948. .base = {
  2949. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  2950. .cra_driver_name = "authenc-hmac-sha512-"
  2951. "cbc-aes-caam",
  2952. .cra_blocksize = AES_BLOCK_SIZE,
  2953. },
  2954. .setkey = aead_setkey,
  2955. .setauthsize = aead_setauthsize,
  2956. .encrypt = aead_encrypt,
  2957. .decrypt = aead_decrypt,
  2958. .ivsize = AES_BLOCK_SIZE,
  2959. .maxauthsize = SHA512_DIGEST_SIZE,
  2960. },
  2961. .caam = {
  2962. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2963. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2964. OP_ALG_AAI_HMAC_PRECOMP,
  2965. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2966. },
  2967. },
  2968. {
  2969. .aead = {
  2970. .base = {
  2971. .cra_name = "echainiv(authenc(hmac(sha512),"
  2972. "cbc(aes)))",
  2973. .cra_driver_name = "echainiv-authenc-"
  2974. "hmac-sha512-cbc-aes-caam",
  2975. .cra_blocksize = AES_BLOCK_SIZE,
  2976. },
  2977. .setkey = aead_setkey,
  2978. .setauthsize = aead_setauthsize,
  2979. .encrypt = aead_encrypt,
  2980. .decrypt = aead_decrypt,
  2981. .ivsize = AES_BLOCK_SIZE,
  2982. .maxauthsize = SHA512_DIGEST_SIZE,
  2983. },
  2984. .caam = {
  2985. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2986. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2987. OP_ALG_AAI_HMAC_PRECOMP,
  2988. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2989. .geniv = true,
  2990. },
  2991. },
  2992. {
  2993. .aead = {
  2994. .base = {
  2995. .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  2996. .cra_driver_name = "authenc-hmac-md5-"
  2997. "cbc-des3_ede-caam",
  2998. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2999. },
  3000. .setkey = aead_setkey,
  3001. .setauthsize = aead_setauthsize,
  3002. .encrypt = aead_encrypt,
  3003. .decrypt = aead_decrypt,
  3004. .ivsize = DES3_EDE_BLOCK_SIZE,
  3005. .maxauthsize = MD5_DIGEST_SIZE,
  3006. },
  3007. .caam = {
  3008. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3009. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3010. OP_ALG_AAI_HMAC_PRECOMP,
  3011. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3012. }
  3013. },
  3014. {
  3015. .aead = {
  3016. .base = {
  3017. .cra_name = "echainiv(authenc(hmac(md5),"
  3018. "cbc(des3_ede)))",
  3019. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  3020. "cbc-des3_ede-caam",
  3021. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3022. },
  3023. .setkey = aead_setkey,
  3024. .setauthsize = aead_setauthsize,
  3025. .encrypt = aead_encrypt,
  3026. .decrypt = aead_decrypt,
  3027. .ivsize = DES3_EDE_BLOCK_SIZE,
  3028. .maxauthsize = MD5_DIGEST_SIZE,
  3029. },
  3030. .caam = {
  3031. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3032. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3033. OP_ALG_AAI_HMAC_PRECOMP,
  3034. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3035. .geniv = true,
  3036. }
  3037. },
  3038. {
  3039. .aead = {
  3040. .base = {
  3041. .cra_name = "authenc(hmac(sha1),"
  3042. "cbc(des3_ede))",
  3043. .cra_driver_name = "authenc-hmac-sha1-"
  3044. "cbc-des3_ede-caam",
  3045. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3046. },
  3047. .setkey = aead_setkey,
  3048. .setauthsize = aead_setauthsize,
  3049. .encrypt = aead_encrypt,
  3050. .decrypt = aead_decrypt,
  3051. .ivsize = DES3_EDE_BLOCK_SIZE,
  3052. .maxauthsize = SHA1_DIGEST_SIZE,
  3053. },
  3054. .caam = {
  3055. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3056. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3057. OP_ALG_AAI_HMAC_PRECOMP,
  3058. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3059. },
  3060. },
  3061. {
  3062. .aead = {
  3063. .base = {
  3064. .cra_name = "echainiv(authenc(hmac(sha1),"
  3065. "cbc(des3_ede)))",
  3066. .cra_driver_name = "echainiv-authenc-"
  3067. "hmac-sha1-"
  3068. "cbc-des3_ede-caam",
  3069. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3070. },
  3071. .setkey = aead_setkey,
  3072. .setauthsize = aead_setauthsize,
  3073. .encrypt = aead_encrypt,
  3074. .decrypt = aead_decrypt,
  3075. .ivsize = DES3_EDE_BLOCK_SIZE,
  3076. .maxauthsize = SHA1_DIGEST_SIZE,
  3077. },
  3078. .caam = {
  3079. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3080. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3081. OP_ALG_AAI_HMAC_PRECOMP,
  3082. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3083. .geniv = true,
  3084. },
  3085. },
  3086. {
  3087. .aead = {
  3088. .base = {
  3089. .cra_name = "authenc(hmac(sha224),"
  3090. "cbc(des3_ede))",
  3091. .cra_driver_name = "authenc-hmac-sha224-"
  3092. "cbc-des3_ede-caam",
  3093. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3094. },
  3095. .setkey = aead_setkey,
  3096. .setauthsize = aead_setauthsize,
  3097. .encrypt = aead_encrypt,
  3098. .decrypt = aead_decrypt,
  3099. .ivsize = DES3_EDE_BLOCK_SIZE,
  3100. .maxauthsize = SHA224_DIGEST_SIZE,
  3101. },
  3102. .caam = {
  3103. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3104. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3105. OP_ALG_AAI_HMAC_PRECOMP,
  3106. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3107. },
  3108. },
  3109. {
  3110. .aead = {
  3111. .base = {
  3112. .cra_name = "echainiv(authenc(hmac(sha224),"
  3113. "cbc(des3_ede)))",
  3114. .cra_driver_name = "echainiv-authenc-"
  3115. "hmac-sha224-"
  3116. "cbc-des3_ede-caam",
  3117. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3118. },
  3119. .setkey = aead_setkey,
  3120. .setauthsize = aead_setauthsize,
  3121. .encrypt = aead_encrypt,
  3122. .decrypt = aead_decrypt,
  3123. .ivsize = DES3_EDE_BLOCK_SIZE,
  3124. .maxauthsize = SHA224_DIGEST_SIZE,
  3125. },
  3126. .caam = {
  3127. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3128. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3129. OP_ALG_AAI_HMAC_PRECOMP,
  3130. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3131. .geniv = true,
  3132. },
  3133. },
  3134. {
  3135. .aead = {
  3136. .base = {
  3137. .cra_name = "authenc(hmac(sha256),"
  3138. "cbc(des3_ede))",
  3139. .cra_driver_name = "authenc-hmac-sha256-"
  3140. "cbc-des3_ede-caam",
  3141. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3142. },
  3143. .setkey = aead_setkey,
  3144. .setauthsize = aead_setauthsize,
  3145. .encrypt = aead_encrypt,
  3146. .decrypt = aead_decrypt,
  3147. .ivsize = DES3_EDE_BLOCK_SIZE,
  3148. .maxauthsize = SHA256_DIGEST_SIZE,
  3149. },
  3150. .caam = {
  3151. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3152. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3153. OP_ALG_AAI_HMAC_PRECOMP,
  3154. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3155. },
  3156. },
  3157. {
  3158. .aead = {
  3159. .base = {
  3160. .cra_name = "echainiv(authenc(hmac(sha256),"
  3161. "cbc(des3_ede)))",
  3162. .cra_driver_name = "echainiv-authenc-"
  3163. "hmac-sha256-"
  3164. "cbc-des3_ede-caam",
  3165. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3166. },
  3167. .setkey = aead_setkey,
  3168. .setauthsize = aead_setauthsize,
  3169. .encrypt = aead_encrypt,
  3170. .decrypt = aead_decrypt,
  3171. .ivsize = DES3_EDE_BLOCK_SIZE,
  3172. .maxauthsize = SHA256_DIGEST_SIZE,
  3173. },
  3174. .caam = {
  3175. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3176. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3177. OP_ALG_AAI_HMAC_PRECOMP,
  3178. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3179. .geniv = true,
  3180. },
  3181. },
  3182. {
  3183. .aead = {
  3184. .base = {
  3185. .cra_name = "authenc(hmac(sha384),"
  3186. "cbc(des3_ede))",
  3187. .cra_driver_name = "authenc-hmac-sha384-"
  3188. "cbc-des3_ede-caam",
  3189. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3190. },
  3191. .setkey = aead_setkey,
  3192. .setauthsize = aead_setauthsize,
  3193. .encrypt = aead_encrypt,
  3194. .decrypt = aead_decrypt,
  3195. .ivsize = DES3_EDE_BLOCK_SIZE,
  3196. .maxauthsize = SHA384_DIGEST_SIZE,
  3197. },
  3198. .caam = {
  3199. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3200. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3201. OP_ALG_AAI_HMAC_PRECOMP,
  3202. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3203. },
  3204. },
  3205. {
  3206. .aead = {
  3207. .base = {
  3208. .cra_name = "echainiv(authenc(hmac(sha384),"
  3209. "cbc(des3_ede)))",
  3210. .cra_driver_name = "echainiv-authenc-"
  3211. "hmac-sha384-"
  3212. "cbc-des3_ede-caam",
  3213. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3214. },
  3215. .setkey = aead_setkey,
  3216. .setauthsize = aead_setauthsize,
  3217. .encrypt = aead_encrypt,
  3218. .decrypt = aead_decrypt,
  3219. .ivsize = DES3_EDE_BLOCK_SIZE,
  3220. .maxauthsize = SHA384_DIGEST_SIZE,
  3221. },
  3222. .caam = {
  3223. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3224. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3225. OP_ALG_AAI_HMAC_PRECOMP,
  3226. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3227. .geniv = true,
  3228. },
  3229. },
  3230. {
  3231. .aead = {
  3232. .base = {
  3233. .cra_name = "authenc(hmac(sha512),"
  3234. "cbc(des3_ede))",
  3235. .cra_driver_name = "authenc-hmac-sha512-"
  3236. "cbc-des3_ede-caam",
  3237. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3238. },
  3239. .setkey = aead_setkey,
  3240. .setauthsize = aead_setauthsize,
  3241. .encrypt = aead_encrypt,
  3242. .decrypt = aead_decrypt,
  3243. .ivsize = DES3_EDE_BLOCK_SIZE,
  3244. .maxauthsize = SHA512_DIGEST_SIZE,
  3245. },
  3246. .caam = {
  3247. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3248. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3249. OP_ALG_AAI_HMAC_PRECOMP,
  3250. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3251. },
  3252. },
  3253. {
  3254. .aead = {
  3255. .base = {
  3256. .cra_name = "echainiv(authenc(hmac(sha512),"
  3257. "cbc(des3_ede)))",
  3258. .cra_driver_name = "echainiv-authenc-"
  3259. "hmac-sha512-"
  3260. "cbc-des3_ede-caam",
  3261. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3262. },
  3263. .setkey = aead_setkey,
  3264. .setauthsize = aead_setauthsize,
  3265. .encrypt = aead_encrypt,
  3266. .decrypt = aead_decrypt,
  3267. .ivsize = DES3_EDE_BLOCK_SIZE,
  3268. .maxauthsize = SHA512_DIGEST_SIZE,
  3269. },
  3270. .caam = {
  3271. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3272. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3273. OP_ALG_AAI_HMAC_PRECOMP,
  3274. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3275. .geniv = true,
  3276. },
  3277. },
  3278. {
  3279. .aead = {
  3280. .base = {
  3281. .cra_name = "authenc(hmac(md5),cbc(des))",
  3282. .cra_driver_name = "authenc-hmac-md5-"
  3283. "cbc-des-caam",
  3284. .cra_blocksize = DES_BLOCK_SIZE,
  3285. },
  3286. .setkey = aead_setkey,
  3287. .setauthsize = aead_setauthsize,
  3288. .encrypt = aead_encrypt,
  3289. .decrypt = aead_decrypt,
  3290. .ivsize = DES_BLOCK_SIZE,
  3291. .maxauthsize = MD5_DIGEST_SIZE,
  3292. },
  3293. .caam = {
  3294. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3295. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3296. OP_ALG_AAI_HMAC_PRECOMP,
  3297. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3298. },
  3299. },
  3300. {
  3301. .aead = {
  3302. .base = {
  3303. .cra_name = "echainiv(authenc(hmac(md5),"
  3304. "cbc(des)))",
  3305. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  3306. "cbc-des-caam",
  3307. .cra_blocksize = DES_BLOCK_SIZE,
  3308. },
  3309. .setkey = aead_setkey,
  3310. .setauthsize = aead_setauthsize,
  3311. .encrypt = aead_encrypt,
  3312. .decrypt = aead_decrypt,
  3313. .ivsize = DES_BLOCK_SIZE,
  3314. .maxauthsize = MD5_DIGEST_SIZE,
  3315. },
  3316. .caam = {
  3317. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3318. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3319. OP_ALG_AAI_HMAC_PRECOMP,
  3320. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3321. .geniv = true,
  3322. },
  3323. },
  3324. {
  3325. .aead = {
  3326. .base = {
  3327. .cra_name = "authenc(hmac(sha1),cbc(des))",
  3328. .cra_driver_name = "authenc-hmac-sha1-"
  3329. "cbc-des-caam",
  3330. .cra_blocksize = DES_BLOCK_SIZE,
  3331. },
  3332. .setkey = aead_setkey,
  3333. .setauthsize = aead_setauthsize,
  3334. .encrypt = aead_encrypt,
  3335. .decrypt = aead_decrypt,
  3336. .ivsize = DES_BLOCK_SIZE,
  3337. .maxauthsize = SHA1_DIGEST_SIZE,
  3338. },
  3339. .caam = {
  3340. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3341. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3342. OP_ALG_AAI_HMAC_PRECOMP,
  3343. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3344. },
  3345. },
  3346. {
  3347. .aead = {
  3348. .base = {
  3349. .cra_name = "echainiv(authenc(hmac(sha1),"
  3350. "cbc(des)))",
  3351. .cra_driver_name = "echainiv-authenc-"
  3352. "hmac-sha1-cbc-des-caam",
  3353. .cra_blocksize = DES_BLOCK_SIZE,
  3354. },
  3355. .setkey = aead_setkey,
  3356. .setauthsize = aead_setauthsize,
  3357. .encrypt = aead_encrypt,
  3358. .decrypt = aead_decrypt,
  3359. .ivsize = DES_BLOCK_SIZE,
  3360. .maxauthsize = SHA1_DIGEST_SIZE,
  3361. },
  3362. .caam = {
  3363. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3364. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3365. OP_ALG_AAI_HMAC_PRECOMP,
  3366. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3367. .geniv = true,
  3368. },
  3369. },
  3370. {
  3371. .aead = {
  3372. .base = {
  3373. .cra_name = "authenc(hmac(sha224),cbc(des))",
  3374. .cra_driver_name = "authenc-hmac-sha224-"
  3375. "cbc-des-caam",
  3376. .cra_blocksize = DES_BLOCK_SIZE,
  3377. },
  3378. .setkey = aead_setkey,
  3379. .setauthsize = aead_setauthsize,
  3380. .encrypt = aead_encrypt,
  3381. .decrypt = aead_decrypt,
  3382. .ivsize = DES_BLOCK_SIZE,
  3383. .maxauthsize = SHA224_DIGEST_SIZE,
  3384. },
  3385. .caam = {
  3386. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3387. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3388. OP_ALG_AAI_HMAC_PRECOMP,
  3389. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3390. },
  3391. },
  3392. {
  3393. .aead = {
  3394. .base = {
  3395. .cra_name = "echainiv(authenc(hmac(sha224),"
  3396. "cbc(des)))",
  3397. .cra_driver_name = "echainiv-authenc-"
  3398. "hmac-sha224-cbc-des-caam",
  3399. .cra_blocksize = DES_BLOCK_SIZE,
  3400. },
  3401. .setkey = aead_setkey,
  3402. .setauthsize = aead_setauthsize,
  3403. .encrypt = aead_encrypt,
  3404. .decrypt = aead_decrypt,
  3405. .ivsize = DES_BLOCK_SIZE,
  3406. .maxauthsize = SHA224_DIGEST_SIZE,
  3407. },
  3408. .caam = {
  3409. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3410. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3411. OP_ALG_AAI_HMAC_PRECOMP,
  3412. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3413. .geniv = true,
  3414. },
  3415. },
  3416. {
  3417. .aead = {
  3418. .base = {
  3419. .cra_name = "authenc(hmac(sha256),cbc(des))",
  3420. .cra_driver_name = "authenc-hmac-sha256-"
  3421. "cbc-des-caam",
  3422. .cra_blocksize = DES_BLOCK_SIZE,
  3423. },
  3424. .setkey = aead_setkey,
  3425. .setauthsize = aead_setauthsize,
  3426. .encrypt = aead_encrypt,
  3427. .decrypt = aead_decrypt,
  3428. .ivsize = DES_BLOCK_SIZE,
  3429. .maxauthsize = SHA256_DIGEST_SIZE,
  3430. },
  3431. .caam = {
  3432. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3433. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3434. OP_ALG_AAI_HMAC_PRECOMP,
  3435. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3436. },
  3437. },
  3438. {
  3439. .aead = {
  3440. .base = {
  3441. .cra_name = "echainiv(authenc(hmac(sha256),"
  3442. "cbc(des)))",
  3443. .cra_driver_name = "echainiv-authenc-"
  3444. "hmac-sha256-cbc-des-caam",
  3445. .cra_blocksize = DES_BLOCK_SIZE,
  3446. },
  3447. .setkey = aead_setkey,
  3448. .setauthsize = aead_setauthsize,
  3449. .encrypt = aead_encrypt,
  3450. .decrypt = aead_decrypt,
  3451. .ivsize = DES_BLOCK_SIZE,
  3452. .maxauthsize = SHA256_DIGEST_SIZE,
  3453. },
  3454. .caam = {
  3455. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3456. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3457. OP_ALG_AAI_HMAC_PRECOMP,
  3458. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3459. .geniv = true,
  3460. },
  3461. },
  3462. {
  3463. .aead = {
  3464. .base = {
  3465. .cra_name = "authenc(hmac(sha384),cbc(des))",
  3466. .cra_driver_name = "authenc-hmac-sha384-"
  3467. "cbc-des-caam",
  3468. .cra_blocksize = DES_BLOCK_SIZE,
  3469. },
  3470. .setkey = aead_setkey,
  3471. .setauthsize = aead_setauthsize,
  3472. .encrypt = aead_encrypt,
  3473. .decrypt = aead_decrypt,
  3474. .ivsize = DES_BLOCK_SIZE,
  3475. .maxauthsize = SHA384_DIGEST_SIZE,
  3476. },
  3477. .caam = {
  3478. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3479. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3480. OP_ALG_AAI_HMAC_PRECOMP,
  3481. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3482. },
  3483. },
  3484. {
  3485. .aead = {
  3486. .base = {
  3487. .cra_name = "echainiv(authenc(hmac(sha384),"
  3488. "cbc(des)))",
  3489. .cra_driver_name = "echainiv-authenc-"
  3490. "hmac-sha384-cbc-des-caam",
  3491. .cra_blocksize = DES_BLOCK_SIZE,
  3492. },
  3493. .setkey = aead_setkey,
  3494. .setauthsize = aead_setauthsize,
  3495. .encrypt = aead_encrypt,
  3496. .decrypt = aead_decrypt,
  3497. .ivsize = DES_BLOCK_SIZE,
  3498. .maxauthsize = SHA384_DIGEST_SIZE,
  3499. },
  3500. .caam = {
  3501. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3502. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3503. OP_ALG_AAI_HMAC_PRECOMP,
  3504. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3505. .geniv = true,
  3506. },
  3507. },
  3508. {
  3509. .aead = {
  3510. .base = {
  3511. .cra_name = "authenc(hmac(sha512),cbc(des))",
  3512. .cra_driver_name = "authenc-hmac-sha512-"
  3513. "cbc-des-caam",
  3514. .cra_blocksize = DES_BLOCK_SIZE,
  3515. },
  3516. .setkey = aead_setkey,
  3517. .setauthsize = aead_setauthsize,
  3518. .encrypt = aead_encrypt,
  3519. .decrypt = aead_decrypt,
  3520. .ivsize = DES_BLOCK_SIZE,
  3521. .maxauthsize = SHA512_DIGEST_SIZE,
  3522. },
  3523. .caam = {
  3524. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3525. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3526. OP_ALG_AAI_HMAC_PRECOMP,
  3527. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3528. },
  3529. },
  3530. {
  3531. .aead = {
  3532. .base = {
  3533. .cra_name = "echainiv(authenc(hmac(sha512),"
  3534. "cbc(des)))",
  3535. .cra_driver_name = "echainiv-authenc-"
  3536. "hmac-sha512-cbc-des-caam",
  3537. .cra_blocksize = DES_BLOCK_SIZE,
  3538. },
  3539. .setkey = aead_setkey,
  3540. .setauthsize = aead_setauthsize,
  3541. .encrypt = aead_encrypt,
  3542. .decrypt = aead_decrypt,
  3543. .ivsize = DES_BLOCK_SIZE,
  3544. .maxauthsize = SHA512_DIGEST_SIZE,
  3545. },
  3546. .caam = {
  3547. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3548. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3549. OP_ALG_AAI_HMAC_PRECOMP,
  3550. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3551. .geniv = true,
  3552. },
  3553. },
  3554. {
  3555. .aead = {
  3556. .base = {
  3557. .cra_name = "authenc(hmac(md5),"
  3558. "rfc3686(ctr(aes)))",
  3559. .cra_driver_name = "authenc-hmac-md5-"
  3560. "rfc3686-ctr-aes-caam",
  3561. .cra_blocksize = 1,
  3562. },
  3563. .setkey = aead_setkey,
  3564. .setauthsize = aead_setauthsize,
  3565. .encrypt = aead_encrypt,
  3566. .decrypt = aead_decrypt,
  3567. .ivsize = CTR_RFC3686_IV_SIZE,
  3568. .maxauthsize = MD5_DIGEST_SIZE,
  3569. },
  3570. .caam = {
  3571. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3572. OP_ALG_AAI_CTR_MOD128,
  3573. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3574. OP_ALG_AAI_HMAC_PRECOMP,
  3575. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3576. .rfc3686 = true,
  3577. },
  3578. },
  3579. {
  3580. .aead = {
  3581. .base = {
  3582. .cra_name = "seqiv(authenc("
  3583. "hmac(md5),rfc3686(ctr(aes))))",
  3584. .cra_driver_name = "seqiv-authenc-hmac-md5-"
  3585. "rfc3686-ctr-aes-caam",
  3586. .cra_blocksize = 1,
  3587. },
  3588. .setkey = aead_setkey,
  3589. .setauthsize = aead_setauthsize,
  3590. .encrypt = aead_encrypt,
  3591. .decrypt = aead_decrypt,
  3592. .ivsize = CTR_RFC3686_IV_SIZE,
  3593. .maxauthsize = MD5_DIGEST_SIZE,
  3594. },
  3595. .caam = {
  3596. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3597. OP_ALG_AAI_CTR_MOD128,
  3598. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3599. OP_ALG_AAI_HMAC_PRECOMP,
  3600. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3601. .rfc3686 = true,
  3602. .geniv = true,
  3603. },
  3604. },
  3605. {
  3606. .aead = {
  3607. .base = {
  3608. .cra_name = "authenc(hmac(sha1),"
  3609. "rfc3686(ctr(aes)))",
  3610. .cra_driver_name = "authenc-hmac-sha1-"
  3611. "rfc3686-ctr-aes-caam",
  3612. .cra_blocksize = 1,
  3613. },
  3614. .setkey = aead_setkey,
  3615. .setauthsize = aead_setauthsize,
  3616. .encrypt = aead_encrypt,
  3617. .decrypt = aead_decrypt,
  3618. .ivsize = CTR_RFC3686_IV_SIZE,
  3619. .maxauthsize = SHA1_DIGEST_SIZE,
  3620. },
  3621. .caam = {
  3622. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3623. OP_ALG_AAI_CTR_MOD128,
  3624. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3625. OP_ALG_AAI_HMAC_PRECOMP,
  3626. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3627. .rfc3686 = true,
  3628. },
  3629. },
  3630. {
  3631. .aead = {
  3632. .base = {
  3633. .cra_name = "seqiv(authenc("
  3634. "hmac(sha1),rfc3686(ctr(aes))))",
  3635. .cra_driver_name = "seqiv-authenc-hmac-sha1-"
  3636. "rfc3686-ctr-aes-caam",
  3637. .cra_blocksize = 1,
  3638. },
  3639. .setkey = aead_setkey,
  3640. .setauthsize = aead_setauthsize,
  3641. .encrypt = aead_encrypt,
  3642. .decrypt = aead_decrypt,
  3643. .ivsize = CTR_RFC3686_IV_SIZE,
  3644. .maxauthsize = SHA1_DIGEST_SIZE,
  3645. },
  3646. .caam = {
  3647. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3648. OP_ALG_AAI_CTR_MOD128,
  3649. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3650. OP_ALG_AAI_HMAC_PRECOMP,
  3651. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3652. .rfc3686 = true,
  3653. .geniv = true,
  3654. },
  3655. },
  3656. {
  3657. .aead = {
  3658. .base = {
  3659. .cra_name = "authenc(hmac(sha224),"
  3660. "rfc3686(ctr(aes)))",
  3661. .cra_driver_name = "authenc-hmac-sha224-"
  3662. "rfc3686-ctr-aes-caam",
  3663. .cra_blocksize = 1,
  3664. },
  3665. .setkey = aead_setkey,
  3666. .setauthsize = aead_setauthsize,
  3667. .encrypt = aead_encrypt,
  3668. .decrypt = aead_decrypt,
  3669. .ivsize = CTR_RFC3686_IV_SIZE,
  3670. .maxauthsize = SHA224_DIGEST_SIZE,
  3671. },
  3672. .caam = {
  3673. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3674. OP_ALG_AAI_CTR_MOD128,
  3675. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3676. OP_ALG_AAI_HMAC_PRECOMP,
  3677. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3678. .rfc3686 = true,
  3679. },
  3680. },
  3681. {
  3682. .aead = {
  3683. .base = {
  3684. .cra_name = "seqiv(authenc("
  3685. "hmac(sha224),rfc3686(ctr(aes))))",
  3686. .cra_driver_name = "seqiv-authenc-hmac-sha224-"
  3687. "rfc3686-ctr-aes-caam",
  3688. .cra_blocksize = 1,
  3689. },
  3690. .setkey = aead_setkey,
  3691. .setauthsize = aead_setauthsize,
  3692. .encrypt = aead_encrypt,
  3693. .decrypt = aead_decrypt,
  3694. .ivsize = CTR_RFC3686_IV_SIZE,
  3695. .maxauthsize = SHA224_DIGEST_SIZE,
  3696. },
  3697. .caam = {
  3698. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3699. OP_ALG_AAI_CTR_MOD128,
  3700. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3701. OP_ALG_AAI_HMAC_PRECOMP,
  3702. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3703. .rfc3686 = true,
  3704. .geniv = true,
  3705. },
  3706. },
  3707. {
  3708. .aead = {
  3709. .base = {
  3710. .cra_name = "authenc(hmac(sha256),"
  3711. "rfc3686(ctr(aes)))",
  3712. .cra_driver_name = "authenc-hmac-sha256-"
  3713. "rfc3686-ctr-aes-caam",
  3714. .cra_blocksize = 1,
  3715. },
  3716. .setkey = aead_setkey,
  3717. .setauthsize = aead_setauthsize,
  3718. .encrypt = aead_encrypt,
  3719. .decrypt = aead_decrypt,
  3720. .ivsize = CTR_RFC3686_IV_SIZE,
  3721. .maxauthsize = SHA256_DIGEST_SIZE,
  3722. },
  3723. .caam = {
  3724. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3725. OP_ALG_AAI_CTR_MOD128,
  3726. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3727. OP_ALG_AAI_HMAC_PRECOMP,
  3728. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3729. .rfc3686 = true,
  3730. },
  3731. },
  3732. {
  3733. .aead = {
  3734. .base = {
  3735. .cra_name = "seqiv(authenc(hmac(sha256),"
  3736. "rfc3686(ctr(aes))))",
  3737. .cra_driver_name = "seqiv-authenc-hmac-sha256-"
  3738. "rfc3686-ctr-aes-caam",
  3739. .cra_blocksize = 1,
  3740. },
  3741. .setkey = aead_setkey,
  3742. .setauthsize = aead_setauthsize,
  3743. .encrypt = aead_encrypt,
  3744. .decrypt = aead_decrypt,
  3745. .ivsize = CTR_RFC3686_IV_SIZE,
  3746. .maxauthsize = SHA256_DIGEST_SIZE,
  3747. },
  3748. .caam = {
  3749. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3750. OP_ALG_AAI_CTR_MOD128,
  3751. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3752. OP_ALG_AAI_HMAC_PRECOMP,
  3753. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3754. .rfc3686 = true,
  3755. .geniv = true,
  3756. },
  3757. },
  3758. {
  3759. .aead = {
  3760. .base = {
  3761. .cra_name = "authenc(hmac(sha384),"
  3762. "rfc3686(ctr(aes)))",
  3763. .cra_driver_name = "authenc-hmac-sha384-"
  3764. "rfc3686-ctr-aes-caam",
  3765. .cra_blocksize = 1,
  3766. },
  3767. .setkey = aead_setkey,
  3768. .setauthsize = aead_setauthsize,
  3769. .encrypt = aead_encrypt,
  3770. .decrypt = aead_decrypt,
  3771. .ivsize = CTR_RFC3686_IV_SIZE,
  3772. .maxauthsize = SHA384_DIGEST_SIZE,
  3773. },
  3774. .caam = {
  3775. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3776. OP_ALG_AAI_CTR_MOD128,
  3777. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3778. OP_ALG_AAI_HMAC_PRECOMP,
  3779. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3780. .rfc3686 = true,
  3781. },
  3782. },
  3783. {
  3784. .aead = {
  3785. .base = {
  3786. .cra_name = "seqiv(authenc(hmac(sha384),"
  3787. "rfc3686(ctr(aes))))",
  3788. .cra_driver_name = "seqiv-authenc-hmac-sha384-"
  3789. "rfc3686-ctr-aes-caam",
  3790. .cra_blocksize = 1,
  3791. },
  3792. .setkey = aead_setkey,
  3793. .setauthsize = aead_setauthsize,
  3794. .encrypt = aead_encrypt,
  3795. .decrypt = aead_decrypt,
  3796. .ivsize = CTR_RFC3686_IV_SIZE,
  3797. .maxauthsize = SHA384_DIGEST_SIZE,
  3798. },
  3799. .caam = {
  3800. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3801. OP_ALG_AAI_CTR_MOD128,
  3802. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3803. OP_ALG_AAI_HMAC_PRECOMP,
  3804. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3805. .rfc3686 = true,
  3806. .geniv = true,
  3807. },
  3808. },
  3809. {
  3810. .aead = {
  3811. .base = {
  3812. .cra_name = "authenc(hmac(sha512),"
  3813. "rfc3686(ctr(aes)))",
  3814. .cra_driver_name = "authenc-hmac-sha512-"
  3815. "rfc3686-ctr-aes-caam",
  3816. .cra_blocksize = 1,
  3817. },
  3818. .setkey = aead_setkey,
  3819. .setauthsize = aead_setauthsize,
  3820. .encrypt = aead_encrypt,
  3821. .decrypt = aead_decrypt,
  3822. .ivsize = CTR_RFC3686_IV_SIZE,
  3823. .maxauthsize = SHA512_DIGEST_SIZE,
  3824. },
  3825. .caam = {
  3826. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3827. OP_ALG_AAI_CTR_MOD128,
  3828. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3829. OP_ALG_AAI_HMAC_PRECOMP,
  3830. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3831. .rfc3686 = true,
  3832. },
  3833. },
  3834. {
  3835. .aead = {
  3836. .base = {
  3837. .cra_name = "seqiv(authenc(hmac(sha512),"
  3838. "rfc3686(ctr(aes))))",
  3839. .cra_driver_name = "seqiv-authenc-hmac-sha512-"
  3840. "rfc3686-ctr-aes-caam",
  3841. .cra_blocksize = 1,
  3842. },
  3843. .setkey = aead_setkey,
  3844. .setauthsize = aead_setauthsize,
  3845. .encrypt = aead_encrypt,
  3846. .decrypt = aead_decrypt,
  3847. .ivsize = CTR_RFC3686_IV_SIZE,
  3848. .maxauthsize = SHA512_DIGEST_SIZE,
  3849. },
  3850. .caam = {
  3851. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3852. OP_ALG_AAI_CTR_MOD128,
  3853. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3854. OP_ALG_AAI_HMAC_PRECOMP,
  3855. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3856. .rfc3686 = true,
  3857. .geniv = true,
  3858. },
  3859. },
  3860. };
  3861. struct caam_crypto_alg {
  3862. struct crypto_alg crypto_alg;
  3863. struct list_head entry;
  3864. struct caam_alg_entry caam;
  3865. };
  3866. static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
  3867. {
  3868. ctx->jrdev = caam_jr_alloc();
  3869. if (IS_ERR(ctx->jrdev)) {
  3870. pr_err("Job Ring Device allocation for transform failed\n");
  3871. return PTR_ERR(ctx->jrdev);
  3872. }
  3873. /* copy descriptor header template value */
  3874. ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
  3875. ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
  3876. ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
  3877. return 0;
  3878. }
  3879. static int caam_cra_init(struct crypto_tfm *tfm)
  3880. {
  3881. struct crypto_alg *alg = tfm->__crt_alg;
  3882. struct caam_crypto_alg *caam_alg =
  3883. container_of(alg, struct caam_crypto_alg, crypto_alg);
  3884. struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
  3885. return caam_init_common(ctx, &caam_alg->caam);
  3886. }
  3887. static int caam_aead_init(struct crypto_aead *tfm)
  3888. {
  3889. struct aead_alg *alg = crypto_aead_alg(tfm);
  3890. struct caam_aead_alg *caam_alg =
  3891. container_of(alg, struct caam_aead_alg, aead);
  3892. struct caam_ctx *ctx = crypto_aead_ctx(tfm);
  3893. return caam_init_common(ctx, &caam_alg->caam);
  3894. }
  3895. static void caam_exit_common(struct caam_ctx *ctx)
  3896. {
  3897. if (ctx->sh_desc_enc_dma &&
  3898. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
  3899. dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
  3900. desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
  3901. if (ctx->sh_desc_dec_dma &&
  3902. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
  3903. dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
  3904. desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
  3905. if (ctx->sh_desc_givenc_dma &&
  3906. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
  3907. dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
  3908. desc_bytes(ctx->sh_desc_givenc),
  3909. DMA_TO_DEVICE);
  3910. if (ctx->key_dma &&
  3911. !dma_mapping_error(ctx->jrdev, ctx->key_dma))
  3912. dma_unmap_single(ctx->jrdev, ctx->key_dma,
  3913. ctx->enckeylen + ctx->split_key_pad_len,
  3914. DMA_TO_DEVICE);
  3915. caam_jr_free(ctx->jrdev);
  3916. }
  3917. static void caam_cra_exit(struct crypto_tfm *tfm)
  3918. {
  3919. caam_exit_common(crypto_tfm_ctx(tfm));
  3920. }
  3921. static void caam_aead_exit(struct crypto_aead *tfm)
  3922. {
  3923. caam_exit_common(crypto_aead_ctx(tfm));
  3924. }
  3925. static void __exit caam_algapi_exit(void)
  3926. {
  3927. struct caam_crypto_alg *t_alg, *n;
  3928. int i;
  3929. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  3930. struct caam_aead_alg *t_alg = driver_aeads + i;
  3931. if (t_alg->registered)
  3932. crypto_unregister_aead(&t_alg->aead);
  3933. }
  3934. if (!alg_list.next)
  3935. return;
  3936. list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
  3937. crypto_unregister_alg(&t_alg->crypto_alg);
  3938. list_del(&t_alg->entry);
  3939. kfree(t_alg);
  3940. }
  3941. }
  3942. static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
  3943. *template)
  3944. {
  3945. struct caam_crypto_alg *t_alg;
  3946. struct crypto_alg *alg;
  3947. t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
  3948. if (!t_alg) {
  3949. pr_err("failed to allocate t_alg\n");
  3950. return ERR_PTR(-ENOMEM);
  3951. }
  3952. alg = &t_alg->crypto_alg;
  3953. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
  3954. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  3955. template->driver_name);
  3956. alg->cra_module = THIS_MODULE;
  3957. alg->cra_init = caam_cra_init;
  3958. alg->cra_exit = caam_cra_exit;
  3959. alg->cra_priority = CAAM_CRA_PRIORITY;
  3960. alg->cra_blocksize = template->blocksize;
  3961. alg->cra_alignmask = 0;
  3962. alg->cra_ctxsize = sizeof(struct caam_ctx);
  3963. alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
  3964. template->type;
  3965. switch (template->type) {
  3966. case CRYPTO_ALG_TYPE_GIVCIPHER:
  3967. alg->cra_type = &crypto_givcipher_type;
  3968. alg->cra_ablkcipher = template->template_ablkcipher;
  3969. break;
  3970. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3971. alg->cra_type = &crypto_ablkcipher_type;
  3972. alg->cra_ablkcipher = template->template_ablkcipher;
  3973. break;
  3974. }
  3975. t_alg->caam.class1_alg_type = template->class1_alg_type;
  3976. t_alg->caam.class2_alg_type = template->class2_alg_type;
  3977. t_alg->caam.alg_op = template->alg_op;
  3978. return t_alg;
  3979. }
  3980. static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
  3981. {
  3982. struct aead_alg *alg = &t_alg->aead;
  3983. alg->base.cra_module = THIS_MODULE;
  3984. alg->base.cra_priority = CAAM_CRA_PRIORITY;
  3985. alg->base.cra_ctxsize = sizeof(struct caam_ctx);
  3986. alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  3987. alg->init = caam_aead_init;
  3988. alg->exit = caam_aead_exit;
  3989. }
  3990. static int __init caam_algapi_init(void)
  3991. {
  3992. struct device_node *dev_node;
  3993. struct platform_device *pdev;
  3994. struct device *ctrldev;
  3995. struct caam_drv_private *priv;
  3996. int i = 0, err = 0;
  3997. u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
  3998. unsigned int md_limit = SHA512_DIGEST_SIZE;
  3999. bool registered = false;
  4000. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
  4001. if (!dev_node) {
  4002. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
  4003. if (!dev_node)
  4004. return -ENODEV;
  4005. }
  4006. pdev = of_find_device_by_node(dev_node);
  4007. if (!pdev) {
  4008. of_node_put(dev_node);
  4009. return -ENODEV;
  4010. }
  4011. ctrldev = &pdev->dev;
  4012. priv = dev_get_drvdata(ctrldev);
  4013. of_node_put(dev_node);
  4014. /*
  4015. * If priv is NULL, it's probably because the caam driver wasn't
  4016. * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
  4017. */
  4018. if (!priv)
  4019. return -ENODEV;
  4020. INIT_LIST_HEAD(&alg_list);
  4021. /*
  4022. * Register crypto algorithms the device supports.
  4023. * First, detect presence and attributes of DES, AES, and MD blocks.
  4024. */
  4025. cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
  4026. cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
  4027. des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
  4028. aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
  4029. md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
  4030. /* If MD is present, limit digest size based on LP256 */
  4031. if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
  4032. md_limit = SHA256_DIGEST_SIZE;
  4033. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  4034. struct caam_crypto_alg *t_alg;
  4035. struct caam_alg_template *alg = driver_algs + i;
  4036. u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
  4037. /* Skip DES algorithms if not supported by device */
  4038. if (!des_inst &&
  4039. ((alg_sel == OP_ALG_ALGSEL_3DES) ||
  4040. (alg_sel == OP_ALG_ALGSEL_DES)))
  4041. continue;
  4042. /* Skip AES algorithms if not supported by device */
  4043. if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
  4044. continue;
  4045. /*
  4046. * Check support for AES modes not available
  4047. * on LP devices.
  4048. */
  4049. if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
  4050. if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
  4051. OP_ALG_AAI_XTS)
  4052. continue;
  4053. t_alg = caam_alg_alloc(alg);
  4054. if (IS_ERR(t_alg)) {
  4055. err = PTR_ERR(t_alg);
  4056. pr_warn("%s alg allocation failed\n", alg->driver_name);
  4057. continue;
  4058. }
  4059. err = crypto_register_alg(&t_alg->crypto_alg);
  4060. if (err) {
  4061. pr_warn("%s alg registration failed\n",
  4062. t_alg->crypto_alg.cra_driver_name);
  4063. kfree(t_alg);
  4064. continue;
  4065. }
  4066. list_add_tail(&t_alg->entry, &alg_list);
  4067. registered = true;
  4068. }
  4069. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  4070. struct caam_aead_alg *t_alg = driver_aeads + i;
  4071. u32 c1_alg_sel = t_alg->caam.class1_alg_type &
  4072. OP_ALG_ALGSEL_MASK;
  4073. u32 c2_alg_sel = t_alg->caam.class2_alg_type &
  4074. OP_ALG_ALGSEL_MASK;
  4075. u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
  4076. /* Skip DES algorithms if not supported by device */
  4077. if (!des_inst &&
  4078. ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
  4079. (c1_alg_sel == OP_ALG_ALGSEL_DES)))
  4080. continue;
  4081. /* Skip AES algorithms if not supported by device */
  4082. if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
  4083. continue;
  4084. /*
  4085. * Check support for AES algorithms not available
  4086. * on LP devices.
  4087. */
  4088. if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
  4089. if (alg_aai == OP_ALG_AAI_GCM)
  4090. continue;
  4091. /*
  4092. * Skip algorithms requiring message digests
  4093. * if MD or MD size is not supported by device.
  4094. */
  4095. if (c2_alg_sel &&
  4096. (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
  4097. continue;
  4098. caam_aead_alg_init(t_alg);
  4099. err = crypto_register_aead(&t_alg->aead);
  4100. if (err) {
  4101. pr_warn("%s alg registration failed\n",
  4102. t_alg->aead.base.cra_driver_name);
  4103. continue;
  4104. }
  4105. t_alg->registered = true;
  4106. registered = true;
  4107. }
  4108. if (registered)
  4109. pr_info("caam algorithms registered in /proc/crypto\n");
  4110. return err;
  4111. }
  4112. module_init(caam_algapi_init);
  4113. module_exit(caam_algapi_exit);
  4114. MODULE_LICENSE("GPL");
  4115. MODULE_DESCRIPTION("FSL CAAM support for crypto API");
  4116. MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");