/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#ifdef CONFIG_PERFMON
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 *	bit0   : register implemented
 *	bit1   : end marker
 *	bit2-3 : reserved
 *	bit4   : pmc has pmc.pm
 *	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 *	bit6-7 : register type
 *	bit8-31: reserved
 */
#define PFM_REG_NOTIMPL		0x0	/* not implemented at all */
#define PFM_REG_IMPL		0x1	/* register implemented */
#define PFM_REG_END		0x2	/* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	 /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	 /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	 /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	 /* PMD used as buffer */
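
/*
 * For reference, the composite type values above decompose as
 * (derived from the bitmask layout documented earlier):
 *	PFM_REG_MONITOR  = 0x11 (impl + pmc.pm)
 *	PFM_REG_COUNTING = 0x31 (monitor + pmc.oi, PMD used as counter)
 *	PFM_REG_CONTROL  = 0x41 (impl + control)
 *	PFM_REG_CONFIG   = 0x81 (impl + config)
 *	PFM_REG_BUFFER   = 0xc1 (impl + buffer)
 * This is why the PMD_IS_COUNTING() and PMC_IS_MONITOR() helpers below
 * compare with '==' rather than testing a single bit.
 */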
#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
/* i is assumed unsigned */
#define PMC_IS_IMPL(i)	(i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	(i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i)	((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i)	((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)	((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)	((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)		pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)	pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)		pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)		pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS		IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS		IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask)		(ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c)		(((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask)	(ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx, n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx, n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg == 1)

#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))

/*
 * context protection macros
 * in SMP:
 *	- we need to protect against CPU concurrency (spin_lock)
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 *	in SMP: local_irq_disable + spin_lock
 *	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 *	in UP : removed automatically
 *	in SMP: protect against context accesses from other CPUs. interrupts
 *	        are not masked. This is useful for the PMU interrupt handler
 *	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
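
/*
 * Illustrative usage sketch (not from the original source): a typical
 * command handler brackets context accesses as
 *
 *	unsigned long flags;
 *	PROTECT_CTX(ctx, flags);
 *	... manipulate ctx ...
 *	UNPROTECT_CTX(ctx, flags);
 *
 * whereas the PMU overflow interrupt handler, which per the comment
 * above cannot race with itself, can use the cheaper
 * PROTECT_CTX_NOIRQ() and UNPROTECT_CTX_NOIRQ() pair.
 */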

#ifdef CONFIG_SMP
#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()
#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)	(cmp0 & ~0x1UL)
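
/*
 * Example: pfm_freeze_pmu() below writes 1UL (the freeze bit) into
 * pmc0, so a pmc0 value of exactly 0x1 means "frozen, no overflow"
 * and PMC0_HAS_OVFL() is false; any additional bit set in pmc0 marks
 * an overflowed counter and makes the test true.
 */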

#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif

/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64-bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when the counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;

/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */

/*
 * perfmon context: encapsulates all the state of a monitoring session
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */

	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDs */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;

/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)	((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)	((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking

/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */
	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;

/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);

typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)

/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 *	- 0 means recognized PMU
 *	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 *	- cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long	ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t	*pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t	*pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int	num_pmcs;	/* number of PMCs: computed at init time */
	unsigned int	num_pmds;	/* number of PMDs: computed at init time */
	unsigned long	impl_pmcs[4];	/* bitmask of implemented PMCs */
	unsigned long	impl_pmds[4];	/* bitmask of implemented PMDs */

	char		*pmu_name;	/* PMU family name */
	unsigned int	pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int	flags;		/* pmu specific flags */
	unsigned int	num_ibrs;	/* number of IBRs: computed at init time */
	unsigned int	num_dbrs;	/* number of DBRs: computed at init time */
	unsigned int	num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int		(*probe)(void);	/* customized probe routine */
	unsigned int	use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;

/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */

/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long	val;
	ibr_mask_reg_t	ibr;
	dbr_mask_reg_t	dbr;
} dbreg_t;

/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */

typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;

/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t	*pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static struct ctl_table pfm_ctl_table[] = {
	{
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{}
};

static struct ctl_table pfm_sysctl_dir[] = {
	{
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};

static struct ctl_table pfm_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
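
/*
 * The three tables above chain together so that the knobs appear under
 * /proc/sys/kernel/perfmon/, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/perfmon/debug
 *	echo 1 > /proc/sys/kernel/perfmon/debug_ovfl
 */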
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a, b)	per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}
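
/*
 * The *_ctxsw variants take only the spinlock: they are intended for
 * the context-switch path, where interrupts are presumably already
 * disabled, making the irqsave/irqrestore of PROTECT_CTX() redundant.
 */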

/* forward declaration */
static const struct dentry_operations pfmfs_dentry_operations;

static struct dentry *
pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
			    PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name		= "pfmfs",
	.mount		= pfmfs_mount,
	.kill_sb	= kill_anon_super,
};
MODULE_ALIAS_FS("pfmfs");

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs(struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[] = {
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,	/* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;

	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0, 1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0, 0UL);
	ia64_srlz_d();
}

static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i = 0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i = 0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to the unimplemented part is ignored, so we do not need to
	 * mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
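
/*
 * Worked example (illustrative; the counter width depends on the PMU):
 * with 47-bit hardware counters, ovfl_val == (1UL << 47) - 1. A write
 * stores the upper bits (val & ~ovfl_val) in ctx_pmds[i].val and the
 * low 47 bits in the hardware PMD; pfm_read_soft_counter() then
 * reconstructs the full 64-bit value by adding the two parts back.
 */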

static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}
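
/*
 * Note: as with any ring buffer that distinguishes full from empty by
 * comparing head and tail, one slot is always left unused, so at most
 * PFM_MAX_MSGS-1 messages can be queued at once.
 */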

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}

static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vzalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}
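
/*
 * pfm_rvmalloc()/pfm_rvfree() come as a pair: each page of the buffer
 * is marked reserved, presumably so the sampling buffer can be
 * remapped into user space (see ctx_smpl_vaddr) without the VM
 * treating its pages as ordinary pageable memory.
 */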
  711. static pfm_context_t *
  712. pfm_context_alloc(int ctx_flags)
  713. {
  714. pfm_context_t *ctx;
  715. /*
  716. * allocate context descriptor
  717. * must be able to free with interrupts disabled
  718. */
  719. ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
  720. if (ctx) {
  721. DPRINT(("alloc ctx @%p\n", ctx));
  722. /*
  723. * init context protection lock
  724. */
  725. spin_lock_init(&ctx->ctx_lock);
  726. /*
  727. * context is unloaded
  728. */
  729. ctx->ctx_state = PFM_CTX_UNLOADED;
  730. /*
  731. * initialization of context's flags
  732. */
  733. ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
  734. ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
  735. ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
  736. /*
  737. * will move to set properties
  738. * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
  739. */
  740. /*
  741. * init restart semaphore to locked
  742. */
  743. init_completion(&ctx->ctx_restart_done);
  744. /*
  745. * activation is used in SMP only
  746. */
  747. ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
  748. SET_LAST_CPU(ctx, -1);
  749. /*
  750. * initialize notification message queue
  751. */
  752. ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
  753. init_waitqueue_head(&ctx->ctx_msgq_wait);
  754. init_waitqueue_head(&ctx->ctx_zombieq);
  755. }
  756. return ctx;
  757. }
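/*
 * Illustration (not part of the original perfmon code): how ctx_flags
 * map onto the freshly allocated context above, e.g. for a system-wide
 * session that suppresses overflow messages:
 */
#if 0
	ctx = pfm_context_alloc(PFM_FL_SYSTEM_WIDE|PFM_FL_OVFL_NO_MSG);
	/* -> ctx_fl_system=1, ctx_fl_no_msg=1, ctx_fl_block=0,
	 *    ctx_state=PFM_CTX_UNLOADED, empty message queue */
#endif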
  758. static void
  759. pfm_context_free(pfm_context_t *ctx)
  760. {
  761. if (ctx) {
  762. DPRINT(("free ctx @%p\n", ctx));
  763. kfree(ctx);
  764. }
  765. }
  766. static void
  767. pfm_mask_monitoring(struct task_struct *task)
  768. {
  769. pfm_context_t *ctx = PFM_GET_CTX(task);
  770. unsigned long mask, val, ovfl_mask;
  771. int i;
  772. DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
  773. ovfl_mask = pmu_conf->ovfl_val;
  774. /*
  775. * monitoring can only be masked as a result of a valid
  776. * counter overflow. In UP, it means that the PMU still
  777. * has an owner. Note that the owner can be different
  778. * from the current task. However the PMU state belongs
  779. * to the owner.
  780. * In SMP, a valid overflow only happens when task is
  781. * current. Therefore if we come here, we know that
  782. * the PMU state belongs to the current task, therefore
  783. * we can access the live registers.
  784. *
  785. * So in both cases, the live register contains the owner's
  786. * state. We can ONLY touch the PMU registers and NOT the PSR.
  787. *
  788. * As a consequence to this call, the ctx->th_pmds[] array
  789. * contains stale information which must be ignored
  790. * when context is reloaded AND monitoring is active (see
  791. * pfm_restart).
  792. */
  793. mask = ctx->ctx_used_pmds[0];
  794. for (i = 0; mask; i++, mask>>=1) {
  795. /* skip non used pmds */
  796. if ((mask & 0x1) == 0) continue;
  797. val = ia64_get_pmd(i);
  798. if (PMD_IS_COUNTING(i)) {
  799. /*
  800. * we rebuild the full 64 bit value of the counter
  801. */
  802. ctx->ctx_pmds[i].val += (val & ovfl_mask);
  803. } else {
  804. ctx->ctx_pmds[i].val = val;
  805. }
  806. DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
  807. i,
  808. ctx->ctx_pmds[i].val,
  809. val & ovfl_mask));
  810. }
  811. /*
  812. * mask monitoring by setting the privilege level to 0
  813. * we cannot use psr.pp/psr.up for this, it is controlled by
  814. * the user
  815. *
  816. * if task is current, modify actual registers, otherwise modify
  817. * thread save state, i.e., what will be restored in pfm_load_regs()
  818. */
  819. mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
  820. for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
  821. if ((mask & 0x1) == 0UL) continue;
  822. ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
  823. ctx->th_pmcs[i] &= ~0xfUL;
  824. DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
  825. }
  826. /*
  827. * make all of this visible
  828. */
  829. ia64_srlz_d();
  830. }
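/*
 * Worked example (not part of the original perfmon code) of the 64-bit
 * rebuild above, assuming for illustration 47-bit hardware counters,
 * i.e. ovfl_mask == (1UL<<47)-1:
 *
 *   ctx_pmds[i].val (soft, upper bits) = 0xffff800000000000
 *   ia64_get_pmd(i) (hw,   lower bits) = 0x0000000000001234
 *   after ctx_pmds[i].val += (val & ovfl_mask):
 *       full 64-bit counter value      = 0xffff800000001234
 */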
  831. /*
  832. * must always be done with task == current
  833. *
  834. * context must be in MASKED state when calling
  835. */
  836. static void
  837. pfm_restore_monitoring(struct task_struct *task)
  838. {
  839. pfm_context_t *ctx = PFM_GET_CTX(task);
  840. unsigned long mask, ovfl_mask;
  841. unsigned long psr, val;
  842. int i, is_system;
  843. is_system = ctx->ctx_fl_system;
  844. ovfl_mask = pmu_conf->ovfl_val;
  845. if (task != current) {
  846. printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
  847. return;
  848. }
  849. if (ctx->ctx_state != PFM_CTX_MASKED) {
  850. printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
  851. task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
  852. return;
  853. }
  854. psr = pfm_get_psr();
  855. /*
  856. * monitoring is masked via the PMC.
  857. * As we restore their value, we do not want each counter to
  858. * restart right away. We stop monitoring using the PSR,
  859. * restore the PMC (and PMD) and then re-establish the psr
  860. * as it was. Note that there can be no pending overflow at
  861. * this point, because monitoring was MASKED.
  862. *
863. * system-wide sessions are pinned and self-monitoring
  864. */
  865. if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
  866. /* disable dcr pp */
  867. ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
  868. pfm_clear_psr_pp();
  869. } else {
  870. pfm_clear_psr_up();
  871. }
  872. /*
  873. * first, we restore the PMD
  874. */
  875. mask = ctx->ctx_used_pmds[0];
  876. for (i = 0; mask; i++, mask>>=1) {
  877. /* skip non used pmds */
  878. if ((mask & 0x1) == 0) continue;
  879. if (PMD_IS_COUNTING(i)) {
  880. /*
  881. * we split the 64bit value according to
  882. * counter width
  883. */
  884. val = ctx->ctx_pmds[i].val & ovfl_mask;
  885. ctx->ctx_pmds[i].val &= ~ovfl_mask;
  886. } else {
  887. val = ctx->ctx_pmds[i].val;
  888. }
  889. ia64_set_pmd(i, val);
  890. DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
  891. i,
  892. ctx->ctx_pmds[i].val,
  893. val));
  894. }
  895. /*
  896. * restore the PMCs
  897. */
  898. mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
  899. for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
  900. if ((mask & 0x1) == 0UL) continue;
  901. ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
  902. ia64_set_pmc(i, ctx->th_pmcs[i]);
  903. DPRINT(("[%d] pmc[%d]=0x%lx\n",
  904. task_pid_nr(task), i, ctx->th_pmcs[i]));
  905. }
  906. ia64_srlz_d();
  907. /*
908. * must restore DBR/IBR because they could be modified while masked
  909. * XXX: need to optimize
  910. */
  911. if (ctx->ctx_fl_using_dbreg) {
  912. pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
  913. pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
  914. }
  915. /*
  916. * now restore PSR
  917. */
  918. if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
  919. /* enable dcr pp */
  920. ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
  921. ia64_srlz_i();
  922. }
  923. pfm_set_psr_l(psr);
  924. }
  925. static inline void
  926. pfm_save_pmds(unsigned long *pmds, unsigned long mask)
  927. {
  928. int i;
  929. ia64_srlz_d();
  930. for (i=0; mask; i++, mask>>=1) {
  931. if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
  932. }
  933. }
  934. /*
935. * reload from thread state (used for ctxsw only)
  936. */
  937. static inline void
  938. pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
  939. {
  940. int i;
  941. unsigned long val, ovfl_val = pmu_conf->ovfl_val;
  942. for (i=0; mask; i++, mask>>=1) {
  943. if ((mask & 0x1) == 0) continue;
  944. val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
  945. ia64_set_pmd(i, val);
  946. }
  947. ia64_srlz_d();
  948. }
  949. /*
  950. * propagate PMD from context to thread-state
  951. */
  952. static inline void
  953. pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
  954. {
  955. unsigned long ovfl_val = pmu_conf->ovfl_val;
  956. unsigned long mask = ctx->ctx_all_pmds[0];
  957. unsigned long val;
  958. int i;
  959. DPRINT(("mask=0x%lx\n", mask));
  960. for (i=0; mask; i++, mask>>=1) {
  961. val = ctx->ctx_pmds[i].val;
  962. /*
  963. * We break up the 64 bit value into 2 pieces
  964. * the lower bits go to the machine state in the
  965. * thread (will be reloaded on ctxsw in).
  966. * The upper part stays in the soft-counter.
  967. */
  968. if (PMD_IS_COUNTING(i)) {
  969. ctx->ctx_pmds[i].val = val & ~ovfl_val;
  970. val &= ovfl_val;
  971. }
  972. ctx->th_pmds[i] = val;
  973. DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
  974. i,
  975. ctx->th_pmds[i],
  976. ctx->ctx_pmds[i].val));
  977. }
  978. }
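/*
 * Worked example (not part of the original perfmon code) of the split
 * above for a counting PMD, assuming ovfl_val == (1UL<<47)-1:
 *
 *   ctx_pmds[i].val = 0xffff800000001234
 *   -> soft part, stays in ctx_pmds[i].val: 0xffff800000000000
 *   -> hw part, goes to th_pmds[i]        : 0x0000000000001234
 *
 * pfm_restore_pmds() above later reloads only the hw part into the
 * physical PMD on context switch in.
 */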
  979. /*
  980. * propagate PMC from context to thread-state
  981. */
  982. static inline void
  983. pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
  984. {
  985. unsigned long mask = ctx->ctx_all_pmcs[0];
  986. int i;
  987. DPRINT(("mask=0x%lx\n", mask));
  988. for (i=0; mask; i++, mask>>=1) {
989. /* every slot up to the highest mask bit is copied; unused slots hold 0 or default values, so this is harmless */
  990. ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
  991. DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
  992. }
  993. }
  994. static inline void
  995. pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
  996. {
  997. int i;
  998. for (i=0; mask; i++, mask>>=1) {
  999. if ((mask & 0x1) == 0) continue;
  1000. ia64_set_pmc(i, pmcs[i]);
  1001. }
  1002. ia64_srlz_d();
  1003. }
  1004. static inline int
  1005. pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
  1006. {
  1007. return memcmp(a, b, sizeof(pfm_uuid_t));
  1008. }
  1009. static inline int
  1010. pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
  1011. {
  1012. int ret = 0;
  1013. if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
  1014. return ret;
  1015. }
  1016. static inline int
  1017. pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
  1018. {
  1019. int ret = 0;
  1020. if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
  1021. return ret;
  1022. }
  1023. static inline int
  1024. pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
  1025. int cpu, void *arg)
  1026. {
  1027. int ret = 0;
  1028. if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
  1029. return ret;
  1030. }
  1031. static inline int
  1032. pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
  1033. int cpu, void *arg)
  1034. {
  1035. int ret = 0;
  1036. if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
  1037. return ret;
  1038. }
  1039. static inline int
  1040. pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
  1041. {
  1042. int ret = 0;
  1043. if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
  1044. return ret;
  1045. }
  1046. static inline int
  1047. pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
  1048. {
  1049. int ret = 0;
  1050. if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
  1051. return ret;
  1052. }
  1053. static pfm_buffer_fmt_t *
  1054. __pfm_find_buffer_fmt(pfm_uuid_t uuid)
  1055. {
  1056. struct list_head * pos;
  1057. pfm_buffer_fmt_t * entry;
  1058. list_for_each(pos, &pfm_buffer_fmt_list) {
  1059. entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
  1060. if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
  1061. return entry;
  1062. }
  1063. return NULL;
  1064. }
  1065. /*
  1066. * find a buffer format based on its uuid
  1067. */
  1068. static pfm_buffer_fmt_t *
  1069. pfm_find_buffer_fmt(pfm_uuid_t uuid)
  1070. {
  1071. pfm_buffer_fmt_t * fmt;
  1072. spin_lock(&pfm_buffer_fmt_lock);
  1073. fmt = __pfm_find_buffer_fmt(uuid);
  1074. spin_unlock(&pfm_buffer_fmt_lock);
  1075. return fmt;
  1076. }
  1077. int
  1078. pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
  1079. {
  1080. int ret = 0;
  1081. /* some sanity checks */
  1082. if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
  1083. /* we need at least a handler */
  1084. if (fmt->fmt_handler == NULL) return -EINVAL;
  1085. /*
1086. * XXX: need to check validity of fmt_arg_size
  1087. */
  1088. spin_lock(&pfm_buffer_fmt_lock);
  1089. if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
  1090. printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
  1091. ret = -EBUSY;
  1092. goto out;
  1093. }
  1094. list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
  1095. printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
  1096. out:
  1097. spin_unlock(&pfm_buffer_fmt_lock);
  1098. return ret;
  1099. }
  1100. EXPORT_SYMBOL(pfm_register_buffer_fmt);
  1101. int
  1102. pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
  1103. {
  1104. pfm_buffer_fmt_t *fmt;
  1105. int ret = 0;
  1106. spin_lock(&pfm_buffer_fmt_lock);
  1107. fmt = __pfm_find_buffer_fmt(uuid);
  1108. if (!fmt) {
  1109. printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
  1110. ret = -EINVAL;
  1111. goto out;
  1112. }
  1113. list_del_init(&fmt->fmt_list);
  1114. printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
  1115. out:
  1116. spin_unlock(&pfm_buffer_fmt_lock);
  1117. return ret;
  1118. }
  1119. EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
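/*
 * Illustration (not part of the original perfmon code): minimal
 * registration sketch for the two exported calls above. The name, uuid
 * bytes and handler below are hypothetical stand-ins for what a real
 * sampling-format module (such as the default format) would supply:
 */
#if 0
static pfm_buffer_fmt_t example_fmt = {
	.fmt_name    = "example-fmt",
	.fmt_uuid    = { 0xde, 0xad, 0xbe, 0xef, /* ... 16 bytes total ... */ },
	.fmt_handler = example_ovfl_handler,	/* mandatory, checked above */
};

static int __init example_fmt_init(void)
{
	return pfm_register_buffer_fmt(&example_fmt);
}

static void __exit example_fmt_exit(void)
{
	pfm_unregister_buffer_fmt(example_fmt.fmt_uuid);
}
#endif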
  1120. static int
  1121. pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
  1122. {
  1123. unsigned long flags;
  1124. /*
  1125. * validity checks on cpu_mask have been done upstream
  1126. */
  1127. LOCK_PFS(flags);
  1128. DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
  1129. pfm_sessions.pfs_sys_sessions,
  1130. pfm_sessions.pfs_task_sessions,
  1131. pfm_sessions.pfs_sys_use_dbregs,
  1132. is_syswide,
  1133. cpu));
  1134. if (is_syswide) {
  1135. /*
  1136. * cannot mix system wide and per-task sessions
  1137. */
  1138. if (pfm_sessions.pfs_task_sessions > 0UL) {
  1139. DPRINT(("system wide not possible, %u conflicting task_sessions\n",
  1140. pfm_sessions.pfs_task_sessions));
  1141. goto abort;
  1142. }
  1143. if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
  1144. DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
  1145. pfm_sessions.pfs_sys_session[cpu] = task;
1146. pfm_sessions.pfs_sys_sessions++;
  1147. } else {
  1148. if (pfm_sessions.pfs_sys_sessions) goto abort;
  1149. pfm_sessions.pfs_task_sessions++;
  1150. }
  1151. DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
  1152. pfm_sessions.pfs_sys_sessions,
  1153. pfm_sessions.pfs_task_sessions,
  1154. pfm_sessions.pfs_sys_use_dbregs,
  1155. is_syswide,
  1156. cpu));
  1157. /*
  1158. * Force idle() into poll mode
  1159. */
  1160. cpu_idle_poll_ctrl(true);
  1161. UNLOCK_PFS(flags);
  1162. return 0;
  1163. error_conflict:
  1164. DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
  1165. task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
  1166. cpu));
  1167. abort:
  1168. UNLOCK_PFS(flags);
  1169. return -EBUSY;
  1170. }
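/*
 * Illustration (not part of the original perfmon code): trace of the
 * mutual exclusion enforced by the accounting above:
 *
 *   reserve(per-task)        -> pfs_task_sessions = 1
 *   reserve(syswide, cpu=2)  -> -EBUSY, task sessions active
 *   unreserve(per-task)      -> pfs_task_sessions = 0
 *   reserve(syswide, cpu=2)  -> pfs_sys_sessions = 1, idle forced to poll
 *   reserve(syswide, cpu=2)  -> -EBUSY, CPU2 already has a session
 */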
  1171. static int
  1172. pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
  1173. {
  1174. unsigned long flags;
  1175. /*
  1176. * validity checks on cpu_mask have been done upstream
  1177. */
  1178. LOCK_PFS(flags);
  1179. DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
  1180. pfm_sessions.pfs_sys_sessions,
  1181. pfm_sessions.pfs_task_sessions,
  1182. pfm_sessions.pfs_sys_use_dbregs,
  1183. is_syswide,
  1184. cpu));
  1185. if (is_syswide) {
  1186. pfm_sessions.pfs_sys_session[cpu] = NULL;
  1187. /*
1188. * this accounting would not work if more than one bit were set in cpu_mask
  1189. */
  1190. if (ctx && ctx->ctx_fl_using_dbreg) {
  1191. if (pfm_sessions.pfs_sys_use_dbregs == 0) {
  1192. printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
  1193. } else {
  1194. pfm_sessions.pfs_sys_use_dbregs--;
  1195. }
  1196. }
  1197. pfm_sessions.pfs_sys_sessions--;
  1198. } else {
  1199. pfm_sessions.pfs_task_sessions--;
  1200. }
  1201. DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
  1202. pfm_sessions.pfs_sys_sessions,
  1203. pfm_sessions.pfs_task_sessions,
  1204. pfm_sessions.pfs_sys_use_dbregs,
  1205. is_syswide,
  1206. cpu));
  1207. /* Undo forced polling. Last session reenables pal_halt */
  1208. cpu_idle_poll_ctrl(false);
  1209. UNLOCK_PFS(flags);
  1210. return 0;
  1211. }
  1212. /*
  1213. * removes virtual mapping of the sampling buffer.
1214. * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
  1215. * a PROTECT_CTX() section.
  1216. */
  1217. static int
  1218. pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
  1219. {
  1220. struct task_struct *task = current;
  1221. int r;
  1222. /* sanity checks */
  1223. if (task->mm == NULL || size == 0UL || vaddr == NULL) {
  1224. printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
  1225. return -EINVAL;
  1226. }
  1227. DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
  1228. /*
  1229. * does the actual unmapping
  1230. */
  1231. r = vm_munmap((unsigned long)vaddr, size);
1232. if (r != 0) {
  1233. printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
  1234. }
  1235. DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
  1236. return 0;
  1237. }
  1238. /*
  1239. * free actual physical storage used by sampling buffer
  1240. */
  1241. #if 0
  1242. static int
  1243. pfm_free_smpl_buffer(pfm_context_t *ctx)
  1244. {
  1245. pfm_buffer_fmt_t *fmt;
  1246. if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
  1247. /*
  1248. * we won't use the buffer format anymore
  1249. */
  1250. fmt = ctx->ctx_buf_fmt;
  1251. DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
  1252. ctx->ctx_smpl_hdr,
  1253. ctx->ctx_smpl_size,
  1254. ctx->ctx_smpl_vaddr));
  1255. pfm_buf_fmt_exit(fmt, current, NULL, NULL);
  1256. /*
  1257. * free the buffer
  1258. */
  1259. pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
  1260. ctx->ctx_smpl_hdr = NULL;
  1261. ctx->ctx_smpl_size = 0UL;
  1262. return 0;
  1263. invalid_free:
  1264. printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
  1265. return -EINVAL;
  1266. }
  1267. #endif
  1268. static inline void
  1269. pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
  1270. {
  1271. if (fmt == NULL) return;
  1272. pfm_buf_fmt_exit(fmt, current, NULL, NULL);
  1273. }
  1274. /*
  1275. * pfmfs should _never_ be mounted by userland - too much of security hassle,
  1276. * no real gain from having the whole whorehouse mounted. So we don't need
  1277. * any operations on the root directory. However, we need a non-trivial
  1278. * d_name - pfm: will go nicely and kill the special-casing in procfs.
  1279. */
  1280. static struct vfsmount *pfmfs_mnt __read_mostly;
  1281. static int __init
  1282. init_pfm_fs(void)
  1283. {
  1284. int err = register_filesystem(&pfm_fs_type);
  1285. if (!err) {
  1286. pfmfs_mnt = kern_mount(&pfm_fs_type);
  1287. err = PTR_ERR(pfmfs_mnt);
  1288. if (IS_ERR(pfmfs_mnt))
  1289. unregister_filesystem(&pfm_fs_type);
  1290. else
  1291. err = 0;
  1292. }
  1293. return err;
  1294. }
  1295. static ssize_t
  1296. pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
  1297. {
  1298. pfm_context_t *ctx;
  1299. pfm_msg_t *msg;
  1300. ssize_t ret;
  1301. unsigned long flags;
  1302. DECLARE_WAITQUEUE(wait, current);
  1303. if (PFM_IS_FILE(filp) == 0) {
  1304. printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
  1305. return -EINVAL;
  1306. }
  1307. ctx = filp->private_data;
  1308. if (ctx == NULL) {
  1309. printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
  1310. return -EINVAL;
  1311. }
  1312. /*
  1313. * check even when there is no message
  1314. */
  1315. if (size < sizeof(pfm_msg_t)) {
  1316. DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
  1317. return -EINVAL;
  1318. }
  1319. PROTECT_CTX(ctx, flags);
  1320. /*
  1321. * put ourselves on the wait queue
  1322. */
  1323. add_wait_queue(&ctx->ctx_msgq_wait, &wait);
  1324. for(;;) {
  1325. /*
  1326. * check wait queue
  1327. */
  1328. set_current_state(TASK_INTERRUPTIBLE);
  1329. DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
  1330. ret = 0;
  1331. if(PFM_CTXQ_EMPTY(ctx) == 0) break;
  1332. UNPROTECT_CTX(ctx, flags);
  1333. /*
  1334. * check non-blocking read
  1335. */
  1336. ret = -EAGAIN;
  1337. if(filp->f_flags & O_NONBLOCK) break;
  1338. /*
  1339. * check pending signals
  1340. */
  1341. if(signal_pending(current)) {
  1342. ret = -EINTR;
  1343. break;
  1344. }
  1345. /*
  1346. * no message, so wait
  1347. */
  1348. schedule();
  1349. PROTECT_CTX(ctx, flags);
  1350. }
  1351. DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
  1352. set_current_state(TASK_RUNNING);
  1353. remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
  1354. if (ret < 0) goto abort;
  1355. ret = -EINVAL;
  1356. msg = pfm_get_next_msg(ctx);
  1357. if (msg == NULL) {
  1358. printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
  1359. goto abort_locked;
  1360. }
  1361. DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
  1362. ret = -EFAULT;
  1363. if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
  1364. abort_locked:
  1365. UNPROTECT_CTX(ctx, flags);
  1366. abort:
  1367. return ret;
  1368. }
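/*
 * Illustration (not part of the original perfmon code): user-level
 * sketch of consuming notification messages through pfm_read() above;
 * ctx_fd is the context file descriptor obtained at context creation.
 * Reads shorter than sizeof(pfm_msg_t) fail with -EINVAL:
 */
#if 0
	pfm_msg_t msg;
	ssize_t r = read(ctx_fd, &msg, sizeof(msg));	/* blocks unless O_NONBLOCK */
	if (r == sizeof(msg) && msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL)
		/* handle the counter overflow notification */;
#endif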
  1369. static ssize_t
  1370. pfm_write(struct file *file, const char __user *ubuf,
  1371. size_t size, loff_t *ppos)
  1372. {
  1373. DPRINT(("pfm_write called\n"));
  1374. return -EINVAL;
  1375. }
  1376. static unsigned int
  1377. pfm_poll(struct file *filp, poll_table * wait)
  1378. {
  1379. pfm_context_t *ctx;
  1380. unsigned long flags;
  1381. unsigned int mask = 0;
  1382. if (PFM_IS_FILE(filp) == 0) {
  1383. printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
  1384. return 0;
  1385. }
  1386. ctx = filp->private_data;
  1387. if (ctx == NULL) {
  1388. printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
  1389. return 0;
  1390. }
  1391. DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
  1392. poll_wait(filp, &ctx->ctx_msgq_wait, wait);
  1393. PROTECT_CTX(ctx, flags);
  1394. if (PFM_CTXQ_EMPTY(ctx) == 0)
  1395. mask = POLLIN | POLLRDNORM;
  1396. UNPROTECT_CTX(ctx, flags);
  1397. DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
  1398. return mask;
  1399. }
  1400. static long
  1401. pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1402. {
  1403. DPRINT(("pfm_ioctl called\n"));
  1404. return -EINVAL;
  1405. }
  1406. /*
  1407. * interrupt cannot be masked when coming here
  1408. */
  1409. static inline int
  1410. pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
  1411. {
  1412. int ret;
  1413. ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
  1414. DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
  1415. task_pid_nr(current),
  1416. fd,
  1417. on,
  1418. ctx->ctx_async_queue, ret));
  1419. return ret;
  1420. }
  1421. static int
  1422. pfm_fasync(int fd, struct file *filp, int on)
  1423. {
  1424. pfm_context_t *ctx;
  1425. int ret;
  1426. if (PFM_IS_FILE(filp) == 0) {
  1427. printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
  1428. return -EBADF;
  1429. }
  1430. ctx = filp->private_data;
  1431. if (ctx == NULL) {
  1432. printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
  1433. return -EBADF;
  1434. }
  1435. /*
1436. * we cannot mask interrupts during this call because it may
1437. * go to sleep if memory is not readily available.
1438. *
1439. * We are protected from the context disappearing by the get_fd()/put_fd()
1440. * done in the caller. Serialization of this function is ensured by the caller.
  1441. */
  1442. ret = pfm_do_fasync(fd, filp, ctx, on);
  1443. DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
  1444. fd,
  1445. on,
  1446. ctx->ctx_async_queue, ret));
  1447. return ret;
  1448. }
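/*
 * Illustration (not part of the original perfmon code): user-level
 * sketch of arming SIGIO delivery through the fasync path above; this
 * is the standard fcntl sequence, nothing perfmon-specific:
 */
#if 0
	fcntl(ctx_fd, F_SETOWN, getpid());
	fcntl(ctx_fd, F_SETFL, fcntl(ctx_fd, F_GETFL) | O_ASYNC);	/* ends up in pfm_fasync() */
#endif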
  1449. #ifdef CONFIG_SMP
  1450. /*
  1451. * this function is exclusively called from pfm_close().
  1452. * The context is not protected at that time, nor are interrupts
  1453. * on the remote CPU. That's necessary to avoid deadlocks.
  1454. */
  1455. static void
  1456. pfm_syswide_force_stop(void *info)
  1457. {
  1458. pfm_context_t *ctx = (pfm_context_t *)info;
  1459. struct pt_regs *regs = task_pt_regs(current);
  1460. struct task_struct *owner;
  1461. unsigned long flags;
  1462. int ret;
  1463. if (ctx->ctx_cpu != smp_processor_id()) {
  1464. printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
  1465. ctx->ctx_cpu,
  1466. smp_processor_id());
  1467. return;
  1468. }
  1469. owner = GET_PMU_OWNER();
  1470. if (owner != ctx->ctx_task) {
  1471. printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
  1472. smp_processor_id(),
  1473. task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
  1474. return;
  1475. }
  1476. if (GET_PMU_CTX() != ctx) {
  1477. printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
  1478. smp_processor_id(),
  1479. GET_PMU_CTX(), ctx);
  1480. return;
  1481. }
  1482. DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
  1483. /*
  1484. * the context is already protected in pfm_close(), we simply
  1485. * need to mask interrupts to avoid a PMU interrupt race on
  1486. * this CPU
  1487. */
  1488. local_irq_save(flags);
  1489. ret = pfm_context_unload(ctx, NULL, 0, regs);
  1490. if (ret) {
  1491. DPRINT(("context_unload returned %d\n", ret));
  1492. }
  1493. /*
  1494. * unmask interrupts, PMU interrupts are now spurious here
  1495. */
  1496. local_irq_restore(flags);
  1497. }
  1498. static void
  1499. pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
  1500. {
  1501. int ret;
  1502. DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
  1503. ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
  1504. DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
  1505. }
  1506. #endif /* CONFIG_SMP */
  1507. /*
  1508. * called for each close(). Partially free resources.
  1509. * When caller is self-monitoring, the context is unloaded.
  1510. */
  1511. static int
  1512. pfm_flush(struct file *filp, fl_owner_t id)
  1513. {
  1514. pfm_context_t *ctx;
  1515. struct task_struct *task;
  1516. struct pt_regs *regs;
  1517. unsigned long flags;
  1518. unsigned long smpl_buf_size = 0UL;
  1519. void *smpl_buf_vaddr = NULL;
  1520. int state, is_system;
  1521. if (PFM_IS_FILE(filp) == 0) {
  1522. DPRINT(("bad magic for\n"));
  1523. return -EBADF;
  1524. }
  1525. ctx = filp->private_data;
  1526. if (ctx == NULL) {
  1527. printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
  1528. return -EBADF;
  1529. }
  1530. /*
  1531. * remove our file from the async queue, if we use this mode.
  1532. * This can be done without the context being protected. We come
  1533. * here when the context has become unreachable by other tasks.
  1534. *
  1535. * We may still have active monitoring at this point and we may
  1536. * end up in pfm_overflow_handler(). However, fasync_helper()
  1537. * operates with interrupts disabled and it cleans up the
  1538. * queue. If the PMU handler is called prior to entering
  1539. * fasync_helper() then it will send a signal. If it is
  1540. * invoked after, it will find an empty queue and no
1541. * signal will be sent. In both cases, we are safe.
  1542. */
  1543. PROTECT_CTX(ctx, flags);
  1544. state = ctx->ctx_state;
  1545. is_system = ctx->ctx_fl_system;
  1546. task = PFM_CTX_TASK(ctx);
  1547. regs = task_pt_regs(task);
  1548. DPRINT(("ctx_state=%d is_current=%d\n",
  1549. state,
  1550. task == current ? 1 : 0));
  1551. /*
  1552. * if state == UNLOADED, then task is NULL
  1553. */
  1554. /*
  1555. * we must stop and unload because we are losing access to the context.
  1556. */
  1557. if (task == current) {
  1558. #ifdef CONFIG_SMP
  1559. /*
  1560. * the task IS the owner but it migrated to another CPU: that's bad
  1561. * but we must handle this cleanly. Unfortunately, the kernel does
  1562. * not provide a mechanism to block migration (while the context is loaded).
  1563. *
  1564. * We need to release the resource on the ORIGINAL cpu.
  1565. */
  1566. if (is_system && ctx->ctx_cpu != smp_processor_id()) {
  1567. DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
  1568. /*
  1569. * keep context protected but unmask interrupt for IPI
  1570. */
  1571. local_irq_restore(flags);
  1572. pfm_syswide_cleanup_other_cpu(ctx);
  1573. /*
  1574. * restore interrupt masking
  1575. */
  1576. local_irq_save(flags);
  1577. /*
  1578. * context is unloaded at this point
  1579. */
  1580. } else
  1581. #endif /* CONFIG_SMP */
  1582. {
  1583. DPRINT(("forcing unload\n"));
  1584. /*
  1585. * stop and unload, returning with state UNLOADED
  1586. * and session unreserved.
  1587. */
  1588. pfm_context_unload(ctx, NULL, 0, regs);
  1589. DPRINT(("ctx_state=%d\n", ctx->ctx_state));
  1590. }
  1591. }
  1592. /*
  1593. * remove virtual mapping, if any, for the calling task.
1594. * the ctx field cannot be reset until the last user calls close().
  1595. *
  1596. * ctx_smpl_vaddr must never be cleared because it is needed
  1597. * by every task with access to the context
  1598. *
  1599. * When called from do_exit(), the mm context is gone already, therefore
  1600. * mm is NULL, i.e., the VMA is already gone and we do not have to
  1601. * do anything here
  1602. */
  1603. if (ctx->ctx_smpl_vaddr && current->mm) {
  1604. smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
  1605. smpl_buf_size = ctx->ctx_smpl_size;
  1606. }
  1607. UNPROTECT_CTX(ctx, flags);
  1608. /*
  1609. * if there was a mapping, then we systematically remove it
  1610. * at this point. Cannot be done inside critical section
  1611. * because some VM function reenables interrupts.
  1612. *
  1613. */
  1614. if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
  1615. return 0;
  1616. }
  1617. /*
  1618. * called either on explicit close() or from exit_files().
  1619. * Only the LAST user of the file gets to this point, i.e., it is
  1620. * called only ONCE.
  1621. *
  1622. * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
  1623. * (fput()),i.e, last task to access the file. Nobody else can access the
  1624. * file at this point.
  1625. *
  1626. * When called from exit_files(), the VMA has been freed because exit_mm()
  1627. * is executed before exit_files().
  1628. *
  1629. * When called from exit_files(), the current task is not yet ZOMBIE but we
  1630. * flush the PMU state to the context.
  1631. */
  1632. static int
  1633. pfm_close(struct inode *inode, struct file *filp)
  1634. {
  1635. pfm_context_t *ctx;
  1636. struct task_struct *task;
  1637. struct pt_regs *regs;
  1638. DECLARE_WAITQUEUE(wait, current);
  1639. unsigned long flags;
  1640. unsigned long smpl_buf_size = 0UL;
  1641. void *smpl_buf_addr = NULL;
  1642. int free_possible = 1;
  1643. int state, is_system;
  1644. DPRINT(("pfm_close called private=%p\n", filp->private_data));
  1645. if (PFM_IS_FILE(filp) == 0) {
  1646. DPRINT(("bad magic\n"));
  1647. return -EBADF;
  1648. }
  1649. ctx = filp->private_data;
  1650. if (ctx == NULL) {
  1651. printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
  1652. return -EBADF;
  1653. }
  1654. PROTECT_CTX(ctx, flags);
  1655. state = ctx->ctx_state;
  1656. is_system = ctx->ctx_fl_system;
  1657. task = PFM_CTX_TASK(ctx);
  1658. regs = task_pt_regs(task);
  1659. DPRINT(("ctx_state=%d is_current=%d\n",
  1660. state,
  1661. task == current ? 1 : 0));
  1662. /*
  1663. * if task == current, then pfm_flush() unloaded the context
  1664. */
  1665. if (state == PFM_CTX_UNLOADED) goto doit;
  1666. /*
  1667. * context is loaded/masked and task != current, we need to
  1668. * either force an unload or go zombie
  1669. */
  1670. /*
  1671. * The task is currently blocked or will block after an overflow.
  1672. * we must force it to wakeup to get out of the
  1673. * MASKED state and transition to the unloaded state by itself.
  1674. *
  1675. * This situation is only possible for per-task mode
  1676. */
  1677. if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
  1678. /*
  1679. * set a "partial" zombie state to be checked
  1680. * upon return from down() in pfm_handle_work().
  1681. *
  1682. * We cannot use the ZOMBIE state, because it is checked
  1683. * by pfm_load_regs() which is called upon wakeup from down().
  1684. * In such case, it would free the context and then we would
  1685. * return to pfm_handle_work() which would access the
  1686. * stale context. Instead, we set a flag invisible to pfm_load_regs()
  1687. * but visible to pfm_handle_work().
  1688. *
  1689. * For some window of time, we have a zombie context with
  1690. * ctx_state = MASKED and not ZOMBIE
  1691. */
  1692. ctx->ctx_fl_going_zombie = 1;
  1693. /*
  1694. * force task to wake up from MASKED state
  1695. */
  1696. complete(&ctx->ctx_restart_done);
  1697. DPRINT(("waking up ctx_state=%d\n", state));
  1698. /*
1699. * put ourselves to sleep waiting for the other
  1700. * task to report completion
  1701. *
  1702. * the context is protected by mutex, therefore there
  1703. * is no risk of being notified of completion before
1704. * being actually on the waitq.
  1705. */
  1706. set_current_state(TASK_INTERRUPTIBLE);
  1707. add_wait_queue(&ctx->ctx_zombieq, &wait);
  1708. UNPROTECT_CTX(ctx, flags);
  1709. /*
  1710. * XXX: check for signals :
  1711. * - ok for explicit close
  1712. * - not ok when coming from exit_files()
  1713. */
  1714. schedule();
  1715. PROTECT_CTX(ctx, flags);
  1716. remove_wait_queue(&ctx->ctx_zombieq, &wait);
  1717. set_current_state(TASK_RUNNING);
  1718. /*
  1719. * context is unloaded at this point
  1720. */
  1721. DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
  1722. }
  1723. else if (task != current) {
  1724. #ifdef CONFIG_SMP
  1725. /*
  1726. * switch context to zombie state
  1727. */
  1728. ctx->ctx_state = PFM_CTX_ZOMBIE;
  1729. DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
  1730. /*
  1731. * cannot free the context on the spot. deferred until
  1732. * the task notices the ZOMBIE state
  1733. */
  1734. free_possible = 0;
  1735. #else
  1736. pfm_context_unload(ctx, NULL, 0, regs);
  1737. #endif
  1738. }
  1739. doit:
1740. /* reload state: it may have changed while the context was unprotected */
  1741. state = ctx->ctx_state;
  1742. /*
  1743. * the context is still attached to a task (possibly current)
  1744. * we cannot destroy it right now
  1745. */
  1746. /*
  1747. * we must free the sampling buffer right here because
  1748. * we cannot rely on it being cleaned up later by the
  1749. * monitored task. It is not possible to free vmalloc'ed
  1750. * memory in pfm_load_regs(). Instead, we remove the buffer
1751. * now. Should there be a subsequent PMU overflow originally
1752. * meant for sampling, it will be converted to spurious
1753. * and that's fine because the monitoring tool is gone anyway.
  1754. */
  1755. if (ctx->ctx_smpl_hdr) {
  1756. smpl_buf_addr = ctx->ctx_smpl_hdr;
  1757. smpl_buf_size = ctx->ctx_smpl_size;
  1758. /* no more sampling */
  1759. ctx->ctx_smpl_hdr = NULL;
  1760. ctx->ctx_fl_is_sampling = 0;
  1761. }
  1762. DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
  1763. state,
  1764. free_possible,
  1765. smpl_buf_addr,
  1766. smpl_buf_size));
  1767. if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
  1768. /*
1769. * UNLOADED means that the session has already been unreserved.
  1770. */
  1771. if (state == PFM_CTX_ZOMBIE) {
1772. pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
  1773. }
  1774. /*
  1775. * disconnect file descriptor from context must be done
  1776. * before we unlock.
  1777. */
  1778. filp->private_data = NULL;
  1779. /*
  1780. * if we free on the spot, the context is now completely unreachable
1781. * from the caller's side. The monitored task side is also cut, so we
1782. * can free it safely.
  1783. *
  1784. * If we have a deferred free, only the caller side is disconnected.
  1785. */
  1786. UNPROTECT_CTX(ctx, flags);
  1787. /*
  1788. * All memory free operations (especially for vmalloc'ed memory)
  1789. * MUST be done with interrupts ENABLED.
  1790. */
  1791. if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
  1792. /*
  1793. * return the memory used by the context
  1794. */
  1795. if (free_possible) pfm_context_free(ctx);
  1796. return 0;
  1797. }
  1798. static const struct file_operations pfm_file_ops = {
  1799. .llseek = no_llseek,
  1800. .read = pfm_read,
  1801. .write = pfm_write,
  1802. .poll = pfm_poll,
  1803. .unlocked_ioctl = pfm_ioctl,
  1804. .fasync = pfm_fasync,
  1805. .release = pfm_close,
  1806. .flush = pfm_flush
  1807. };
  1808. static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
  1809. {
  1810. return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
  1811. d_inode(dentry)->i_ino);
  1812. }
  1813. static const struct dentry_operations pfmfs_dentry_operations = {
  1814. .d_delete = always_delete_dentry,
  1815. .d_dname = pfmfs_dname,
  1816. };
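/*
 * Illustration (not part of the original perfmon code): the d_dname
 * operation above is what makes a context descriptor show up as
 * "pfm:[<ino>]". User-level sketch, with hypothetical fd number and
 * inode:
 */
#if 0
	char name[64];
	readlink("/proc/self/fd/3", name, sizeof(name)-1);	/* -> "pfm:[1234]" */
#endif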
  1817. static struct file *
  1818. pfm_alloc_file(pfm_context_t *ctx)
  1819. {
  1820. struct file *file;
  1821. struct inode *inode;
  1822. struct path path;
  1823. struct qstr this = { .name = "" };
  1824. /*
  1825. * allocate a new inode
  1826. */
  1827. inode = new_inode(pfmfs_mnt->mnt_sb);
  1828. if (!inode)
  1829. return ERR_PTR(-ENOMEM);
  1830. DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
  1831. inode->i_mode = S_IFCHR|S_IRUGO;
  1832. inode->i_uid = current_fsuid();
  1833. inode->i_gid = current_fsgid();
  1834. /*
  1835. * allocate a new dcache entry
  1836. */
  1837. path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
  1838. if (!path.dentry) {
  1839. iput(inode);
  1840. return ERR_PTR(-ENOMEM);
  1841. }
  1842. path.mnt = mntget(pfmfs_mnt);
  1843. d_add(path.dentry, inode);
  1844. file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
  1845. if (IS_ERR(file)) {
  1846. path_put(&path);
  1847. return file;
  1848. }
  1849. file->f_flags = O_RDONLY;
  1850. file->private_data = ctx;
  1851. return file;
  1852. }
  1853. static int
  1854. pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
  1855. {
  1856. DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
  1857. while (size > 0) {
  1858. unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
  1859. if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
  1860. return -ENOMEM;
  1861. addr += PAGE_SIZE;
  1862. buf += PAGE_SIZE;
  1863. size -= PAGE_SIZE;
  1864. }
  1865. return 0;
  1866. }
  1867. /*
1868. * allocates a sampling buffer and remaps it into the user address space of the task
  1869. */
  1870. static int
  1871. pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
  1872. {
  1873. struct mm_struct *mm = task->mm;
  1874. struct vm_area_struct *vma = NULL;
  1875. unsigned long size;
  1876. void *smpl_buf;
  1877. /*
1878. * the fixed header + requested size, aligned to a page boundary
  1879. */
  1880. size = PAGE_ALIGN(rsize);
  1881. DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
  1882. /*
  1883. * check requested size to avoid Denial-of-service attacks
  1884. * XXX: may have to refine this test
  1885. * Check against address space limit.
  1886. *
  1887. * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
  1888. * return -ENOMEM;
  1889. */
  1890. if (size > task_rlimit(task, RLIMIT_MEMLOCK))
  1891. return -ENOMEM;
  1892. /*
  1893. * We do the easy to undo allocations first.
  1894. *
1895. * pfm_rvmalloc() clears the buffer, so there is no leak
  1896. */
  1897. smpl_buf = pfm_rvmalloc(size);
  1898. if (smpl_buf == NULL) {
  1899. DPRINT(("Can't allocate sampling buffer\n"));
  1900. return -ENOMEM;
  1901. }
  1902. DPRINT(("smpl_buf @%p\n", smpl_buf));
  1903. /* allocate vma */
  1904. vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
  1905. if (!vma) {
  1906. DPRINT(("Cannot allocate vma\n"));
  1907. goto error_kmem;
  1908. }
  1909. INIT_LIST_HEAD(&vma->anon_vma_chain);
  1910. /*
  1911. * partially initialize the vma for the sampling buffer
  1912. */
  1913. vma->vm_mm = mm;
  1914. vma->vm_file = get_file(filp);
  1915. vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
  1916. vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
  1917. /*
  1918. * Now we have everything we need and we can initialize
  1919. * and connect all the data structures
  1920. */
  1921. ctx->ctx_smpl_hdr = smpl_buf;
  1922. ctx->ctx_smpl_size = size; /* aligned size */
  1923. /*
  1924. * Let's do the difficult operations next.
  1925. *
  1926. * now we atomically find some area in the address space and
  1927. * remap the buffer in it.
  1928. */
  1929. down_write(&task->mm->mmap_sem);
  1930. /* find some free area in address space, must have mmap sem held */
  1931. vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
  1932. if (IS_ERR_VALUE(vma->vm_start)) {
  1933. DPRINT(("Cannot find unmapped area for size %ld\n", size));
  1934. up_write(&task->mm->mmap_sem);
  1935. goto error;
  1936. }
  1937. vma->vm_end = vma->vm_start + size;
  1938. vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
  1939. DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
  1940. /* can only be applied to current task, need to have the mm semaphore held when called */
  1941. if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
  1942. DPRINT(("Can't remap buffer\n"));
  1943. up_write(&task->mm->mmap_sem);
  1944. goto error;
  1945. }
  1946. /*
  1947. * now insert the vma in the vm list for the process, must be
  1948. * done with mmap lock held
  1949. */
  1950. insert_vm_struct(mm, vma);
  1951. vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
  1952. vma_pages(vma));
  1953. up_write(&task->mm->mmap_sem);
  1954. /*
  1955. * keep track of user level virtual address
  1956. */
  1957. ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
  1958. *(unsigned long *)user_vaddr = vma->vm_start;
  1959. return 0;
  1960. error:
  1961. kmem_cache_free(vm_area_cachep, vma);
  1962. error_kmem:
  1963. pfm_rvfree(smpl_buf, size);
  1964. return -ENOMEM;
  1965. }
  1966. /*
  1967. * XXX: do something better here
  1968. */
  1969. static int
  1970. pfm_bad_permissions(struct task_struct *task)
  1971. {
  1972. const struct cred *tcred;
  1973. kuid_t uid = current_uid();
  1974. kgid_t gid = current_gid();
  1975. int ret;
  1976. rcu_read_lock();
  1977. tcred = __task_cred(task);
  1978. /* inspired by ptrace_attach() */
  1979. DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
  1980. from_kuid(&init_user_ns, uid),
  1981. from_kgid(&init_user_ns, gid),
  1982. from_kuid(&init_user_ns, tcred->euid),
  1983. from_kuid(&init_user_ns, tcred->suid),
  1984. from_kuid(&init_user_ns, tcred->uid),
  1985. from_kgid(&init_user_ns, tcred->egid),
  1986. from_kgid(&init_user_ns, tcred->sgid)));
  1987. ret = ((!uid_eq(uid, tcred->euid))
  1988. || (!uid_eq(uid, tcred->suid))
  1989. || (!uid_eq(uid, tcred->uid))
  1990. || (!gid_eq(gid, tcred->egid))
  1991. || (!gid_eq(gid, tcred->sgid))
  1992. || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
  1993. rcu_read_unlock();
  1994. return ret;
  1995. }
  1996. static int
  1997. pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
  1998. {
  1999. int ctx_flags;
  2000. /* valid signal */
  2001. ctx_flags = pfx->ctx_flags;
  2002. if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
  2003. /*
  2004. * cannot block in this mode
  2005. */
  2006. if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
  2007. DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
  2008. return -EINVAL;
  2009. }
2010. }
  2012. /* probably more to add here */
  2013. return 0;
  2014. }
  2015. static int
  2016. pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
  2017. unsigned int cpu, pfarg_context_t *arg)
  2018. {
  2019. pfm_buffer_fmt_t *fmt = NULL;
  2020. unsigned long size = 0UL;
  2021. void *uaddr = NULL;
  2022. void *fmt_arg = NULL;
  2023. int ret = 0;
  2024. #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
  2025. /* invoke and lock buffer format, if found */
  2026. fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
  2027. if (fmt == NULL) {
  2028. DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
  2029. return -EINVAL;
  2030. }
  2031. /*
  2032. * buffer argument MUST be contiguous to pfarg_context_t
  2033. */
  2034. if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
  2035. ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
  2036. DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
  2037. if (ret) goto error;
  2038. /* link buffer format and context */
  2039. ctx->ctx_buf_fmt = fmt;
  2040. ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
  2041. /*
  2042. * check if buffer format wants to use perfmon buffer allocation/mapping service
  2043. */
  2044. ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
  2045. if (ret) goto error;
  2046. if (size) {
  2047. /*
  2048. * buffer is always remapped into the caller's address space
  2049. */
  2050. ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
  2051. if (ret) goto error;
  2052. /* keep track of user address of buffer */
  2053. arg->ctx_smpl_vaddr = uaddr;
  2054. }
  2055. ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
  2056. error:
  2057. return ret;
  2058. }
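/*
 * Illustration (not part of the original perfmon code): sketch of the
 * contiguous argument layout consumed by PFM_CTXARG_BUF_ARG() above;
 * the format-specific struct name is hypothetical:
 */
#if 0
	struct {
		pfarg_context_t	  ctx;	/* generic part comes first     */
		example_fmt_arg_t arg;	/* format argument, right after */
	} req;
	/* PFM_CTXARG_BUF_ARG(&req.ctx) points at req.arg */
#endif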
  2059. static void
  2060. pfm_reset_pmu_state(pfm_context_t *ctx)
  2061. {
  2062. int i;
  2063. /*
  2064. * install reset values for PMC.
  2065. */
  2066. for (i=1; PMC_IS_LAST(i) == 0; i++) {
  2067. if (PMC_IS_IMPL(i) == 0) continue;
  2068. ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
  2069. DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
  2070. }
  2071. /*
2072. * PMD registers are set to 0UL when the context is memset()
  2073. */
  2074. /*
  2075. * On context switched restore, we must restore ALL pmc and ALL pmd even
  2076. * when they are not actively used by the task. In UP, the incoming process
  2077. * may otherwise pick up left over PMC, PMD state from the previous process.
  2078. * As opposed to PMD, stale PMC can cause harm to the incoming
  2079. * process because they may change what is being measured.
  2080. * Therefore, we must systematically reinstall the entire
  2081. * PMC state. In SMP, the same thing is possible on the
2082. * same CPU but also between 2 CPUs.
  2083. *
  2084. * The problem with PMD is information leaking especially
  2085. * to user level when psr.sp=0
  2086. *
  2087. * There is unfortunately no easy way to avoid this problem
2088. * on either UP or SMP. This definitely slows down the
  2089. * pfm_load_regs() function.
  2090. */
  2091. /*
  2092. * bitmask of all PMCs accessible to this context
  2093. *
  2094. * PMC0 is treated differently.
  2095. */
  2096. ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
  2097. /*
  2098. * bitmask of all PMDs that are accessible to this context
  2099. */
  2100. ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
  2101. DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
  2102. /*
  2103. * useful in case of re-enable after disable
  2104. */
  2105. ctx->ctx_used_ibrs[0] = 0UL;
  2106. ctx->ctx_used_dbrs[0] = 0UL;
  2107. }
  2108. static int
  2109. pfm_ctx_getsize(void *arg, size_t *sz)
  2110. {
  2111. pfarg_context_t *req = (pfarg_context_t *)arg;
  2112. pfm_buffer_fmt_t *fmt;
  2113. *sz = 0;
  2114. if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
  2115. fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
  2116. if (fmt == NULL) {
  2117. DPRINT(("cannot find buffer format\n"));
  2118. return -EINVAL;
  2119. }
  2120. /* get just enough to copy in user parameters */
  2121. *sz = fmt->fmt_arg_size;
  2122. DPRINT(("arg_size=%lu\n", *sz));
  2123. return 0;
  2124. }
  2125. /*
  2126. * cannot attach if :
  2127. * - kernel task
  2128. * - task not owned by caller
  2129. * - task incompatible with context mode
  2130. */
  2131. static int
  2132. pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
  2133. {
  2134. /*
2135. * no kernel task or task not owned by caller
  2136. */
  2137. if (task->mm == NULL) {
  2138. DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
  2139. return -EPERM;
  2140. }
  2141. if (pfm_bad_permissions(task)) {
  2142. DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
  2143. return -EPERM;
  2144. }
  2145. /*
  2146. * cannot block in self-monitoring mode
  2147. */
  2148. if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
  2149. DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
  2150. return -EINVAL;
  2151. }
  2152. if (task->exit_state == EXIT_ZOMBIE) {
  2153. DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
  2154. return -EBUSY;
  2155. }
  2156. /*
  2157. * always ok for self
  2158. */
  2159. if (task == current) return 0;
  2160. if (!task_is_stopped_or_traced(task)) {
  2161. DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
  2162. return -EBUSY;
  2163. }
  2164. /*
  2165. * make sure the task is off any CPU
  2166. */
  2167. wait_task_inactive(task, 0);
  2168. /* more to come... */
  2169. return 0;
  2170. }
  2171. static int
  2172. pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
  2173. {
  2174. struct task_struct *p = current;
  2175. int ret;
  2176. /* XXX: need to add more checks here */
  2177. if (pid < 2) return -EPERM;
  2178. if (pid != task_pid_vnr(current)) {
  2179. read_lock(&tasklist_lock);
  2180. p = find_task_by_vpid(pid);
  2181. /* make sure task cannot go away while we operate on it */
  2182. if (p) get_task_struct(p);
  2183. read_unlock(&tasklist_lock);
  2184. if (p == NULL) return -ESRCH;
  2185. }
  2186. ret = pfm_task_incompatible(ctx, p);
  2187. if (ret == 0) {
  2188. *task = p;
  2189. } else if (p != current) {
  2190. pfm_put_task(p);
  2191. }
  2192. return ret;
  2193. }
  2194. static int
  2195. pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
  2196. {
  2197. pfarg_context_t *req = (pfarg_context_t *)arg;
  2198. struct file *filp;
  2199. struct path path;
  2200. int ctx_flags;
  2201. int fd;
  2202. int ret;
  2203. /* let's check the arguments first */
  2204. ret = pfarg_is_sane(current, req);
  2205. if (ret < 0)
  2206. return ret;
  2207. ctx_flags = req->ctx_flags;
  2208. ret = -ENOMEM;
  2209. fd = get_unused_fd_flags(0);
  2210. if (fd < 0)
  2211. return fd;
  2212. ctx = pfm_context_alloc(ctx_flags);
  2213. if (!ctx)
  2214. goto error;
  2215. filp = pfm_alloc_file(ctx);
  2216. if (IS_ERR(filp)) {
  2217. ret = PTR_ERR(filp);
  2218. goto error_file;
  2219. }
  2220. req->ctx_fd = ctx->ctx_fd = fd;
  2221. /*
  2222. * does the user want to sample?
  2223. */
  2224. if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
  2225. ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
  2226. if (ret)
  2227. goto buffer_error;
  2228. }
  2229. DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
  2230. ctx,
  2231. ctx_flags,
  2232. ctx->ctx_fl_system,
  2233. ctx->ctx_fl_block,
  2234. ctx->ctx_fl_excl_idle,
  2235. ctx->ctx_fl_no_msg,
  2236. ctx->ctx_fd));
  2237. /*
  2238. * initialize soft PMU state
  2239. */
  2240. pfm_reset_pmu_state(ctx);
  2241. fd_install(fd, filp);
  2242. return 0;
  2243. buffer_error:
  2244. path = filp->f_path;
  2245. put_filp(filp);
  2246. path_put(&path);
  2247. if (ctx->ctx_buf_fmt) {
  2248. pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
  2249. }
  2250. error_file:
  2251. pfm_context_free(ctx);
  2252. error:
  2253. put_unused_fd(fd);
  2254. return ret;
  2255. }
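/*
 * Illustration (not part of the original perfmon code): user-level
 * sketch of reaching pfm_context_create() above through the ia64
 * perfmonctl() system call; a zeroed uuid requests no sampling buffer:
 */
#if 0
	pfarg_context_t req;
	int ctx_fd = -1;
	memset(&req, 0, sizeof(req));		/* null uuid: no sampling buffer */
	req.ctx_flags = PFM_FL_SYSTEM_WIDE;
	if (perfmonctl(0, PFM_CREATE_CONTEXT, &req, 1) == 0)
		ctx_fd = req.ctx_fd;		/* installed by fd_install() above */
#endif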
  2256. static inline unsigned long
  2257. pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
  2258. {
  2259. unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
  2260. unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
  2261. extern unsigned long carta_random32 (unsigned long seed);
  2262. if (reg->flags & PFM_REGFL_RANDOM) {
  2263. new_seed = carta_random32(old_seed);
  2264. val -= (old_seed & mask); /* counter values are negative numbers! */
  2265. if ((mask >> 32) != 0)
  2266. /* construct a full 64-bit random value: */
  2267. new_seed |= carta_random32(old_seed >> 32) << 32;
  2268. reg->seed = new_seed;
  2269. }
  2270. reg->lval = val;
  2271. return val;
  2272. }
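/*
 * Worked example (not part of the original perfmon code) for the helper
 * above. PMDs count up and interrupt on overflow, so a sampling period p
 * is programmed as the reset value -p (truncated to counter width by the
 * caller). With PFM_REGFL_RANDOM, (old_seed & mask) extra counts are
 * subtracted, lengthening the period by up to mask events:
 *
 *   long_reset = -100000    -> next overflow after 100000 events
 *   val -= old_seed & mask  -> overflow after 100000 + (old_seed & mask)
 */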
  2273. static void
  2274. pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
  2275. {
  2276. unsigned long mask = ovfl_regs[0];
  2277. unsigned long reset_others = 0UL;
  2278. unsigned long val;
  2279. int i;
  2280. /*
  2281. * now restore reset value on sampling overflowed counters
  2282. */
  2283. mask >>= PMU_FIRST_COUNTER;
  2284. for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
  2285. if ((mask & 0x1UL) == 0UL) continue;
  2286. ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
  2287. reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
  2288. DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
  2289. }
  2290. /*
  2291. * Now take care of resetting the other registers
  2292. */
  2293. for(i = 0; reset_others; i++, reset_others >>= 1) {
  2294. if ((reset_others & 0x1) == 0) continue;
  2295. ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
  2296. DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
  2297. is_long_reset ? "long" : "short", i, val));
  2298. }
  2299. }
  2300. static void
  2301. pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
  2302. {
  2303. unsigned long mask = ovfl_regs[0];
  2304. unsigned long reset_others = 0UL;
  2305. unsigned long val;
  2306. int i;
  2307. DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
  2308. if (ctx->ctx_state == PFM_CTX_MASKED) {
  2309. pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
  2310. return;
  2311. }
  2312. /*
  2313. * now restore reset value on sampling overflowed counters
  2314. */
  2315. mask >>= PMU_FIRST_COUNTER;
  2316. for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
  2317. if ((mask & 0x1UL) == 0UL) continue;
  2318. val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
  2319. reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
  2320. DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
  2321. pfm_write_soft_counter(ctx, i, val);
  2322. }
  2323. /*
  2324. * Now take care of resetting the other registers
  2325. */
  2326. for(i = 0; reset_others; i++, reset_others >>= 1) {
  2327. if ((reset_others & 0x1) == 0) continue;
  2328. val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
  2329. if (PMD_IS_COUNTING(i)) {
  2330. pfm_write_soft_counter(ctx, i, val);
  2331. } else {
  2332. ia64_set_pmd(i, val);
  2333. }
  2334. DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
  2335. is_long_reset ? "long" : "short", i, val));
  2336. }
  2337. ia64_srlz_d();
  2338. }

static int
pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, pmc_pm;
	unsigned long smpl_pmds, reset_pmds, impl_pmds;
	unsigned int cnum, reg_flags, flags, pmc_type;
	int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
	int is_monitor, is_counting, state;
	int ret = -EINVAL;
	pfm_reg_check_t	wr_func;
#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;
	impl_pmds = pmu_conf->impl_pmds[0];

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (is_loaded) {
		/*
		 * In system-wide mode, when the context is loaded, access can only
		 * happen when the caller is running on the CPU being monitored by
		 * the session. It does not have to be the owner (ctx_task) of the
		 * context per se.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum       = req->reg_num;
		reg_flags  = req->reg_flags;
		value      = req->reg_value;
		smpl_pmds  = req->reg_smpl_pmds[0];
		reset_pmds = req->reg_reset_pmds[0];
		flags      = 0;

		if (cnum >= PMU_MAX_PMCS) {
			DPRINT(("pmc%u is invalid\n", cnum));
			goto error;
		}

		pmc_type    = pmu_conf->pmc_desc[cnum].type;
		pmc_pm      = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
		is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
		is_monitor  = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;

		/*
		 * we reject all non-implemented PMCs as well
		 * as attempts to modify PMC[0-3], which are used
		 * as status registers by the PMU
		 */
		if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
			DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
			goto error;
		}
		wr_func = pmu_conf->pmc_desc[cnum].write_check;

		/*
		 * If the PMC is a monitor, then for any value other than the
		 * default we enforce:
		 *  - system-wide session: PMCx.pm=1 (privileged monitor)
		 *  - per-task           : PMCx.pm=0 (user monitor)
		 */
		if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
			DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
				cnum,
				pmc_pm,
				is_system));
			goto error;
		}

		if (is_counting) {
			/*
			 * enforce generation of overflow interrupt. Necessary on all
			 * CPUs.
			 */
			value |= 1 << PMU_PMC_OI;

			if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
				flags |= PFM_REGFL_OVFL_NOTIFY;
			}

			if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;

			/* verify validity of smpl_pmds */
			if ((smpl_pmds & impl_pmds) != smpl_pmds) {
				DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
				goto error;
			}

			/* verify validity of reset_pmds */
			if ((reset_pmds & impl_pmds) != reset_pmds) {
				DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
				goto error;
			}
		} else {
			if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
				DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
				goto error;
			}
			/* eventid is ignored on non-counting monitors */
		}

		/*
		 * execute write checker, if any
		 */
		if (likely(expert_mode == 0 && wr_func)) {
			ret = (*wr_func)(task, ctx, cnum, &value, regs);
			if (ret) goto error;
			ret = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * Now we commit the changes to the software state
		 */

		/*
		 * update overflow information
		 */
		if (is_counting) {
			/*
			 * full flag update each time a register is programmed
			 */
			ctx->ctx_pmds[cnum].flags = flags;

			ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
			ctx->ctx_pmds[cnum].smpl_pmds[0]  = smpl_pmds;
			ctx->ctx_pmds[cnum].eventid       = req->reg_smpl_eventid;

			/*
			 * Mark all PMDS to be accessed as used.
			 *
			 * We do not keep track of PMC because we have to
			 * systematically restore ALL of them.
			 *
			 * We do not update the used_monitors mask, because
			 * if we have not programmed them, then they will be in
			 * a quiescent state, therefore we will not need to
			 * mask/restore them when the context is MASKED.
			 */
			CTX_USED_PMD(ctx, reset_pmds);
			CTX_USED_PMD(ctx, smpl_pmds);

			/*
			 * make sure we do not try to reset on
			 * restart because we have established new values
			 */
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
		}
		/*
		 * Needed in case the user does not initialize the equivalent
		 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
		 * possible leak here.
		 */
		CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);

		/*
		 * keep track of the monitor PMC that we are using.
		 * we save the value of the pmc in ctx_pmcs[] and if
		 * the monitoring is not stopped for the context we also
		 * place it in the saved state area so that it will be
		 * picked up later by the context switch code.
		 *
		 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
		 *
		 * The value in th_pmcs[] may be modified on overflow, i.e., when
		 * monitoring needs to be stopped.
		 */
		if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);

		/*
		 * update context state
		 */
		ctx->ctx_pmcs[cnum] = value;

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) ctx->th_pmcs[cnum] = value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmc(cnum, value);
			}
#ifdef CONFIG_SMP
			else {
				/*
				 * per-task SMP only here
				 *
				 * we are guaranteed that the task is not running on the
				 * other CPU, so we indicate that this PMC will need to be
				 * reloaded if the task is rescheduled on the CPU it ran
				 * last on.
				 */
				ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
			}
#endif
		}

		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reload_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
			  cnum,
			  value,
			  is_loaded,
			  can_access_pmu,
			  flags,
			  ctx->ctx_all_pmcs[0],
			  ctx->ctx_used_pmds[0],
			  ctx->ctx_pmds[cnum].eventid,
			  smpl_pmds,
			  reset_pmds,
			  ctx->ctx_reload_pmcs[0],
			  ctx->ctx_used_monitors[0],
			  ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make sure the changes are visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;
error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
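
/*
 * A minimal user-level sketch of how this handler is typically reached
 * (field names follow the pfarg_reg_t usage above; the PMC value itself
 * is PMU model specific and only shown as a placeholder):
 *
 *	pfarg_reg_t req;
 *	memset(&req, 0, sizeof(req));
 *	req.reg_num   = 4;		first counting PMC on Itanium
 *	req.reg_value = pmc4_value;	event selection bits, placeholder
 *	perfmonctl(fd, PFM_WRITE_PMCS, &req, 1);
 *
 * The write checker, the pm/oi bit handling and the software state
 * update above all run with the context protected around the command
 * dispatch in sys_perfmonctl().
 */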

static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, hw_value, ovfl_mask;
	unsigned int cnum;
	int i, can_access_pmu = 0, state;
	int is_counting, is_loaded, is_system, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMD when the task is
	 * the owner of the local PMU.
	 */
	if (likely(is_loaded)) {
		/*
		 * In system-wide mode, when the context is loaded, access can only
		 * happen when the caller is running on the CPU being monitored by
		 * the session. It does not have to be the owner (ctx_task) of the
		 * context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum  = req->reg_num;
		value = req->reg_value;

		if (!PMD_IS_IMPL(cnum)) {
			DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
			goto abort_mission;
		}
		is_counting = PMD_IS_COUNTING(cnum);
		wr_func     = pmu_conf->pmd_desc[cnum].write_check;

		/*
		 * execute write checker, if any
		 */
		if (unlikely(expert_mode == 0 && wr_func)) {
			unsigned long v = value;

			ret = (*wr_func)(task, ctx, cnum, &v, regs);
			if (ret) goto abort_mission;

			value = v;
			ret = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * now commit changes to software state
		 */
		hw_value = value;

		/*
		 * update virtualized (64-bit) counter
		 */
		if (is_counting) {
			/*
			 * write context state
			 */
			ctx->ctx_pmds[cnum].lval = value;

			/*
			 * when the context is loaded we use the split value
			 */
			if (is_loaded) {
				hw_value = value & ovfl_mask;
				value    = value & ~ovfl_mask;
			}
		}
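
		/*
		 * Example of the split above, assuming 47-bit hardware
		 * counters (ovfl_mask == (1UL << 47) - 1, as on Itanium 2).
		 * Writing value == (unsigned long)-1000, the usual way to
		 * request an overflow after 1000 events, yields:
		 *
		 *	hw_value = 0x00007ffffffffc18	goes into the hardware PMD
		 *	value    = 0xffff800000000000	stays in ctx_pmds[cnum].val
		 *
		 * pfm_read_pmds() later recombines the two halves into the
		 * full 64-bit virtual counter.
		 */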
		/*
		 * update reset values (not just for counters)
		 */
		ctx->ctx_pmds[cnum].long_reset  = req->reg_long_reset;
		ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;

		/*
		 * update randomization parameters (not just for counters)
		 */
		ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
		ctx->ctx_pmds[cnum].mask = req->reg_random_mask;

		/*
		 * update context value
		 */
		ctx->ctx_pmds[cnum].val = value;

		/*
		 * Keep track of what we use
		 *
		 * We do not keep track of PMC because we have to
		 * systematically restore ALL of them.
		 */
		CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));

		/*
		 * mark this PMD register used as well
		 */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/*
		 * make sure we do not try to reset on
		 * restart because we have established new values
		 */
		if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
		}

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) ctx->th_pmds[cnum] = hw_value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmd(cnum, hw_value);
			} else {
#ifdef CONFIG_SMP
				/*
				 * we are guaranteed that the task is not running on the
				 * other CPU, so we indicate that this PMD will need to be
				 * reloaded if the task is rescheduled on the CPU it ran
				 * last on.
				 */
				ctx->ctx_reload_pmds[0] |= 1UL << cnum;
#endif
			}
		}

		DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
			  "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
			  cnum,
			  value,
			  is_loaded,
			  can_access_pmu,
			  hw_value,
			  ctx->ctx_pmds[cnum].val,
			  ctx->ctx_pmds[cnum].short_reset,
			  ctx->ctx_pmds[cnum].long_reset,
			  PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
			  ctx->ctx_pmds[cnum].seed,
			  ctx->ctx_pmds[cnum].mask,
			  ctx->ctx_used_pmds[0],
			  ctx->ctx_pmds[cnum].reset_pmds[0],
			  ctx->ctx_reload_pmds[0],
			  ctx->ctx_all_pmds[0],
			  ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make changes visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;

abort_mission:
	/*
	 * for now, we have only one possibility for error
	 */
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}

/*
 * By way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
 * interrupt is delivered during the call, it will be kept pending until we leave, making
 * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
 * guaranteed to return consistent data to the user; it may simply be stale. It is not
 * trivial to handle the overflow while inside the call because you may end up in
 * some module sampling buffer code, causing deadlocks.
 */
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	unsigned long val = 0UL, lval, ovfl_mask, sval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, can_access_pmu = 0, state;
	int is_loaded, is_system, is_counting, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t rd_func;

	/*
	 * access is possible when loaded only for
	 * self-monitoring tasks or in UP mode
	 */
	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (likely(is_loaded)) {
		/*
		 * In system-wide mode, when the context is loaded, access can only
		 * happen when the caller is running on the CPU being monitored by
		 * the session. It does not have to be the owner (ctx_task) of the
		 * context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		/*
		 * this can be true when not self-monitoring, but only in UP mode
		 */
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;

		if (can_access_pmu) ia64_srlz_d();
	}
	expert_mode = pfm_sysctl.expert_mode;

	DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
		is_loaded,
		can_access_pmu,
		state));

	/*
	 * on both UP and SMP, we can only read the PMD from the hardware register when
	 * the task is the owner of the local PMU.
	 */
	for (i = 0; i < count; i++, req++) {

		cnum      = req->reg_num;
		reg_flags = req->reg_flags;

		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
		/*
		 * we can only read the registers that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows an optimization in the ctxsw routine
		 * without compromising security (no leaks)
		 */
		if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;

		sval = ctx->ctx_pmds[cnum].val;
		lval = ctx->ctx_pmds[cnum].lval;
		is_counting = PMD_IS_COUNTING(cnum);

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the live local registers due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (can_access_pmu) {
			val = ia64_get_pmd(cnum);
		} else {
			/*
			 * context has been saved
			 * if context is zombie, then task does not exist anymore.
			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
			 */
			val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
		}
		rd_func = pmu_conf->pmd_desc[cnum].read_check;

		if (is_counting) {
			/*
			 * XXX: need to check for overflow when loaded
			 */
			val &= ovfl_mask;
			val += sval;
		}
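
		/*
		 * Example, continuing the pfm_write_pmds() case above
		 * (47-bit counters assumed): with ctx_pmds[cnum].val ==
		 * 0xffff800000000000 and a hardware PMD that has counted up
		 * to 0x00007ffffffffe00, the returned value is
		 * (0x00007ffffffffe00 & ovfl_mask) + sval ==
		 * 0xfffffffffffffe00, i.e., 512 events short of the 64-bit
		 * virtual counter wrapping to zero.
		 */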

		/*
		 * execute read checker, if any
		 */
		if (unlikely(expert_mode == 0 && rd_func)) {
			unsigned long v = val;
			ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
			if (ret) goto error;
			val = v;
			ret = -EINVAL;
		}

		PFM_REG_RETFLAG_SET(reg_flags, 0);

		DPRINT(("pmd[%u]=0x%lx\n", cnum, val));

		/*
		 * update register return value; abort all if a problem occurs
		 * during the copy. We only modify the reg_flags field. No-check
		 * mode is fine because access has been verified upfront in
		 * sys_perfmonctl().
		 */
		req->reg_value          = val;
		req->reg_flags          = reg_flags;
		req->reg_last_reset_val = lval;
	}

	return 0;

error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}

int
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);

int
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
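
/*
 * Sketch of the intended caller: a sampling-format module running in
 * the overflow handler on behalf of current can collect PMD values
 * through this interface. Hypothetical fragment (buffer handling
 * omitted, record_sample() is a made-up helper):
 *
 *	pfarg_reg_t req = { .reg_num = 4 };
 *	if (pfm_mod_read_pmds(current, &req, 1, regs) == 0)
 *		record_sample(req.reg_value);
 */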

/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	unsigned long flags;
	int ret = 0;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	DPRINT(("called for [%d]\n", task_pid_nr(task)));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS(flags);

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		  pfm_sessions.pfs_ptrace_use_dbregs,
		  pfm_sessions.pfs_sys_use_dbregs,
		  task_pid_nr(task), ret));

	UNLOCK_PFS(flags);

	return ret;
}

/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date.
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	unsigned long flags;
	int ret;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	LOCK_PFS(flags);
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS(flags);

	return ret;
}
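
/*
 * The two counters guarded by LOCK_PFS() encode a simple mutual
 * exclusion: pfs_sys_use_dbregs counts system-wide perfmon sessions
 * that programmed the debug registers, pfs_ptrace_use_dbregs counts
 * tasks using them through ptrace(). pfm_use_debug_registers() above,
 * and pfm_write_ibr_dbr()/pfm_context_load() below, each refuse to
 * proceed while the other side's count is non-zero, so the registers
 * are never shared between the two kinds of users.
 */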
static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_buffer_fmt_t *fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state, is_system;
	int ret = 0;

	state     = ctx->ctx_state;
	fmt       = ctx->ctx_buf_fmt;
	is_system = ctx->ctx_fl_system;
	task      = PFM_CTX_TASK(ctx);

	switch(state) {
	case PFM_CTX_MASKED:
		break;
	case PFM_CTX_LOADED:
		if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
		/* fall through */
	case PFM_CTX_UNLOADED:
	case PFM_CTX_ZOMBIE:
		DPRINT(("invalid state=%d\n", state));
		return -EBUSY;
	default:
		DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
		return -EINVAL;
	}

	/*
	 * In system-wide mode, when the context is loaded, access can only
	 * happen when the caller is running on the CPU being monitored by
	 * the session. It does not have to be the owner (ctx_task) of the
	 * context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/* sanity check */
	if (unlikely(task == NULL)) {
		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
		return -EINVAL;
	}

	if (task == current || is_system) {

		fmt = ctx->ctx_buf_fmt;

		DPRINT(("restarting self %d ovfl=0x%lx\n",
			task_pid_nr(task),
			ctx->ctx_ovfl_regs[0]));

		if (CTX_HAS_SMPL(ctx)) {

			prefetch(ctx->ctx_smpl_hdr);

			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 0;

			if (state == PFM_CTX_LOADED)
				ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
			else
				ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		} else {
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 1;
		}

		if (ret == 0) {
			if (rst_ctrl.bits.reset_ovfl_pmds)
				pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

			if (rst_ctrl.bits.mask_monitoring == 0) {
				DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));

				if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
			} else {
				DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));

				/* cannot use pfm_stop_monitoring(task, regs); */
			}
		}
		/*
		 * clear overflowed PMD mask to remove any stale information
		 */
		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * back to LOADED state
		 */
		ctx->ctx_state = PFM_CTX_LOADED;

		/*
		 * XXX: not really useful for self monitoring
		 */
		ctx->ctx_fl_can_restart = 0;

		return 0;
	}

	/*
	 * restart another task
	 */

	/*
	 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
	 * one is seen by the task.
	 */
	if (state == PFM_CTX_MASKED) {
		if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
		/*
		 * will prevent subsequent restarts before this one is
		 * seen by the other task
		 */
		ctx->ctx_fl_can_restart = 0;
	}

	/*
	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
	 * the task is blocked or on its way to block. That's the normal
	 * restart path. If the monitoring is not masked, then the task
	 * can be actively monitoring and we cannot directly intervene.
	 * Therefore we use the trap mechanism to catch the task and
	 * force it to reset the buffer/reset PMDs.
	 *
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_handle_work() before returning to user mode.
	 *
	 * We cannot explicitly reset another task; it MUST always
	 * be done by the task itself. This works for system-wide because
	 * the tool that is controlling the session is logically doing
	 * "self-monitoring".
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
		DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
		complete(&ctx->ctx_restart_done);
	} else {
		DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));

		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;

		PFM_SET_WORK_PENDING(task, 1);

		set_notify_resume(task);

		/*
		 * XXX: send reschedule if task runs on another CPU
		 */
	}
	return 0;
}
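
/*
 * Sketch of the notification/restart cycle this command completes, as
 * seen from a user-level monitoring tool (msg is the perfmon message
 * structure read from the context file descriptor):
 *
 *	read(fd, &msg, sizeof(msg));		wait for overflow message
 *	... process the sampling buffer ...
 *	perfmonctl(fd, PFM_RESTART, NULL, 0);	unblock/reset the monitored task
 */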

static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	unsigned int m = *(unsigned int *)arg;

	pfm_sysctl.debug = m == 0 ? 0 : 1;

	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

	if (m == 0) {
		memset(pfm_stats, 0, sizeof(pfm_stats));
		for (m = 0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
	}
	return 0;
}
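
/*
 * The debug flag toggled above is also reachable through sysctl; on a
 * typical build it shows up as /proc/sys/kernel/perfmon/debug, so
 *
 *	echo 1 > /proc/sys/kernel/perfmon/debug
 *
 * enables the DPRINT() output without going through perfmonctl().
 */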

/*
 * arg can be NULL and count can be zero for this function
 */
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
	unsigned long flags;
	dbreg_t dbreg;
	unsigned int rnum;
	int first_time;
	int ret = 0, state;
	int i, can_access_pmu = 0;
	int is_system, is_loaded;

	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the debug registers when
	 * the task is the owner of the local PMU.
	 */
	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system-wide mode, when the context is loaded, access can only
		 * happen when the caller is running on the CPU being monitored by
		 * the session. It does not have to be the owner (ctx_task) of the
		 * context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}

	/*
	 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r,
	 * and dbr.w, ensuring that no real breakpoint can be installed via this
	 * call.
	 *
	 * IMPORTANT: regs can be NULL in this function
	 */
	first_time = ctx->ctx_fl_using_dbreg == 0;

	/*
	 * don't bother if we are loaded and task is being debugged
	 */
	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
		DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	/*
	 * check for debug registers in system wide mode
	 *
	 * Even though a check is done in pfm_context_load(),
	 * we must repeat it here, in case the registers are
	 * written after the context is loaded
	 */
	if (is_loaded) {
		LOCK_PFS(flags);

		if (first_time && is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs)
				ret = -EBUSY;
			else
				pfm_sessions.pfs_sys_use_dbregs++;
		}
		UNLOCK_PFS(flags);
	}

	if (ret != 0) return ret;

	/*
	 * mark ourselves as user of the debug registers for
	 * perfmon purposes.
	 */
	ctx->ctx_fl_using_dbreg = 1;

	/*
	 * clear hardware registers to make sure we don't
	 * pick up stale state.
	 *
	 * for a system wide session, we do not use
	 * thread.dbr, thread.ibr because this process
	 * never leaves the current CPU and the state
	 * is shared by all processes running on it
	 */
	if (first_time && can_access_pmu) {
		DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
		for (i = 0; i < pmu_conf->num_ibrs; i++) {
			ia64_set_ibr(i, 0UL);
			ia64_dv_serialize_instruction();
		}
		ia64_srlz_i();
		for (i = 0; i < pmu_conf->num_dbrs; i++) {
			ia64_set_dbr(i, 0UL);
			ia64_dv_serialize_data();
		}
		ia64_srlz_d();
	}

	/*
	 * Now install the values into the registers
	 */
	for (i = 0; i < count; i++, req++) {

		rnum      = req->dbreg_num;
		dbreg.val = req->dbreg_value;

		ret = -EINVAL;

		if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
			DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				  rnum, dbreg.val, mode, i, count));
			goto abort_mission;
		}

		/*
		 * make sure we do not install an enabled breakpoint
		 */
		if (rnum & 0x1) {
			if (mode == PFM_CODE_RR)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}
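
		/*
		 * On IA-64 the debug registers come in pairs: even-numbered
		 * registers hold the breakpoint address, odd-numbered ones
		 * hold the mask and control bits (ibr.x enables an
		 * instruction breakpoint, dbr.r/dbr.w a data breakpoint).
		 * Clearing those bits above is what keeps a perfmon-written
		 * register from ever acting as a live breakpoint.
		 */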
		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);

		/*
		 * Debug registers, just like PMCs, can only be modified
		 * by a kernel call. Moreover, perfmon() accesses to those
		 * registers are centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
		if (mode == PFM_CODE_RR) {
			CTX_USED_IBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_ibr(rnum, dbreg.val);
				ia64_dv_serialize_instruction();
			}

			ctx->ctx_ibrs[rnum] = dbreg.val;

			DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
		} else {
			CTX_USED_DBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_dbr(rnum, dbreg.val);
				ia64_dv_serialize_data();
			}
			ctx->ctx_dbrs[rnum] = dbreg.val;

			DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
		}
	}

	return 0;

abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
	if (first_time) {
		LOCK_PFS(flags);
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS(flags);
		ctx->ctx_fl_using_dbreg = 0;
	}
	/*
	 * install error return flag
	 */
	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);

	return ret;
}

static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}

static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}

int
pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_ibrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_ibrs);

int
pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_dbrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_dbrs);

static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_features_t *req = (pfarg_features_t *)arg;

	req->ft_version = PFM_VERSION;
	return 0;
}

static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;

	/*
	 * In system-wide mode, when the context is loaded, access can only
	 * happen when the caller is running on the CPU being monitored by
	 * the session. It does not have to be the owner (ctx_task) of the
	 * context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		task_pid_nr(PFM_CTX_TASK(ctx)),
		state,
		is_system));

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*
		 * update local cpuinfo
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		/*
		 * stop monitoring, does srlz.i
		 */
		pfm_clear_psr_pp();

		/*
		 * stop monitoring in the caller
		 */
		ia64_psr(regs)->pp = 0;

		return 0;
	}

	/*
	 * per-task mode
	 */
	if (task == current) {
		/* stop monitoring at kernel level */
		pfm_clear_psr_up();

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = task_pt_regs(task);

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(tregs)->up = 0;

		/*
		 * monitoring disabled in kernel at next reschedule
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task_pid_nr(task)));
	}
	return 0;
}
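
/*
 * Summary of the monitoring enable bits manipulated by pfm_stop() above
 * and pfm_start() below: dcr.pp and psr.pp gate system-wide (privileged)
 * monitoring, psr.up gates per-task (user) monitoring, and
 * ctx_saved_psr_up carries the per-task setting for a context that is
 * not currently running, to be reinstated at the next context switch.
 */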
static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	if (state != PFM_CTX_LOADED) return -EINVAL;

	/*
	 * In system-wide mode, when the context is loaded, access can only
	 * happen when the caller is running on the CPU being monitored by
	 * the session. It does not have to be the owner (ctx_task) of the
	 * context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * set user level psr.pp for the caller
		 */
		ia64_psr(regs)->pp = 1;

		/*
		 * now update the local PMU and cpuinfo
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);

		/*
		 * start monitoring at kernel level
		 */
		pfm_set_psr_pp();

		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();

		return 0;
	}

	/*
	 * per-process mode
	 */
	if (ctx->ctx_task == current) {

		/* start monitoring at kernel level */
		pfm_set_psr_up();

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(regs)->up = 1;

	} else {
		tregs = task_pt_regs(ctx->ctx_task);

		/*
		 * start monitoring at the kernel level the next
		 * time the task is scheduled
		 */
		ctx->ctx_saved_psr_up = IA64_PSR_UP;

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(tregs)->up = 1;
	}
	return 0;
}
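
/*
 * Minimal user-level sketch of the start/stop pair (both commands take
 * no argument, as encoded in pfm_cmd_tab[] below):
 *
 *	perfmonctl(fd, PFM_START, NULL, 0);
 *	... workload to measure ...
 *	perfmonctl(fd, PFM_STOP, NULL, 0);
 */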

static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;

		if (!PMC_IS_IMPL(cnum)) goto abort_mission;

		req->reg_value = PMC_DFL_VAL(cnum);

		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}

static int
pfm_check_task_exist(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);

	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			goto out;
		}
	} while_each_thread (g, t);
out:
	read_unlock(&tasklist_lock);

	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));

	return ret;
}

static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	struct thread_struct *thread;
	pfm_context_t *old;
	unsigned long flags;
#ifndef CONFIG_SMP
	struct task_struct *owner_task = NULL;
#endif
	pfarg_load_t *req = (pfarg_load_t *)arg;
	unsigned long *pmcs_source, *pmds_source;
	int the_cpu;
	int ret = 0;
	int state, is_system, set_dbregs = 0;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * can only load from unloaded or terminated state
	 */
	if (state != PFM_CTX_UNLOADED) {
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
		return -EBUSY;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
		DPRINT(("cannot use blocking mode on self\n"));
		return -EINVAL;
	}

	ret = pfm_get_task(ctx, req->load_pid, &task);
	if (ret) {
		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
		return ret;
	}

	ret = -EINVAL;

	/*
	 * system wide is self monitoring only
	 */
	if (is_system && task != current) {
		DPRINT(("system wide is self monitoring only load_pid=%d\n",
			req->load_pid));
		goto error;
	}

	thread = &task->thread;

	ret = 0;
	/*
	 * cannot load a context which is using range restrictions
	 * into a task that is being debugged.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		if (thread->flags & IA64_THREAD_DBG_VALID) {
			ret = -EBUSY;
			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
			goto error;
		}
		LOCK_PFS(flags);

		if (is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs) {
				DPRINT(("cannot load [%d] dbregs in use\n",
							task_pid_nr(task)));
				ret = -EBUSY;
			} else {
				pfm_sessions.pfs_sys_use_dbregs++;
				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
				set_dbregs = 1;
			}
		}

		UNLOCK_PFS(flags);

		if (ret) goto error;
	}

	/*
	 * SMP system-wide monitoring implies self-monitoring.
	 *
	 * The programming model expects the task to
	 * be pinned on a CPU throughout the session.
	 * Here we take note of the current CPU at the
	 * time the context is loaded. No call from
	 * another CPU will be allowed.
	 *
	 * The pinning via sched_setaffinity()
	 * must be done by the calling task prior
	 * to this call.
	 *
	 * systemwide: keep track of CPU this session is supposed to run on
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();

	ret = -EBUSY;
	/*
	 * now reserve the session
	 */
	ret = pfm_reserve_session(current, is_system, the_cpu);
	if (ret) goto error;

	/*
	 * task is necessarily stopped at this point.
	 *
	 * If the previous context was zombie, then it got removed in
	 * pfm_save_regs(). Therefore we should not see it here.
	 * If we see a context, then this is an active context
	 *
	 * XXX: needs to be atomic
	 */
	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
		thread->pfm_context, ctx));

	ret = -EBUSY;
	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;
	}

	pfm_reset_msgq(ctx);

	ctx->ctx_state = PFM_CTX_LOADED;

	/*
	 * link context to task
	 */
	ctx->ctx_task = task;

	if (is_system) {
		/*
		 * we load as stopped
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
	} else {
		thread->flags |= IA64_THREAD_PM_VALID;
	}

	/*
	 * propagate into thread-state
	 */
	pfm_copy_pmds(task, ctx);
	pfm_copy_pmcs(task, ctx);

	pmcs_source = ctx->th_pmcs;
	pmds_source = ctx->th_pmds;

	/*
	 * always the case for system-wide
	 */
	if (task == current) {

		if (is_system == 0) {

			/* allow user level control */
			ia64_psr(regs)->sp = 0;
			DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));

			SET_LAST_CPU(ctx, smp_processor_id());
			INC_ACTIVATION();
			SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
			/*
			 * push the other task out, if any
			 */
			owner_task = GET_PMU_OWNER();
			if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
		}
		/*
		 * load all PMDs from ctx to PMU (as opposed to thread state)
		 * restore all PMCs from ctx to PMU
		 */
		pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
		pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);

		ctx->ctx_reload_pmcs[0] = 0UL;
		ctx->ctx_reload_pmds[0] = 0UL;

		/*
		 * guaranteed safe by earlier check against DBG_VALID
		 */
		if (ctx->ctx_fl_using_dbreg) {
			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
		}
		/*
		 * set new ownership
		 */
		SET_PMU_OWNER(task, ctx);

		DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
	} else {
		/*
		 * when not current, task MUST be stopped, so this is safe
		 */
		regs = task_pt_regs(task);

		/* force a full reload */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/* initial saved psr (stopped) */
		ctx->ctx_saved_psr_up = 0UL;
		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
	}

	ret = 0;

error_unres:
	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
	/*
	 * we must undo the dbregs setting (for system-wide)
	 */
	if (ret && set_dbregs) {
		LOCK_PFS(flags);
		pfm_sessions.pfs_sys_use_dbregs--;
		UNLOCK_PFS(flags);
	}
	/*
	 * release task; there is now a link with the context
	 */
	if (is_system == 0 && task != current) {
		pfm_put_task(task);

		if (ret == 0) {
			ret = pfm_check_task_exist(ctx);
			if (ret) {
				ctx->ctx_state = PFM_CTX_UNLOADED;
				ctx->ctx_task  = NULL;
			}
		}
	}
	return ret;
}

/*
 * in this function, we do not need to increase the use count
 * for the task via get_task_struct(), because we hold the
 * context lock. If the task were to disappear while having
 * a context attached, it would go through pfm_exit_thread(),
 * which also grabs the context lock and would therefore be
 * blocked for as long as we are in here.
 */
static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);

static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task = PFM_CTX_TASK(ctx);
	struct pt_regs *tregs;
	int prev_state, is_system;
	int ret;

	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));

	prev_state = ctx->ctx_state;
	is_system  = ctx->ctx_fl_system;

	/*
	 * unload only when necessary
	 */
	if (prev_state == PFM_CTX_UNLOADED) {
		DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
		return 0;
	}

	/*
	 * clear psr and dcr bits
	 */
	ret = pfm_stop(ctx, NULL, 0, regs);
	if (ret) return ret;

	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {

		/*
		 * Update cpuinfo
		 *
		 * local PMU is taken care of in pfm_stop()
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);

		/*
		 * save PMDs in context
		 * release ownership
		 */
		pfm_flush_pmds(current, ctx);

		/*
		 * at this point we are done with the PMU
		 * so we can unreserve the resource.
		 */
		if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);

		/*
		 * disconnect context from task
		 */
		task->thread.pfm_context = NULL;
		/*
		 * disconnect task from context
		 */
		ctx->ctx_task = NULL;

		/*
		 * There is nothing more to cleanup here.
		 */
		return 0;
	}

	/*
	 * per-task mode
	 */
	tregs = task == current ? regs : task_pt_regs(task);

	if (task == current) {
		/*
		 * cancel user level control
		 */
		ia64_psr(regs)->sp = 1;

		DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
	}
	/*
	 * save PMDs to context
	 * release ownership
	 */
	pfm_flush_pmds(task, ctx);

	/*
	 * at this point we are done with the PMU
	 * so we can unreserve the resource.
	 *
	 * when state was ZOMBIE, we have already unreserved.
	 */
	if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);

	/*
	 * reset activation counter and psr
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * PMU state will not be restored
	 */
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	/*
	 * break links between context and task
	 */
	task->thread.pfm_context = NULL;
	ctx->ctx_task            = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	ctx->ctx_fl_trap_reason  = PFM_TRAP_REASON_NONE;
	ctx->ctx_fl_can_restart  = 0;
	ctx->ctx_fl_going_zombie = 0;

	DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));

	return 0;
}

/*
 * called only from exit_thread(): task == current
 * we come here only if current has a context attached (loaded or masked)
 */
void
pfm_exit_thread(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	struct pt_regs *regs = task_pt_regs(task);
	int ret, state;
	int free_ok = 0;

	ctx = PFM_GET_CTX(task);

	PROTECT_CTX(ctx, flags);

	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));

	state = ctx->ctx_state;
	switch(state) {
	case PFM_CTX_UNLOADED:
		/*
		 * only comes to this function if pfm_context is not NULL, i.e., cannot
		 * be in unloaded state
		 */
		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
		break;
	case PFM_CTX_LOADED:
	case PFM_CTX_MASKED:
		ret = pfm_context_unload(ctx, NULL, 0, regs);
		if (ret) {
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
		}
		DPRINT(("ctx unloaded for current state was %d\n", state));

		pfm_end_notify_user(ctx);
		break;
	case PFM_CTX_ZOMBIE:
		ret = pfm_context_unload(ctx, NULL, 0, regs);
		if (ret) {
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
		}
		free_ok = 1;
		break;
	default:
		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
		break;
	}
	UNPROTECT_CTX(ctx, flags);

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	  BUG_ON(GET_PMU_OWNER());
	  BUG_ON(ia64_psr(regs)->up);
	  BUG_ON(ia64_psr(regs)->pp);
	}

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (free_ok) pfm_context_free(ctx);
}

/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS	(PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW	(PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE	{ NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
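
/*
 * Example of the dispatch encoding: a user call such as
 * perfmonctl(fd, PFM_WRITE_PMCS, reqs, n) lands on pfm_cmd_tab[1],
 * whose PFM_CMD_ARG_MANY entry lets sys_perfmonctl() size the copy as
 * n * sizeof(pfarg_reg_t), while the PFM_CMD_PCLRWS flags request fd
 * lookup, read/write argument copying, and a stopped target task.
 */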

static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
        struct task_struct *task;
        int state, old_state;

recheck:
        state = ctx->ctx_state;
        task  = ctx->ctx_task;

        if (task == NULL) {
                DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
                return 0;
        }

        DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
                ctx->ctx_fd,
                state,
                task_pid_nr(task),
                task->state, PFM_CMD_STOPPED(cmd)));

        /*
         * self-monitoring is always ok.
         *
         * for system-wide the caller can either be the creator of the
         * context (the one to which the context is attached) OR
         * a task running on the same CPU as the session.
         */
        if (task == current || ctx->ctx_fl_system) return 0;

        /*
         * we are monitoring another thread
         */
        switch(state) {
                case PFM_CTX_UNLOADED:
                        /*
                         * if context is UNLOADED we are safe to go
                         */
                        return 0;
                case PFM_CTX_ZOMBIE:
                        /*
                         * no command can operate on a zombie context
                         */
                        DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
                        return -EINVAL;
                case PFM_CTX_MASKED:
                        /*
                         * PMU state has been saved to software even though
                         * the thread may still be running.
                         */
                        if (cmd != PFM_UNLOAD_CONTEXT) return 0;
        }

        /*
         * context is LOADED or MASKED. Some commands may need to have
         * the task stopped.
         *
         * We could lift this restriction for UP but it would mean that
         * the user has no guarantee the task would not run between
         * two successive calls to perfmonctl(). That's probably OK.
         * If this user wants to ensure the task does not run, then
         * the task must be stopped.
         */
        if (PFM_CMD_STOPPED(cmd)) {
                if (!task_is_stopped_or_traced(task)) {
                        DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
                        return -EBUSY;
                }
                /*
                 * task is now stopped, wait for ctxsw out
                 *
                 * This is an interesting point in the code.
                 * We need to unprotect the context because
                 * the pfm_save_regs() routine needs to grab
                 * the same lock. There is a danger in doing
                 * this because it leaves a window open for
                 * another task to get access to the context
                 * and possibly change its state. The one thing
                 * that is not possible is for the context to disappear
                 * because we are protected by the VFS layer, i.e.,
                 * get_fd()/put_fd().
                 */
                old_state = state;

                UNPROTECT_CTX(ctx, flags);

                wait_task_inactive(task, 0);

                PROTECT_CTX(ctx, flags);

                /*
                 * we must recheck to verify if state has changed
                 */
                if (ctx->ctx_state != old_state) {
                        DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
                        goto recheck;
                }
        }
        return 0;
}
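
/*
 * Controller-side sketch of the PFM_CMD_STOPPED rule enforced above:
 * before issuing a stopped-task command against another thread, the
 * monitored thread is typically placed in a ptrace stop first
 * (user level, error handling omitted; PFM_WRITE_PMCS assumed from
 * <asm/perfmon.h>):
 *
 *      ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *      waitpid(pid, &status, 0);
 *      perfmonctl(ctx_fd, PFM_WRITE_PMCS, pmcs, npmcs);
 *
 * Without the stop, the check above returns -EBUSY.
 */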

/*
 * system-call entry point (must return long)
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
        struct fd f = {NULL, 0};
        pfm_context_t *ctx = NULL;
        unsigned long flags = 0UL;
        void *args_k = NULL;
        long ret; /* will expand int return types */
        size_t base_sz, sz, xtra_sz = 0;
        int narg, completed_args = 0, call_made = 0, cmd_flags;
        int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
        int (*getsize)(void *arg, size_t *sz);

#define PFM_MAX_ARGSIZE 4096

        /*
         * reject any call if perfmon was disabled at initialization
         */
        if (unlikely(pmu_conf == NULL)) return -ENOSYS;

        if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
                DPRINT(("invalid cmd=%d\n", cmd));
                return -EINVAL;
        }

        func      = pfm_cmd_tab[cmd].cmd_func;
        narg      = pfm_cmd_tab[cmd].cmd_narg;
        base_sz   = pfm_cmd_tab[cmd].cmd_argsize;
        getsize   = pfm_cmd_tab[cmd].cmd_getsize;
        cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

        if (unlikely(func == NULL)) {
                DPRINT(("invalid cmd=%d\n", cmd));
                return -EINVAL;
        }

        DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
                PFM_CMD_NAME(cmd),
                cmd,
                narg,
                base_sz,
                count));

        /*
         * check if number of arguments matches what the command expects
         */
        if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
                return -EINVAL;

restart_args:
        sz = xtra_sz + base_sz*count;
        /*
         * limit abuse to min page size
         */
        if (unlikely(sz > PFM_MAX_ARGSIZE)) {
                printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
                return -E2BIG;
        }

        /*
         * allocate default-sized argument buffer
         */
        if (likely(count && args_k == NULL)) {
                args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
                if (args_k == NULL) return -ENOMEM;
        }

        ret = -EFAULT;

        /*
         * copy arguments
         *
         * assume sz = 0 for command without parameters
         */
        if (sz && copy_from_user(args_k, arg, sz)) {
                DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
                goto error_args;
        }

        /*
         * check if command supports extra parameters
         */
        if (completed_args == 0 && getsize) {
                /*
                 * get extra parameters size (based on main argument)
                 */
                ret = (*getsize)(args_k, &xtra_sz);
                if (ret) goto error_args;

                completed_args = 1;

                DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

                /* retry if necessary */
                if (likely(xtra_sz)) goto restart_args;
        }

        if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

        ret = -EBADF;

        f = fdget(fd);
        if (unlikely(f.file == NULL)) {
                DPRINT(("invalid fd %d\n", fd));
                goto error_args;
        }
        if (unlikely(PFM_IS_FILE(f.file) == 0)) {
                DPRINT(("fd %d not related to perfmon\n", fd));
                goto error_args;
        }

        ctx = f.file->private_data;
        if (unlikely(ctx == NULL)) {
                DPRINT(("no context for fd %d\n", fd));
                goto error_args;
        }
        prefetch(&ctx->ctx_state);

        PROTECT_CTX(ctx, flags);

        /*
         * check task is stopped
         */
        ret = pfm_check_task_state(ctx, cmd, flags);
        if (unlikely(ret)) goto abort_locked;

skip_fd:
        ret = (*func)(ctx, args_k, count, task_pt_regs(current));

        call_made = 1;

abort_locked:
        if (likely(ctx)) {
                DPRINT(("context unlocked\n"));
                UNPROTECT_CTX(ctx, flags);
        }

        /* copy argument back to user, if needed */
        if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
        if (f.file)
                fdput(f);

        kfree(args_k);

        DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

        return ret;
}
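
/*
 * End-to-end sketch of the entry point above as seen from user level
 * (illustrative only; command names and pfarg_* layouts assumed from
 * <asm/perfmon.h>, error handling omitted):
 *
 *      pfarg_context_t uctx;
 *      memset(&uctx, 0, sizeof(uctx));
 *      perfmonctl(0, PFM_CREATE_CONTEXT, &uctx, 1);
 *      int fd = uctx.ctx_fd;
 *
 *      perfmonctl(fd, PFM_WRITE_PMCS, pmcs, npmcs);
 *      perfmonctl(fd, PFM_WRITE_PMDS, pmds, npmds);
 *
 *      pfarg_load_t load = { .load_pid = getpid() };
 *      perfmonctl(fd, PFM_LOAD_CONTEXT, &load, 1);
 *      perfmonctl(fd, PFM_START, NULL, 0);
 *      ...
 *      perfmonctl(fd, PFM_READ_PMDS, pmds, npmds);
 */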

static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
        pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
        pfm_ovfl_ctrl_t rst_ctrl;
        int state;
        int ret = 0;

        state = ctx->ctx_state;
        /*
         * Unlock sampling buffer and reset index atomically
         * XXX: not really needed when blocking
         */
        if (CTX_HAS_SMPL(ctx)) {
                rst_ctrl.bits.mask_monitoring = 0;
                rst_ctrl.bits.reset_ovfl_pmds = 0;

                if (state == PFM_CTX_LOADED)
                        ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
                else
                        ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
        } else {
                rst_ctrl.bits.mask_monitoring = 0;
                rst_ctrl.bits.reset_ovfl_pmds = 1;
        }

        if (ret == 0) {
                if (rst_ctrl.bits.reset_ovfl_pmds) {
                        pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
                }
                if (rst_ctrl.bits.mask_monitoring == 0) {
                        DPRINT(("resuming monitoring\n"));
                        if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
                } else {
                        DPRINT(("stopping monitoring\n"));
                        //pfm_stop_monitoring(current, regs);
                }
                ctx->ctx_state = PFM_CTX_LOADED;
        }
}

/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
        int ret;

        DPRINT(("entering for [%d]\n", task_pid_nr(current)));

        ret = pfm_context_unload(ctx, NULL, 0, regs);
        if (ret) {
                printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
        }

        /*
         * and wakeup controlling task, indicating we are now disconnected
         */
        wake_up_interruptible(&ctx->ctx_zombieq);

        /*
         * given that context is still locked, the controlling
         * task will only get access when we return from
         * pfm_handle_work().
         */
}

static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

/*
 * pfm_handle_work() can be called with interrupts enabled
 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
 * call may sleep, therefore we must re-enable interrupts
 * to avoid deadlocks. It is safe to do so because this function
 * is called ONLY when returning to user level (pUStk=1), in which case
 * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
 */
void
pfm_handle_work(void)
{
        pfm_context_t *ctx;
        struct pt_regs *regs;
        unsigned long flags, dummy_flags;
        unsigned long ovfl_regs;
        unsigned int reason;
        int ret;

        ctx = PFM_GET_CTX(current);
        if (ctx == NULL) {
                printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
                        task_pid_nr(current));
                return;
        }

        PROTECT_CTX(ctx, flags);

        PFM_SET_WORK_PENDING(current, 0);

        regs = task_pt_regs(current);

        /*
         * extract reason for being here and clear
         */
        reason = ctx->ctx_fl_trap_reason;
        ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
        ovfl_regs = ctx->ctx_ovfl_regs[0];

        DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

        /*
         * must be done before we check for simple-reset mode
         */
        if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
                goto do_zombie;

        //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
        if (reason == PFM_TRAP_REASON_RESET)
                goto skip_blocking;

        /*
         * restore interrupt mask to what it was on entry.
         * Could be enabled/disabled.
         */
        UNPROTECT_CTX(ctx, flags);

        /*
         * force interrupt enable because of down_interruptible()
         */
        local_irq_enable();

        DPRINT(("before block sleeping\n"));

        /*
         * may go through without blocking on SMP systems
         * if restart has been received already by the time we call down()
         */
        ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

        DPRINT(("after block sleeping ret=%d\n", ret));

        /*
         * lock context and mask interrupts again
         * We save flags into a dummy because we may have
         * altered the interrupt mask compared to entry in this
         * function.
         */
        PROTECT_CTX(ctx, dummy_flags);

        /*
         * we need to read the ovfl_regs only after wake-up
         * because we may have had pfm_write_pmds() in between
         * and that can change PMD values and therefore
         * ovfl_regs is reset for these new PMD values.
         */
        ovfl_regs = ctx->ctx_ovfl_regs[0];

        if (ctx->ctx_fl_going_zombie) {
do_zombie:
                DPRINT(("context is zombie, bailing out\n"));
                pfm_context_force_terminate(ctx, regs);
                goto nothing_to_do;
        }
        /*
         * in case of interruption of down() we don't restart anything
         */
        if (ret < 0)
                goto nothing_to_do;

skip_blocking:
        pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
        ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
        /*
         * restore flags as they were upon entry
         */
        UNPROTECT_CTX(ctx, flags);
}
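
/*
 * Peer of the blocking path above: while the monitored thread sleeps in
 * wait_for_completion_interruptible(&ctx->ctx_restart_done), the
 * controlling task consumes the overflow message and releases it
 * (user-level sketch; PFM_RESTART assumed from <asm/perfmon.h>):
 *
 *      pfm_msg_t msg;
 *      read(ctx_fd, &msg, sizeof(msg));
 *      ... process the sampling buffer ...
 *      perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
 */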

static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
        if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
                DPRINT(("ignoring overflow notification, owner is zombie\n"));
                return 0;
        }

        DPRINT(("waking up somebody\n"));

        if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

        /*
         * safe, we are not in intr handler, nor in ctxsw when
         * we come here
         */
        kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

        return 0;
}

static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
        pfm_msg_t *msg = NULL;

        if (ctx->ctx_fl_no_msg == 0) {
                msg = pfm_get_new_msg(ctx);
                if (msg == NULL) {
                        printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
                        return -1;
                }

                msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
                msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
                msg->pfm_ovfl_msg.msg_active_set   = 0;
                msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
                msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
                msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
                msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
                msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
        }

        DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
                msg,
                ctx->ctx_fl_no_msg,
                ctx->ctx_fd,
                ovfl_pmds));

        return pfm_notify_user(ctx, msg);
}

static int
pfm_end_notify_user(pfm_context_t *ctx)
{
        pfm_msg_t *msg;

        msg = pfm_get_new_msg(ctx);
        if (msg == NULL) {
                printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
                return -1;
        }
        /* no leak */
        memset(msg, 0, sizeof(*msg));

        msg->pfm_end_msg.msg_type    = PFM_MSG_END;
        msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
        msg->pfm_ovfl_msg.msg_tstamp = 0UL;

        DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
                msg,
                ctx->ctx_fl_no_msg,
                ctx->ctx_fd));

        return pfm_notify_user(ctx, msg);
}
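
/*
 * Consumer-side sketch for the two message types built above (layouts
 * assumed from the pfm_msg_t union in <asm/perfmon.h>; handle() and
 * cleanup() are placeholders):
 *
 *      pfm_msg_t msg;
 *      if (read(fd, &msg, sizeof(msg)) == sizeof(msg)) {
 *              switch (msg.pfm_gen_msg.msg_type) {
 *              case PFM_MSG_OVFL:
 *                      handle(msg.pfm_ovfl_msg.msg_ovfl_pmds[0]);
 *                      break;
 *              case PFM_MSG_END:
 *                      cleanup();
 *                      break;
 *              }
 *      }
 */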

/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
                                unsigned long pmc0, struct pt_regs *regs)
{
        pfm_ovfl_arg_t *ovfl_arg;
        unsigned long mask;
        unsigned long old_val, ovfl_val, new_val;
        unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
        unsigned long tstamp;
        pfm_ovfl_ctrl_t ovfl_ctrl;
        unsigned int i, has_smpl;
        int must_notify = 0;

        if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

        /*
         * sanity test. Should never happen
         */
        if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

        tstamp   = ia64_get_itc();
        mask     = pmc0 >> PMU_FIRST_COUNTER;
        ovfl_val = pmu_conf->ovfl_val;
        has_smpl = CTX_HAS_SMPL(ctx);

        DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
                     "used_pmds=0x%lx\n",
                     pmc0,
                     task ? task_pid_nr(task): -1,
                     (regs ? regs->cr_iip : 0),
                     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
                     ctx->ctx_used_pmds[0]));

        /*
         * first we update the virtual counters
         * assume there was a prior ia64_srlz_d() issued
         */
        for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

                /* skip pmd which did not overflow */
                if ((mask & 0x1) == 0) continue;

                /*
                 * Note that the pmd is not necessarily 0 at this point as qualified events
                 * may have happened before the PMU was frozen. The residual count is not
                 * taken into consideration here but will be with any read of the pmd via
                 * pfm_read_pmds().
                 */
                old_val              = new_val = ctx->ctx_pmds[i].val;
                new_val             += 1 + ovfl_val;
                ctx->ctx_pmds[i].val = new_val;

                /*
                 * check for overflow condition
                 */
                if (likely(old_val > new_val)) {
                        ovfl_pmds |= 1UL << i;
                        if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
                }

                DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
                        i,
                        new_val,
                        old_val,
                        ia64_get_pmd(i) & ovfl_val,
                        ovfl_pmds,
                        ovfl_notify));
        }
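
        /*
         * Worked example of the update above, assuming 47-bit hardware
         * counters, i.e. ovfl_val = 0x00007fffffffffff: each hardware
         * overflow accounts for 1 + ovfl_val = 2^47 in the 64-bit
         * software value,
         *
         *      new_val = old_val + 0x0000800000000000;
         *
         * so the wrap-around test old_val > new_val fires only once the
         * full 64-bit virtual counter overflows.
         */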

        /*
         * there was no 64-bit overflow, nothing else to do
         */
        if (ovfl_pmds == 0UL) return;

        /*
         * reset all control bits
         */
        ovfl_ctrl.val = 0;
        reset_pmds    = 0UL;

        /*
         * if a sampling format module exists, then we "cache" the overflow by
         * calling the module's handler() routine.
         */
        if (has_smpl) {
                unsigned long start_cycles, end_cycles;
                unsigned long pmd_mask;
                int j, k, ret = 0;
                int this_cpu = smp_processor_id();

                pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
                ovfl_arg = &ctx->ctx_ovfl_arg;

                prefetch(ctx->ctx_smpl_hdr);

                for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

                        mask = 1UL << i;

                        if ((pmd_mask & 0x1) == 0) continue;

                        ovfl_arg->ovfl_pmd      = (unsigned char )i;
                        ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
                        ovfl_arg->active_set    = 0;
                        ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
                        ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

                        ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
                        ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
                        ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

                        /*
                         * copy values of pmds of interest. Sampling format may copy them
                         * into sampling buffer.
                         */
                        if (smpl_pmds) {
                                for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
                                        if ((smpl_pmds & 0x1) == 0) continue;
                                        ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
                                        DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
                                }
                        }

                        pfm_stats[this_cpu].pfm_smpl_handler_calls++;

                        start_cycles = ia64_get_itc();

                        /*
                         * call custom buffer format record (handler) routine
                         */
                        ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

                        end_cycles = ia64_get_itc();

                        /*
                         * For those controls, we take the union because they have
                         * an all or nothing behavior.
                         */
                        ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
                        ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
                        ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
                        /*
                         * build the bitmask of pmds to reset now
                         */
                        if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

                        pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
                }
                /*
                 * when the module cannot handle the rest of the overflows, we abort right here
                 */
                if (ret && pmd_mask) {
                        DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
                                pmd_mask<<PMU_FIRST_COUNTER));
                }
                /*
                 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
                 */
                ovfl_pmds &= ~reset_pmds;
        } else {
                /*
                 * when no sampling module is used, then the default
                 * is to notify on overflow if requested by user
                 */
                ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
                ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
                ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
                ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
                /*
                 * if needed, we reset all overflowed pmds
                 */
                if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
        }

        DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

        /*
         * reset the requested PMD registers using the short reset values
         */
        if (reset_pmds) {
                unsigned long bm = reset_pmds;
                pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
        }

        if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
                /*
                 * keep track of what to reset when unblocking
                 */
                ctx->ctx_ovfl_regs[0] = ovfl_pmds;

                /*
                 * check for blocking context
                 */
                if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

                        ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

                        /*
                         * set the perfmon specific checking pending work for the task
                         */
                        PFM_SET_WORK_PENDING(task, 1);

                        /*
                         * when coming from ctxsw, current still points to the
                         * previous task, therefore we must work with task and not current.
                         */
                        set_notify_resume(task);
                }
                /*
                 * defer until state is changed (shorten spin window). the context is locked
                 * anyway, so the signal receiver would come spin for nothing.
                 */
                must_notify = 1;
        }

        DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
                        GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
                        PFM_GET_WORK_PENDING(task),
                        ctx->ctx_fl_trap_reason,
                        ovfl_pmds,
                        ovfl_notify,
                        ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
        /*
         * in case monitoring must be stopped, we toggle the psr bits
         */
        if (ovfl_ctrl.bits.mask_monitoring) {
                pfm_mask_monitoring(task);
                ctx->ctx_state = PFM_CTX_MASKED;
                ctx->ctx_fl_can_restart = 1;
        }

        /*
         * send notification now
         */
        if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

        return;

sanity_check:
        printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
                        smp_processor_id(),
                        task ? task_pid_nr(task) : -1,
                        pmc0);
        return;

stop_monitoring:
        /*
         * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
         * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
         * come here as zombie only if the task is the current task. In which case, we
         * can access the PMU hardware directly.
         *
         * Note that zombies do have PM_VALID set. So here we do the minimal.
         *
         * In case the context was zombified it could not be reclaimed at the time
         * the monitoring program exited. At this point, the PMU reservation has been
         * returned, the sampling buffer has been freed. We must convert this call
         * into a spurious interrupt. However, we must also avoid infinite overflows
         * by stopping monitoring for this task. We can only come here for a per-task
         * context. All we need to do is to stop monitoring using the psr bits which
         * are always task private. By re-enabling secure monitoring, we ensure that
         * the monitored task will not be able to re-activate monitoring.
         * The task will eventually be context switched out, at which point the context
         * will be reclaimed (that includes releasing ownership of the PMU).
         *
         * So there might be a window of time where the number of per-task sessions is zero
         * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
         * context. This is safe because if a per-task session comes in, it will push this one
         * out and, by virtue of pfm_save_regs(), this one will disappear. If a system-wide
         * session is forced onto that CPU, given that we use task pinning, pfm_save_regs()
         * will also push our zombie context out.
         *
         * Overall pretty hairy stuff....
         */
        DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
        pfm_clear_psr_up();
        ia64_psr(regs)->up = 0;
        ia64_psr(regs)->sp = 1;
        return;
}

static int
pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
        struct task_struct *task;
        pfm_context_t *ctx;
        unsigned long flags;
        u64 pmc0;
        int this_cpu = smp_processor_id();
        int retval = 0;

        pfm_stats[this_cpu].pfm_ovfl_intr_count++;

        /*
         * srlz.d done before arriving here
         */
        pmc0 = ia64_get_pmc(0);

        task = GET_PMU_OWNER();
        ctx  = GET_PMU_CTX();

        /*
         * if we have some pending bits set
         * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
         */
        if (PMC0_HAS_OVFL(pmc0) && task) {
                /*
                 * we assume that pmc0.fr is always set here
                 */

                /* sanity check */
                if (!ctx) goto report_spurious1;

                if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
                        goto report_spurious2;

                PROTECT_CTX_NOPRINT(ctx, flags);

                pfm_overflow_handler(task, ctx, pmc0, regs);

                UNPROTECT_CTX_NOPRINT(ctx, flags);

        } else {
                pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
                retval = -1;
        }
        /*
         * keep it unfrozen at all times
         */
        pfm_unfreeze_pmu();

        return retval;

report_spurious1:
        printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
                this_cpu, task_pid_nr(task));
        pfm_unfreeze_pmu();
        return -1;
report_spurious2:
        printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
                this_cpu,
                task_pid_nr(task));
        pfm_unfreeze_pmu();
        return -1;
}

static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
        unsigned long start_cycles, total_cycles;
        unsigned long min, max;
        int this_cpu;
        int ret;
        struct pt_regs *regs = get_irq_regs();

        this_cpu = get_cpu();
        if (likely(!pfm_alt_intr_handler)) {
                min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
                max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

                start_cycles = ia64_get_itc();

                ret = pfm_do_interrupt_handler(arg, regs);

                total_cycles = ia64_get_itc();

                /*
                 * don't measure spurious interrupts
                 */
                if (likely(ret == 0)) {
                        total_cycles -= start_cycles;

                        if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
                        if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

                        pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
                }
        }
        else {
                (*pfm_alt_intr_handler->handler)(irq, arg, regs);
        }

        put_cpu();
        return IRQ_HANDLED;
}

/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER    ((void *)(long)nr_cpu_ids+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0) {
                return PFM_PROC_SHOW_HEADER;
        }

        while (*pos <= nr_cpu_ids) {
                if (cpu_online(*pos - 1)) {
                        return (void *)*pos;
                }
                ++*pos;
        }
        return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}

static void
pfm_proc_show_header(struct seq_file *m)
{
        struct list_head * pos;
        pfm_buffer_fmt_t * entry;
        unsigned long flags;

        seq_printf(m,
                "perfmon version            : %u.%u\n"
                "model                      : %s\n"
                "fastctxsw                  : %s\n"
                "expert mode                : %s\n"
                "ovfl_mask                  : 0x%lx\n"
                "PMU flags                  : 0x%x\n",
                PFM_VERSION_MAJ, PFM_VERSION_MIN,
                pmu_conf->pmu_name,
                pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
                pfm_sysctl.expert_mode > 0 ? "Yes": "No",
                pmu_conf->ovfl_val,
                pmu_conf->flags);

        LOCK_PFS(flags);

        seq_printf(m,
                "proc_sessions              : %u\n"
                "sys_sessions               : %u\n"
                "sys_use_dbregs             : %u\n"
                "ptrace_use_dbregs          : %u\n",
                pfm_sessions.pfs_task_sessions,
                pfm_sessions.pfs_sys_sessions,
                pfm_sessions.pfs_sys_use_dbregs,
                pfm_sessions.pfs_ptrace_use_dbregs);

        UNLOCK_PFS(flags);

        spin_lock(&pfm_buffer_fmt_lock);

        list_for_each(pos, &pfm_buffer_fmt_list) {
                entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
                seq_printf(m, "format                     : %16phD %s\n",
                           entry->fmt_uuid, entry->fmt_name);
        }

        spin_unlock(&pfm_buffer_fmt_lock);
}

static int
pfm_proc_show(struct seq_file *m, void *v)
{
        unsigned long psr;
        unsigned int i;
        int cpu;

        if (v == PFM_PROC_SHOW_HEADER) {
                pfm_proc_show_header(m);
                return 0;
        }

        /* show info for CPU (v - 1) */
        cpu = (long)v - 1;
        seq_printf(m,
                "CPU%-2d overflow intrs      : %lu\n"
                "CPU%-2d overflow cycles     : %lu\n"
                "CPU%-2d overflow min        : %lu\n"
                "CPU%-2d overflow max        : %lu\n"
                "CPU%-2d smpl handler calls  : %lu\n"
                "CPU%-2d smpl handler cycles : %lu\n"
                "CPU%-2d spurious intrs      : %lu\n"
                "CPU%-2d replay intrs        : %lu\n"
                "CPU%-2d syst_wide           : %d\n"
                "CPU%-2d dcr_pp              : %d\n"
                "CPU%-2d exclude idle        : %d\n"
                "CPU%-2d owner               : %d\n"
                "CPU%-2d context             : %p\n"
                "CPU%-2d activations         : %lu\n",
                cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
                cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
                cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
                cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
                cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
                cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
                cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
                cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
                cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
                cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
                cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
                cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
                cpu, pfm_get_cpu_data(pmu_ctx, cpu),
                cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

        if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

                psr = pfm_get_psr();

                ia64_srlz_d();

                seq_printf(m,
                        "CPU%-2d psr                 : 0x%lx\n"
                        "CPU%-2d pmc0                : 0x%lx\n",
                        cpu, psr,
                        cpu, ia64_get_pmc(0));

                for (i=0; PMC_IS_LAST(i) == 0; i++) {
                        if (PMC_IS_COUNTING(i) == 0) continue;
                        seq_printf(m,
                                "CPU%-2d pmc%u                : 0x%lx\n"
                                "CPU%-2d pmd%u                : 0x%lx\n",
                                cpu, i, ia64_get_pmc(i),
                                cpu, i, ia64_get_pmd(i));
                }
        }
        return 0;
}

const struct seq_operations pfm_seq_ops = {
        .start = pfm_proc_start,
        .next  = pfm_proc_next,
        .stop  = pfm_proc_stop,
        .show  = pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &pfm_seq_ops);
}
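
/*
 * Reading the file wired up above yields the header block followed by one
 * block per online CPU, along the lines of (values illustrative):
 *
 *      $ cat /proc/perfmon
 *      perfmon version            : 2.0
 *      model                      : ...
 *      ...
 *      CPU0  overflow intrs       : 0
 *      CPU0  overflow cycles      : 0
 *      ...
 */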

/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
        struct pt_regs *regs;
        unsigned long dcr;
        unsigned long dcr_pp;

        dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

        /*
         * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
         * on every CPU, so we can rely on the pid to identify the idle task.
         */
        if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
                regs = task_pt_regs(task);
                ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
                return;
        }
        /*
         * if monitoring has started
         */
        if (dcr_pp) {
                dcr = ia64_getreg(_IA64_REG_CR_DCR);
                /*
                 * context switching in?
                 */
                if (is_ctxswin) {
                        /* mask monitoring for the idle task */
                        ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
                        pfm_clear_psr_pp();
                        ia64_srlz_i();
                        return;
                }
                /*
                 * context switching out
                 * restore monitoring for next task
                 *
                 * Due to inlining this odd if-then-else construction generates
                 * better code.
                 */
                ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
                pfm_set_psr_pp();
                ia64_srlz_i();
        }
}

#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
        struct task_struct *task = ctx->ctx_task;

        ia64_psr(regs)->up = 0;
        ia64_psr(regs)->sp = 1;

        if (GET_PMU_OWNER() == task) {
                DPRINT(("cleared ownership for [%d]\n",
                        task_pid_nr(ctx->ctx_task)));
                SET_PMU_OWNER(NULL, NULL);
        }

        /*
         * disconnect the task from the context and vice-versa
         */
        PFM_SET_WORK_PENDING(task, 0);

        task->thread.pfm_context  = NULL;
        task->thread.flags       &= ~IA64_THREAD_PM_VALID;

        DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}

/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
        pfm_context_t *ctx;
        unsigned long flags;
        u64 psr;

        ctx = PFM_GET_CTX(task);
        if (ctx == NULL) return;

        /*
         * we always come here with interrupts ALREADY disabled by
         * the scheduler. So we simply need to protect against concurrent
         * access, not CPU concurrency.
         */
        flags = pfm_protect_ctx_ctxsw(ctx);

        if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
                struct pt_regs *regs = task_pt_regs(task);

                pfm_clear_psr_up();

                pfm_force_cleanup(ctx, regs);

                BUG_ON(ctx->ctx_smpl_hdr);

                pfm_unprotect_ctx_ctxsw(ctx, flags);

                pfm_context_free(ctx);
                return;
        }

        /*
         * save current PSR: needed because we modify it
         */
        ia64_srlz_d();
        psr = pfm_get_psr();

        BUG_ON(psr & (IA64_PSR_I));

        /*
         * stop monitoring:
         * This is the last instruction which may generate an overflow
         *
         * We do not need to set psr.sp because it is irrelevant in the kernel.
         * It will be restored from ipsr when going back to user level
         */
        pfm_clear_psr_up();

        /*
         * keep a copy of psr.up (for reload)
         */
        ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

        /*
         * release ownership of this PMU.
         * PM interrupts are masked, so nothing
         * can happen.
         */
        SET_PMU_OWNER(NULL, NULL);

        /*
         * we systematically save the PMDs as we have no
         * guarantee we will be scheduled on that same
         * CPU again.
         */
        pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

        /*
         * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
         * we will need it on the restore path to check
         * for pending overflow.
         */
        ctx->th_pmcs[0] = ia64_get_pmc(0);

        /*
         * unfreeze PMU if had pending overflows
         */
        if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

        /*
         * finally, allow context access.
         * interrupts will still be masked after this call.
         */
        pfm_unprotect_ctx_ctxsw(ctx, flags);
}

#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
        pfm_context_t *ctx;
        u64 psr;

        ctx = PFM_GET_CTX(task);
        if (ctx == NULL) return;

        /*
         * save current PSR: needed because we modify it
         */
        psr = pfm_get_psr();

        BUG_ON(psr & (IA64_PSR_I));

        /*
         * stop monitoring:
         * This is the last instruction which may generate an overflow
         *
         * We do not need to set psr.sp because it is irrelevant in the kernel.
         * It will be restored from ipsr when going back to user level
         */
        pfm_clear_psr_up();

        /*
         * keep a copy of psr.up (for reload)
         */
        ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}

static void
pfm_lazy_save_regs (struct task_struct *task)
{
        pfm_context_t *ctx;
        unsigned long flags;

        { u64 psr = pfm_get_psr();
          BUG_ON(psr & IA64_PSR_UP);
        }

        ctx = PFM_GET_CTX(task);

        /*
         * we need to mask PMU overflow here to
         * make sure that we maintain pmc0 until
         * we save it. overflow interrupts are
         * treated as spurious if there is no
         * owner.
         *
         * XXX: I don't think this is necessary
         */
        PROTECT_CTX(ctx,flags);

        /*
         * release ownership of this PMU.
         * must be done before we save the registers.
         *
         * after this call any PMU interrupt is treated
         * as spurious.
         */
        SET_PMU_OWNER(NULL, NULL);

        /*
         * save all the pmds we use
         */
        pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

        /*
         * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
         * it is needed to check for pending overflow
         * on the restore path
         */
        ctx->th_pmcs[0] = ia64_get_pmc(0);

        /*
         * unfreeze PMU if had pending overflows
         */
        if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

        /*
         * now we can unmask PMU interrupts, they will
         * be treated as purely spurious and we will not
         * lose any information
         */
        UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
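
/*
 * UP ownership protocol in short: on a context switch out, pfm_save_regs()
 * above only clears psr.up; the expensive PMD save is deferred until some
 * other monitored task needs the PMU, at which point pfm_load_regs() below
 * evicts the previous owner. Condensed from the code below, not a separate
 * code path:
 *
 *      owner = GET_PMU_OWNER();
 *      if (owner == task) {
 *              if (psr_up) pfm_set_psr_up();
 *      } else {
 *              if (owner) pfm_lazy_save_regs(owner);
 *              ... reload PMDs/PMCs, then SET_PMU_OWNER(task, ctx) ...
 *      }
 */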

#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
        pfm_context_t *ctx;
        unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
        unsigned long flags;
        u64 psr, psr_up;
        int need_irq_resend;

        ctx = PFM_GET_CTX(task);
        if (unlikely(ctx == NULL)) return;

        BUG_ON(GET_PMU_OWNER());

        /*
         * possible on unload
         */
        if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

        /*
         * we always come here with interrupts ALREADY disabled by
         * the scheduler. So we simply need to protect against concurrent
         * access, not CPU concurrency.
         */
        flags = pfm_protect_ctx_ctxsw(ctx);
        psr   = pfm_get_psr();

        need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

        BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
        BUG_ON(psr & IA64_PSR_I);

        if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
                struct pt_regs *regs = task_pt_regs(task);

                BUG_ON(ctx->ctx_smpl_hdr);

                pfm_force_cleanup(ctx, regs);

                pfm_unprotect_ctx_ctxsw(ctx, flags);

                /*
                 * this one (kmalloc'ed) is fine with interrupts disabled
                 */
                pfm_context_free(ctx);

                return;
        }

        /*
         * we restore ALL the debug registers to avoid picking up
         * stale state.
         */
        if (ctx->ctx_fl_using_dbreg) {
                pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
                pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
        }
        /*
         * retrieve saved psr.up
         */
        psr_up = ctx->ctx_saved_psr_up;

        /*
         * if we were the last user of the PMU on that CPU,
         * then nothing to do except restore psr
         */
        if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
                /*
                 * retrieve partial reload masks (due to user modifications)
                 */
                pmc_mask = ctx->ctx_reload_pmcs[0];
                pmd_mask = ctx->ctx_reload_pmds[0];

        } else {
                /*
                 * To avoid leaking information to the user level when psr.sp=0,
                 * we must reload ALL implemented pmds (even the ones we don't use).
                 * In the kernel we only allow PFM_READ_PMDS on registers which
                 * we initialized or requested (sampling) so there is no risk there.
                 */
                pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

                /*
                 * ALL accessible PMCs are systematically reloaded, unused registers
                 * get their default (from pfm_reset_pmu_state()) values to avoid picking
                 * up stale configuration.
                 *
                 * PMC0 is never in the mask. It is always restored separately.
                 */
                pmc_mask = ctx->ctx_all_pmcs[0];
        }
        /*
         * when context is MASKED, we will restore PMC with plm=0
         * and PMD with stale information, but that's ok, nothing
         * will be captured.
         *
         * XXX: optimize here
         */
        if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
        if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

        /*
         * check for pending overflow at the time the state
         * was saved.
         */
        if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
                /*
                 * reload pmc0 with the overflow information
                 * On McKinley PMU, this will trigger a PMU interrupt
                 */
                ia64_set_pmc(0, ctx->th_pmcs[0]);
                ia64_srlz_d();
                ctx->th_pmcs[0] = 0UL;

                /*
                 * will replay the PMU interrupt
                 */
                if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

                pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
        }

        /*
         * we just did a reload, so we reset the partial reload fields
         */
        ctx->ctx_reload_pmcs[0] = 0UL;
        ctx->ctx_reload_pmds[0] = 0UL;

        SET_LAST_CPU(ctx, smp_processor_id());

        /*
         * bump the activation counter for this PMU
         */
        INC_ACTIVATION();
        /*
         * record current activation for this context
         */
        SET_ACTIVATION(ctx);

        /*
         * establish new ownership.
         */
        SET_PMU_OWNER(task, ctx);

        /*
         * restore the psr.up bit. measurement
         * is active again.
         * no PMU interrupt can happen at this point
         * because we still have interrupts disabled.
         */
        if (likely(psr_up)) pfm_set_psr_up();

        /*
         * allow concurrent access to context
         */
        pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
        pfm_context_t *ctx;
        struct task_struct *owner;
        unsigned long pmd_mask, pmc_mask;
        u64 psr, psr_up;
        int need_irq_resend;

        owner = GET_PMU_OWNER();
        ctx   = PFM_GET_CTX(task);
        psr   = pfm_get_psr();

        BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
        BUG_ON(psr & IA64_PSR_I);

        /*
         * we restore ALL the debug registers to avoid picking up
         * stale state.
         *
         * This must be done even when the task is still the owner
         * as the registers may have been modified via ptrace()
         * (not perfmon) by the previous task.
         */
        if (ctx->ctx_fl_using_dbreg) {
                pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
                pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
        }

        /*
         * retrieve saved psr.up
         */
        psr_up = ctx->ctx_saved_psr_up;
        need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

        /*
         * short path, our state is still there, just
         * need to restore psr and we go
         *
         * we do not touch either PMC nor PMD. the psr is not touched
         * by the overflow_handler. So we are safe w.r.t. interrupt
         * concurrency even without interrupt masking.
         */
        if (likely(owner == task)) {
                if (likely(psr_up)) pfm_set_psr_up();
                return;
        }

        /*
         * someone else is still using the PMU, first push it out and
         * then we'll be able to install our stuff !
         *
         * Upon return, there will be no owner for the current PMU
         */
        if (owner) pfm_lazy_save_regs(owner);

        /*
         * To avoid leaking information to the user level when psr.sp=0,
         * we must reload ALL implemented pmds (even the ones we don't use).
         * In the kernel we only allow PFM_READ_PMDS on registers which
         * we initialized or requested (sampling) so there is no risk there.
         */
        pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

        /*
         * ALL accessible PMCs are systematically reloaded, unused registers
         * get their default (from pfm_reset_pmu_state()) values to avoid picking
         * up stale configuration.
         *
         * PMC0 is never in the mask. It is always restored separately
         */
        pmc_mask = ctx->ctx_all_pmcs[0];

        pfm_restore_pmds(ctx->th_pmds, pmd_mask);
        pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

        /*
         * check for pending overflow at the time the state
         * was saved.
         */
        if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
                /*
                 * reload pmc0 with the overflow information
                 * On McKinley PMU, this will trigger a PMU interrupt
                 */
                ia64_set_pmc(0, ctx->th_pmcs[0]);
                ia64_srlz_d();

                ctx->th_pmcs[0] = 0UL;

                /*
                 * will replay the PMU interrupt
                 */
                if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

                pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
        }

        /*
         * establish new ownership.
         */
        SET_PMU_OWNER(task, ctx);

        /*
         * restore the psr.up bit. measurement
         * is active again.
         * no PMU interrupt can happen at this point
         * because we still have interrupts disabled.
         */
        if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */

/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
        u64 pmc0;
        unsigned long mask2, val, pmd_val, ovfl_val;
        int i, can_access_pmu = 0;
        int is_self;

        /*
         * is the caller the task being monitored (or which initiated the
         * session for system wide measurements)
         */
        is_self = ctx->ctx_task == task ? 1 : 0;

        /*
         * the PMU can be accessed if the task is the owner of the PMU state on the
         * current CPU, or if we are running on the CPU bound to the context in
         * system-wide mode (that is not necessarily the task the context is
         * attached to in this mode). In system-wide we always have can_access_pmu
         * true because a task running on an invalid processor is flagged earlier
         * in the call stack (see pfm_stop).
         */
        can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
        if (can_access_pmu) {
                /*
                 * Mark the PMU as not owned
                 * This will cause the interrupt handler to do nothing in case an overflow
                 * interrupt was in-flight
                 * This also guarantees that pmc0 will contain the final state
                 * It virtually gives us full control on overflow processing from that point
                 * on.
                 */
                SET_PMU_OWNER(NULL, NULL);
                DPRINT(("releasing ownership\n"));

                /*
                 * read current overflow status:
                 *
                 * we are guaranteed to read the final stable state
                 */
                ia64_srlz_d();
                pmc0 = ia64_get_pmc(0); /* slow */

                /*
                 * reset freeze bit, overflow status information destroyed
                 */
                pfm_unfreeze_pmu();
        } else {
                pmc0 = ctx->th_pmcs[0];
                /*
                 * clear whatever overflow status bits there were
                 */
                ctx->th_pmcs[0] = 0;
        }
        ovfl_val = pmu_conf->ovfl_val;
        /*
         * we save all the used pmds
         * we take care of overflows for counting PMDs
         *
         * XXX: sampling situation is not taken into account here
         */
        mask2 = ctx->ctx_used_pmds[0];

        DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

        for (i = 0; mask2; i++, mask2>>=1) {

                /* skip non used pmds */
                if ((mask2 & 0x1) == 0) continue;

                /*
                 * can access PMU always true in system wide mode
                 */
                val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];

                if (PMD_IS_COUNTING(i)) {
                        DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
                                task_pid_nr(task),
                                i,
                                ctx->ctx_pmds[i].val,
                                val & ovfl_val));

                        /*
                         * we rebuild the full 64 bit value of the counter
                         */
                        val = ctx->ctx_pmds[i].val + (val & ovfl_val);

                        /*
                         * now everything is in ctx_pmds[] and we need
                         * to clear the saved context from save_regs() such that
                         * pfm_read_pmds() gets the correct value
                         */
                        pmd_val = 0UL;

                        /*
                         * take care of overflow inline
                         */
                        if (pmc0 & (1UL << i)) {
                                val += 1 + ovfl_val;
                                DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
                        }
                }

                DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));

                if (is_self) ctx->th_pmds[i] = pmd_val;

                ctx->ctx_pmds[i].val = val;
        }
}

static struct irqaction perfmon_irqaction = {
        .handler = pfm_interrupt_handler,
        .name    = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
        struct pt_regs *regs;

        regs = task_pt_regs(current);

        DPRINT(("called\n"));

        /*
         * should not be necessary but
         * let's take no risk
         */
        pfm_clear_psr_up();
        pfm_clear_psr_pp();
        ia64_psr(regs)->pp = 0;

        /*
         * This call is required
         * May cause a spurious interrupt on some processors
         */
        pfm_freeze_pmu();

        ia64_srlz_d();
}

void
pfm_alt_restore_pmu_state(void *data)
{
        struct pt_regs *regs;

        regs = task_pt_regs(current);

        DPRINT(("called\n"));

        /*
         * put PMU back in state expected
         * by perfmon
         */
        pfm_clear_psr_up();
        pfm_clear_psr_pp();
        ia64_psr(regs)->pp = 0;

        /*
         * perfmon runs with PMU unfrozen at all times
         */
        pfm_unfreeze_pmu();

        ia64_srlz_d();
}

int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
        int ret, i;
        int reserve_cpu;

        /* some sanity checks */
        if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

        /* do the easy test first */
        if (pfm_alt_intr_handler) return -EBUSY;

        /* one at a time in the install or remove, just fail the others */
        if (!spin_trylock(&pfm_alt_install_check)) {
                return -EBUSY;
        }

        /* reserve our session */
        for_each_online_cpu(reserve_cpu) {
                ret = pfm_reserve_session(NULL, 1, reserve_cpu);
                if (ret) goto cleanup_reserve;
        }

        /* save the current system wide pmu states */
        ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
        if (ret) {
                DPRINT(("on_each_cpu() failed: %d\n", ret));
                goto cleanup_reserve;
        }

        /* officially change to the alternate interrupt handler */
        pfm_alt_intr_handler = hdl;

        spin_unlock(&pfm_alt_install_check);

        return 0;

cleanup_reserve:
        for_each_online_cpu(i) {
                /* don't unreserve more than we reserved */
                if (i >= reserve_cpu) break;

                pfm_unreserve_session(NULL, 1, i);
        }

        spin_unlock(&pfm_alt_install_check);

        return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
        int i;
        int ret;

        if (hdl == NULL) return -EINVAL;

        /* cannot remove someone else's handler! */
        if (pfm_alt_intr_handler != hdl) return -EINVAL;

        /* one at a time in the install or remove, just fail the others */
        if (!spin_trylock(&pfm_alt_install_check)) {
                return -EBUSY;
        }

        pfm_alt_intr_handler = NULL;

        ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
        if (ret) {
                DPRINT(("on_each_cpu() failed: %d\n", ret));
        }

        for_each_online_cpu(i) {
                pfm_unreserve_session(NULL, 1, i);
        }

        spin_unlock(&pfm_alt_install_check);

        return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
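
/*
 * Module-side sketch for the pair of entry points above. my_handler is
 * hypothetical; its signature follows the indirect call made in
 * pfm_interrupt_handler(). Install takes over every online CPU or fails;
 * remove must be given back the same descriptor:
 *
 *      static void my_handler(int irq, void *arg, struct pt_regs *regs);
 *
 *      static pfm_intr_handler_desc_t desc = {
 *              .handler = my_handler,
 *      };
 *
 *      pfm_install_alt_pmu_interrupt(&desc);
 *      ...
 *      pfm_remove_alt_pmu_interrupt(&desc);
 */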

/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
        pmu_config_t **p;
        int family;

        family = local_cpu_data->family;
        p      = pmu_confs;

        while(*p) {
                if ((*p)->probe) {
                        if ((*p)->probe() == 0) goto found;
                } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
                        goto found;
                }
                p++;
        }
        return -1;
found:
        pmu_conf = *p;
        return 0;
}
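
/*
 * A pmu_confs[] entry is matched above either by an explicit probe() hook
 * or by CPUID family, with 0xff acting as a wildcard. A hypothetical
 * fallback entry would look like:
 *
 *      static pmu_config_t pmu_conf_generic = {
 *              .pmu_name   = "Generic",
 *              .pmu_family = 0xff,
 *      };
 */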

static const struct file_operations pfm_proc_fops = {
        .open    = pfm_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

int __init
pfm_init(void)
{
        unsigned int n, n_counters, i;

        printk("perfmon: version %u.%u IRQ %u\n",
                PFM_VERSION_MAJ,
                PFM_VERSION_MIN,
                IA64_PERFMON_VECTOR);

        if (pfm_probe_pmu()) {
                printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
                                local_cpu_data->family);
                return -ENODEV;
        }

        /*
         * compute the number of implemented PMD/PMC from the
         * description tables
         */
        n = 0;
        for (i=0; PMC_IS_LAST(i) == 0; i++) {
                if (PMC_IS_IMPL(i) == 0) continue;
                pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
                n++;
        }
        pmu_conf->num_pmcs = n;

        n = 0; n_counters = 0;
        for (i=0; PMD_IS_LAST(i) == 0; i++) {
                if (PMD_IS_IMPL(i) == 0) continue;
                pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
                n++;
                if (PMD_IS_COUNTING(i)) n_counters++;
        }
        pmu_conf->num_pmds     = n;
        pmu_conf->num_counters = n_counters;

        /*
         * sanity checks on the number of debug registers
         */
        if (pmu_conf->use_rr_dbregs) {
                if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
                        printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
                        pmu_conf = NULL;
                        return -1;
                }
                if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
                        printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
                        pmu_conf = NULL;
                        return -1;
                }
        }

        printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
               pmu_conf->pmu_name,
               pmu_conf->num_pmcs,
               pmu_conf->num_pmds,
               pmu_conf->num_counters,
               ffz(pmu_conf->ovfl_val));

        /* sanity check */
        if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
                printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
                pmu_conf = NULL;
                return -1;
        }

        /*
         * create /proc/perfmon (mostly for debugging purposes)
         */
        perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
        if (perfmon_dir == NULL) {
                printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
                pmu_conf = NULL;
                return -1;
        }

        /*
         * create /proc/sys/kernel/perfmon (for debugging purposes)
         */
        pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

        /*
         * initialize all our spinlocks
         */
        spin_lock_init(&pfm_sessions.pfs_lock);
        spin_lock_init(&pfm_buffer_fmt_lock);

        init_pfm_fs();

        for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

        return 0;
}

__initcall(pfm_init);

/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
        static int first_time=1;
        /*
         * make sure no measurement is active
         * (may inherit programmed PMCs from EFI).
         */
        pfm_clear_psr_pp();
        pfm_clear_psr_up();

        /*
         * we run with the PMU not frozen at all times
         */
        pfm_unfreeze_pmu();

        if (first_time) {
                register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
                first_time=0;
        }

        ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
        ia64_srlz_d();
}

/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
        struct task_struct *task;
        struct pt_regs *regs;
        pfm_context_t *ctx;
        unsigned long psr, dcr, info, flags;
        int i, this_cpu;

        local_irq_save(flags);

        this_cpu = smp_processor_id();
        regs     = task_pt_regs(current);
        info     = PFM_CPUINFO_GET();
        dcr      = ia64_getreg(_IA64_REG_CR_DCR);

        if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
                local_irq_restore(flags);
                return;
        }

        printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
                this_cpu,
                from,
                task_pid_nr(current),
                regs->cr_iip,
                current->comm);

        task = GET_PMU_OWNER();
        ctx  = GET_PMU_CTX();

        printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);

        psr = pfm_get_psr();

        printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
                this_cpu,
                ia64_get_pmc(0),
                psr & IA64_PSR_PP ? 1 : 0,
                psr & IA64_PSR_UP ? 1 : 0,
                dcr & IA64_DCR_PP ? 1 : 0,
                info,
                ia64_psr(regs)->up,
                ia64_psr(regs)->pp);

        ia64_psr(regs)->up = 0;
        ia64_psr(regs)->pp = 0;

        for (i=1; PMC_IS_LAST(i) == 0; i++) {
                if (PMC_IS_IMPL(i) == 0) continue;
                printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx ? ctx->th_pmcs[i] : 0UL);
        }

        for (i=1; PMD_IS_LAST(i) == 0; i++) {
                if (PMD_IS_IMPL(i) == 0) continue;
                printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx ? ctx->th_pmds[i] : 0UL);
        }

        if (ctx) {
                printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
                        this_cpu,
                        ctx->ctx_state,
                        ctx->ctx_smpl_vaddr,
                        ctx->ctx_smpl_hdr,
                        ctx->ctx_msgq_head,
                        ctx->ctx_msgq_tail,
                        ctx->ctx_saved_psr_up);
        }

        local_irq_restore(flags);
}

/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
        struct thread_struct *thread;

        DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));

        thread = &task->thread;

        /*
         * cut links inherited from parent (current)
         */
        thread->pfm_context = NULL;

        PFM_SET_WORK_PENDING(task, 0);

        /*
         * the psr bits are already set properly in copy_thread()
         */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
        return -ENOSYS;
}
#endif /* CONFIG_PERFMON */