12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465 |
- /*
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
- #include <linux/cpu.h>
- #include <linux/kvm.h>
- #include <linux/kvm_host.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/of_irq.h>
- #include <linux/rculist.h>
- #include <linux/uaccess.h>
- #include <asm/kvm_emulate.h>
- #include <asm/kvm_arm.h>
- #include <asm/kvm_mmu.h>
- #include <trace/events/kvm.h>
- #include <asm/kvm.h>
- #include <kvm/iodev.h>
- #define CREATE_TRACE_POINTS
- #include "trace.h"
- /*
- * How the whole thing works (courtesy of Christoffer Dall):
- *
- * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
- * something is pending on the CPU interface.
- * - Interrupts that are pending on the distributor are stored on the
- * vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
- * ioctls and guest mmio ops, and other in-kernel peripherals such as the
- * arch. timers).
- * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
- * recalculated
- * - To calculate the oracle, we need info for each cpu from
- * compute_pending_for_cpu, which considers:
- * - PPI: dist->irq_pending & dist->irq_enable
- * - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
- * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
- * registers, stored on each vcpu. We only keep one bit of
- * information per interrupt, making sure that only one vcpu can
- * accept the interrupt.
- * - If any of the above state changes, we must recalculate the oracle.
- * - The same is true when injecting an interrupt, except that we only
- * consider a single interrupt at a time. The irq_spi_cpu array
- * contains the target CPU for each SPI.
- *
- * The handling of level interrupts adds some extra complexity. We
- * need to track when the interrupt has been EOIed, so we can sample
- * the 'line' again. This is achieved as such:
- *
- * - When a level interrupt is moved onto a vcpu, the corresponding
- * bit in irq_queued is set. As long as this bit is set, the line
- * will be ignored for further interrupts. The interrupt is injected
- * into the vcpu with the GICH_LR_EOI bit set (generate a
- * maintenance interrupt on EOI).
- * - When the interrupt is EOIed, the maintenance interrupt fires,
- * and clears the corresponding bit in irq_queued. This allows the
- * interrupt line to be sampled again.
- * - Note that level-triggered interrupts can also be set to pending from
- * writes to GICD_ISPENDRn and lowering the external input line does not
- * cause the interrupt to become inactive in such a situation.
- * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
- * inactive as long as the external input line is held high.
- *
- *
- * Initialization rules: there are multiple stages to the vgic
- * initialization, both for the distributor and the CPU interfaces.
- *
- * Distributor:
- *
- * - kvm_vgic_early_init(): initialization of static data that doesn't
- * depend on any sizing information or emulation type. No allocation
- * is allowed there.
- *
- * - vgic_init(): allocation and initialization of the generic data
- * structures that depend on sizing information (number of CPUs,
- * number of interrupts). Also initializes the vcpu specific data
- * structures. Can be executed lazily for GICv2.
- * [to be renamed to kvm_vgic_init??]
- *
- * CPU Interface:
- *
- * - kvm_vgic_cpu_early_init(): initialization of static data that
- * doesn't depend on any sizing information or emulation type. No
- * allocation is allowed there.
- */
- #include "vgic.h"
- static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
- static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
- static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
- static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
- static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
- static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
- int virt_irq);
- static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
- static const struct vgic_ops *vgic_ops;
- static const struct vgic_params *vgic;
- static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
- {
- vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
- }
- static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
- {
- return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
- }
- int kvm_vgic_map_resources(struct kvm *kvm)
- {
- return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
- }
- /*
- * struct vgic_bitmap contains a bitmap made of unsigned longs, but
- * extracts u32s out of them.
- *
- * This does not work on 64-bit BE systems, because the bitmap access
- * will store two consecutive 32-bit words with the higher-addressed
- * register's bits at the lower index and the lower-addressed register's
- * bits at the higher index.
- *
- * Therefore, swizzle the register index when accessing the 32-bit word
- * registers to access the right register's value.
- */
- #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
- #define REG_OFFSET_SWIZZLE 1
- #else
- #define REG_OFFSET_SWIZZLE 0
- #endif
- static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
- {
- int nr_longs;
- nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
- b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
- if (!b->private)
- return -ENOMEM;
- b->shared = b->private + nr_cpus;
- return 0;
- }
- static void vgic_free_bitmap(struct vgic_bitmap *b)
- {
- kfree(b->private);
- b->private = NULL;
- b->shared = NULL;
- }
- /*
- * Call this function to convert a u64 value to an unsigned long * bitmask
- * in a way that works on both 32-bit and 64-bit LE and BE platforms.
- *
- * Warning: Calling this function may modify *val.
- */
- static unsigned long *u64_to_bitmask(u64 *val)
- {
- #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
- *val = (*val >> 32) | (*val << 32);
- #endif
- return (unsigned long *)val;
- }
- u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
- {
- offset >>= 2;
- if (!offset)
- return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
- else
- return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
- }
- static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
- int cpuid, int irq)
- {
- if (irq < VGIC_NR_PRIVATE_IRQS)
- return test_bit(irq, x->private + cpuid);
- return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
- }
- void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
- int irq, int val)
- {
- unsigned long *reg;
- if (irq < VGIC_NR_PRIVATE_IRQS) {
- reg = x->private + cpuid;
- } else {
- reg = x->shared;
- irq -= VGIC_NR_PRIVATE_IRQS;
- }
- if (val)
- set_bit(irq, reg);
- else
- clear_bit(irq, reg);
- }
- static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
- {
- return x->private + cpuid;
- }
- unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
- {
- return x->shared;
- }
- static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
- {
- int size;
- size = nr_cpus * VGIC_NR_PRIVATE_IRQS;
- size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
- x->private = kzalloc(size, GFP_KERNEL);
- if (!x->private)
- return -ENOMEM;
- x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
- return 0;
- }
- static void vgic_free_bytemap(struct vgic_bytemap *b)
- {
- kfree(b->private);
- b->private = NULL;
- b->shared = NULL;
- }
- u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
- {
- u32 *reg;
- if (offset < VGIC_NR_PRIVATE_IRQS) {
- reg = x->private;
- offset += cpuid * VGIC_NR_PRIVATE_IRQS;
- } else {
- reg = x->shared;
- offset -= VGIC_NR_PRIVATE_IRQS;
- }
- return reg + (offset / sizeof(u32));
- }
- #define VGIC_CFG_LEVEL 0
- #define VGIC_CFG_EDGE 1
- static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int irq_val;
- irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
- return irq_val == VGIC_CFG_EDGE;
- }
- static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
- }
- static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
- }
- static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
- }
- static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
- }
- static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
- }
- static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
- }
- static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
- }
- static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
- }
- static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
- }
- static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
- }
- static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
- }
- static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
- if (!vgic_dist_irq_get_level(vcpu, irq)) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- if (!compute_pending_for_cpu(vcpu))
- clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
- }
- }
- static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
- }
- void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
- }
- void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
- }
- static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
- {
- if (irq < VGIC_NR_PRIVATE_IRQS)
- set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
- else
- set_bit(irq - VGIC_NR_PRIVATE_IRQS,
- vcpu->arch.vgic_cpu.pending_shared);
- }
- void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
- {
- if (irq < VGIC_NR_PRIVATE_IRQS)
- clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
- else
- clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
- vcpu->arch.vgic_cpu.pending_shared);
- }
- static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
- {
- return !vgic_irq_is_queued(vcpu, irq);
- }
- /**
- * vgic_reg_access - access vgic register
- * @mmio: pointer to the data describing the mmio access
- * @reg: pointer to the virtual backing of vgic distributor data
- * @offset: least significant 2 bits used for word offset
- * @mode: ACCESS_ mode (see defines above)
- *
- * Helper to make vgic register access easier using one of the access
- * modes defined for vgic register access
- * (read,raz,write-ignored,setbit,clearbit,write)
- */
- void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
- phys_addr_t offset, int mode)
- {
- int word_offset = (offset & 3) * 8;
- u32 mask = (1UL << (mmio->len * 8)) - 1;
- u32 regval;
- /*
- * Any alignment fault should have been delivered to the guest
- * directly (ARM ARM B3.12.7 "Prioritization of aborts").
- */
- if (reg) {
- regval = *reg;
- } else {
- BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
- regval = 0;
- }
- if (mmio->is_write) {
- u32 data = mmio_data_read(mmio, mask) << word_offset;
- switch (ACCESS_WRITE_MASK(mode)) {
- case ACCESS_WRITE_IGNORED:
- return;
- case ACCESS_WRITE_SETBIT:
- regval |= data;
- break;
- case ACCESS_WRITE_CLEARBIT:
- regval &= ~data;
- break;
- case ACCESS_WRITE_VALUE:
- regval = (regval & ~(mask << word_offset)) | data;
- break;
- }
- *reg = regval;
- } else {
- switch (ACCESS_READ_MASK(mode)) {
- case ACCESS_READ_RAZ:
- regval = 0;
- /* fall through */
- case ACCESS_READ_VALUE:
- mmio_data_write(mmio, mask, regval >> word_offset);
- }
- }
- }
- bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
- {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
- }
- bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id, int access)
- {
- u32 *reg;
- int mode = ACCESS_READ_VALUE | access;
- struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
- reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset, mode);
- if (mmio->is_write) {
- if (access & ACCESS_WRITE_CLEARBIT) {
- if (offset < 4) /* Force SGI enabled */
- *reg |= 0xffff;
- vgic_retire_disabled_irqs(target_vcpu);
- }
- vgic_update_state(kvm);
- return true;
- }
- return false;
- }
- bool vgic_handle_set_pending_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
- {
- u32 *reg, orig;
- u32 level_mask;
- int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
- struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
- level_mask = (~(*reg));
- /* Mark both level and edge triggered irqs as pending */
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
- orig = *reg;
- vgic_reg_access(mmio, reg, offset, mode);
- if (mmio->is_write) {
- /* Set the soft-pending flag only for level-triggered irqs */
- reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset, mode);
- *reg &= level_mask;
- /* Ignore writes to SGIs */
- if (offset < 2) {
- *reg &= ~0xffff;
- *reg |= orig & 0xffff;
- }
- vgic_update_state(kvm);
- return true;
- }
- return false;
- }
- bool vgic_handle_clear_pending_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
- {
- u32 *level_active;
- u32 *reg, orig;
- int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
- struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
- orig = *reg;
- vgic_reg_access(mmio, reg, offset, mode);
- if (mmio->is_write) {
- /* Re-set level triggered level-active interrupts */
- level_active = vgic_bitmap_get_reg(&dist->irq_level,
- vcpu_id, offset);
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
- *reg |= *level_active;
- /* Ignore writes to SGIs */
- if (offset < 2) {
- *reg &= ~0xffff;
- *reg |= orig & 0xffff;
- }
- /* Clear soft-pending flags */
- reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset, mode);
- vgic_update_state(kvm);
- return true;
- }
- return false;
- }
- bool vgic_handle_set_active_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
- {
- u32 *reg;
- struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
- if (mmio->is_write) {
- vgic_update_state(kvm);
- return true;
- }
- return false;
- }
- bool vgic_handle_clear_active_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
- {
- u32 *reg;
- struct vgic_dist *dist = &kvm->arch.vgic;
- reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
- if (mmio->is_write) {
- vgic_update_state(kvm);
- return true;
- }
- return false;
- }
- static u32 vgic_cfg_expand(u16 val)
- {
- u32 res = 0;
- int i;
- /*
- * Turn a 16bit value like abcd...mnop into a 32bit word
- * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
- */
- for (i = 0; i < 16; i++)
- res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
- return res;
- }
- static u16 vgic_cfg_compress(u32 val)
- {
- u16 res = 0;
- int i;
- /*
- * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
- * abcd...mnop which is what we really care about.
- */
- for (i = 0; i < 16; i++)
- res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
- return res;
- }
- /*
- * The distributor uses 2 bits per IRQ for the CFG register, but the
- * LSB is always 0. As such, we only keep the upper bit, and use the
- * two above functions to compress/expand the bits
- */
- bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
- {
- u32 val;
- if (offset & 4)
- val = *reg >> 16;
- else
- val = *reg & 0xffff;
- val = vgic_cfg_expand(val);
- vgic_reg_access(mmio, &val, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- /* Ignore writes to read-only SGI and PPI bits */
- if (offset < 8)
- return false;
- val = vgic_cfg_compress(val);
- if (offset & 4) {
- *reg &= 0xffff;
- *reg |= val << 16;
- } else {
- *reg &= 0xffff << 16;
- *reg |= val;
- }
- }
- return false;
- }
- /**
- * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
- * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
- *
- * Move any IRQs that have already been assigned to LRs back to the
- * emulated distributor state so that the complete emulated state can be read
- * from the main emulation structures without investigating the LRs.
- */
- void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
- {
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- u64 elrsr = vgic_get_elrsr(vcpu);
- unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
- int i;
- for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
- struct vgic_lr lr = vgic_get_lr(vcpu, i);
- /*
- * There are three options for the state bits:
- *
- * 01: pending
- * 10: active
- * 11: pending and active
- */
- BUG_ON(!(lr.state & LR_STATE_MASK));
- /* Reestablish SGI source for pending and active IRQs */
- if (lr.irq < VGIC_NR_SGIS)
- add_sgi_source(vcpu, lr.irq, lr.source);
- /*
- * If the LR holds an active (10) or a pending and active (11)
- * interrupt then move the active state to the
- * distributor tracking bit.
- */
- if (lr.state & LR_STATE_ACTIVE)
- vgic_irq_set_active(vcpu, lr.irq);
- /*
- * Reestablish the pending state on the distributor and the
- * CPU interface and mark the LR as free for other use.
- */
- vgic_retire_lr(i, vcpu);
- /* Finally update the VGIC state. */
- vgic_update_state(vcpu->kvm);
- }
- }
- const
- struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
- int len, gpa_t offset)
- {
- while (ranges->len) {
- if (offset >= ranges->base &&
- (offset + len) <= (ranges->base + ranges->len))
- return ranges;
- ranges++;
- }
- return NULL;
- }
- static bool vgic_validate_access(const struct vgic_dist *dist,
- const struct vgic_io_range *range,
- unsigned long offset)
- {
- int irq;
- if (!range->bits_per_irq)
- return true; /* Not an irq-based access */
- irq = offset * 8 / range->bits_per_irq;
- if (irq >= dist->nr_irqs)
- return false;
- return true;
- }
- /*
- * Call the respective handler function for the given range.
- * We split up any 64 bit accesses into two consecutive 32 bit
- * handler calls and merge the result afterwards.
- * We do this in a little endian fashion regardless of the host's
- * or guest's endianness, because the GIC is always LE and the rest of
- * the code (vgic_reg_access) also puts it in a LE fashion already.
- * At this point we have already identified the handle function, so
- * range points to that one entry and offset is relative to this.
- */
- static bool call_range_handler(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- unsigned long offset,
- const struct vgic_io_range *range)
- {
- struct kvm_exit_mmio mmio32;
- bool ret;
- if (likely(mmio->len <= 4))
- return range->handle_mmio(vcpu, mmio, offset);
- /*
- * Any access bigger than 4 bytes (that we currently handle in KVM)
- * is actually 8 bytes long, caused by a 64-bit access
- */
- mmio32.len = 4;
- mmio32.is_write = mmio->is_write;
- mmio32.private = mmio->private;
- mmio32.phys_addr = mmio->phys_addr + 4;
- mmio32.data = &((u32 *)mmio->data)[1];
- ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
- mmio32.phys_addr = mmio->phys_addr;
- mmio32.data = &((u32 *)mmio->data)[0];
- ret |= range->handle_mmio(vcpu, &mmio32, offset);
- return ret;
- }
- /**
- * vgic_handle_mmio_access - handle an in-kernel MMIO access
- * This is called by the read/write KVM IO device wrappers below.
- * @vcpu: pointer to the vcpu performing the access
- * @this: pointer to the KVM IO device in charge
- * @addr: guest physical address of the access
- * @len: size of the access
- * @val: pointer to the data region
- * @is_write: read or write access
- *
- * returns true if the MMIO access could be performed
- */
- static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
- struct kvm_io_device *this, gpa_t addr,
- int len, void *val, bool is_write)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct vgic_io_device *iodev = container_of(this,
- struct vgic_io_device, dev);
- const struct vgic_io_range *range;
- struct kvm_exit_mmio mmio;
- bool updated_state;
- gpa_t offset;
- offset = addr - iodev->addr;
- range = vgic_find_range(iodev->reg_ranges, len, offset);
- if (unlikely(!range || !range->handle_mmio)) {
- pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
- return -ENXIO;
- }
- mmio.phys_addr = addr;
- mmio.len = len;
- mmio.is_write = is_write;
- mmio.data = val;
- mmio.private = iodev->redist_vcpu;
- spin_lock(&dist->lock);
- offset -= range->base;
- if (vgic_validate_access(dist, range, offset)) {
- updated_state = call_range_handler(vcpu, &mmio, offset, range);
- } else {
- if (!is_write)
- memset(val, 0, len);
- updated_state = false;
- }
- spin_unlock(&dist->lock);
- if (updated_state)
- vgic_kick_vcpus(vcpu->kvm);
- return 0;
- }
- static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
- struct kvm_io_device *this,
- gpa_t addr, int len, void *val)
- {
- return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
- }
- static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
- struct kvm_io_device *this,
- gpa_t addr, int len, const void *val)
- {
- return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
- true);
- }
- struct kvm_io_device_ops vgic_io_ops = {
- .read = vgic_handle_mmio_read,
- .write = vgic_handle_mmio_write,
- };
- /**
- * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
- * @kvm: The VM structure pointer
- * @base: The (guest) base address for the register frame
- * @len: Length of the register frame window
- * @ranges: Describing the handler functions for each register
- * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
- * @iodev: Points to memory to be passed on to the handler
- *
- * @iodev stores the parameters of this function to be usable by the handler
- * respectively the dispatcher function (since the KVM I/O bus framework lacks
- * an opaque parameter). Initialization is done in this function, but the
- * reference should be valid and unique for the whole VGIC lifetime.
- * If the register frame is not mapped for a specific VCPU, pass -1 to
- * @redist_vcpu_id.
- */
- int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
- const struct vgic_io_range *ranges,
- int redist_vcpu_id,
- struct vgic_io_device *iodev)
- {
- struct kvm_vcpu *vcpu = NULL;
- int ret;
- if (redist_vcpu_id >= 0)
- vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
- iodev->addr = base;
- iodev->len = len;
- iodev->reg_ranges = ranges;
- iodev->redist_vcpu = vcpu;
- kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
- mutex_lock(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
- &iodev->dev);
- mutex_unlock(&kvm->slots_lock);
- /* Mark the iodev as invalid if registration fails. */
- if (ret)
- iodev->dev.ops = NULL;
- return ret;
- }
- static int vgic_nr_shared_irqs(struct vgic_dist *dist)
- {
- return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
- }
- static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long *active, *enabled, *act_percpu, *act_shared;
- unsigned long active_private, active_shared;
- int nr_shared = vgic_nr_shared_irqs(dist);
- int vcpu_id;
- vcpu_id = vcpu->vcpu_id;
- act_percpu = vcpu->arch.vgic_cpu.active_percpu;
- act_shared = vcpu->arch.vgic_cpu.active_shared;
- active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
- enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
- bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
- active = vgic_bitmap_get_shared_map(&dist->irq_active);
- enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
- bitmap_and(act_shared, active, enabled, nr_shared);
- bitmap_and(act_shared, act_shared,
- vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
- nr_shared);
- active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
- active_shared = find_first_bit(act_shared, nr_shared);
- return (active_private < VGIC_NR_PRIVATE_IRQS ||
- active_shared < nr_shared);
- }
- static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
- unsigned long pending_private, pending_shared;
- int nr_shared = vgic_nr_shared_irqs(dist);
- int vcpu_id;
- vcpu_id = vcpu->vcpu_id;
- pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
- pend_shared = vcpu->arch.vgic_cpu.pending_shared;
- if (!dist->enabled) {
- bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
- bitmap_zero(pend_shared, nr_shared);
- return 0;
- }
- pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
- enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
- bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
- pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
- enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
- bitmap_and(pend_shared, pending, enabled, nr_shared);
- bitmap_and(pend_shared, pend_shared,
- vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
- nr_shared);
- pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
- pending_shared = find_first_bit(pend_shared, nr_shared);
- return (pending_private < VGIC_NR_PRIVATE_IRQS ||
- pending_shared < vgic_nr_shared_irqs(dist));
- }
- /*
- * Update the interrupt state and determine which CPUs have pending
- * or active interrupts. Must be called with distributor lock held.
- */
- void vgic_update_state(struct kvm *kvm)
- {
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int c;
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (compute_pending_for_cpu(vcpu))
- set_bit(c, dist->irq_pending_on_cpu);
- if (compute_active_for_cpu(vcpu))
- set_bit(c, dist->irq_active_on_cpu);
- else
- clear_bit(c, dist->irq_active_on_cpu);
- }
- }
- static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
- {
- return vgic_ops->get_lr(vcpu, lr);
- }
- static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr vlr)
- {
- vgic_ops->set_lr(vcpu, lr, vlr);
- }
- static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
- {
- return vgic_ops->get_elrsr(vcpu);
- }
- static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
- {
- return vgic_ops->get_eisr(vcpu);
- }
- static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
- {
- vgic_ops->clear_eisr(vcpu);
- }
- static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
- {
- return vgic_ops->get_interrupt_status(vcpu);
- }
- static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
- {
- vgic_ops->enable_underflow(vcpu);
- }
- static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
- {
- vgic_ops->disable_underflow(vcpu);
- }
- void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
- {
- vgic_ops->get_vmcr(vcpu, vmcr);
- }
- void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
- {
- vgic_ops->set_vmcr(vcpu, vmcr);
- }
- static inline void vgic_enable(struct kvm_vcpu *vcpu)
- {
- vgic_ops->enable(vcpu);
- }
- static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
- {
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
- vgic_irq_clear_queued(vcpu, vlr.irq);
- /*
- * We must transfer the pending state back to the distributor before
- * retiring the LR, otherwise we may loose edge-triggered interrupts.
- */
- if (vlr.state & LR_STATE_PENDING) {
- vgic_dist_irq_set_pending(vcpu, vlr.irq);
- vlr.hwirq = 0;
- }
- vlr.state = 0;
- vgic_set_lr(vcpu, lr_nr, vlr);
- }
- static bool dist_active_irq(struct kvm_vcpu *vcpu)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
- }
- bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
- {
- int i;
- for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, i);
- if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
- return true;
- }
- return vgic_irq_is_active(vcpu, map->virt_irq);
- }
- /*
- * An interrupt may have been disabled after being made pending on the
- * CPU interface (the classic case is a timer running while we're
- * rebooting the guest - the interrupt would kick as soon as the CPU
- * interface gets enabled, with deadly consequences).
- *
- * The solution is to examine already active LRs, and check the
- * interrupt is still enabled. If not, just retire it.
- */
- static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
- {
- u64 elrsr = vgic_get_elrsr(vcpu);
- unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
- int lr;
- for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- if (!vgic_irq_is_enabled(vcpu, vlr.irq))
- vgic_retire_lr(lr, vcpu);
- }
- }
- static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
- int lr_nr, struct vgic_lr vlr)
- {
- if (vgic_irq_is_active(vcpu, irq)) {
- vlr.state |= LR_STATE_ACTIVE;
- kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
- vgic_irq_clear_active(vcpu, irq);
- vgic_update_state(vcpu->kvm);
- } else {
- WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
- vlr.state |= LR_STATE_PENDING;
- kvm_debug("Set pending: 0x%x\n", vlr.state);
- }
- if (!vgic_irq_is_edge(vcpu, irq))
- vlr.state |= LR_EOI_INT;
- if (vlr.irq >= VGIC_NR_SGIS) {
- struct irq_phys_map *map;
- map = vgic_irq_map_search(vcpu, irq);
- if (map) {
- vlr.hwirq = map->phys_irq;
- vlr.state |= LR_HW;
- vlr.state &= ~LR_EOI_INT;
- /*
- * Make sure we're not going to sample this
- * again, as a HW-backed interrupt cannot be
- * in the PENDING_ACTIVE stage.
- */
- vgic_irq_set_queued(vcpu, irq);
- }
- }
- vgic_set_lr(vcpu, lr_nr, vlr);
- }
- /*
- * Queue an interrupt to a CPU virtual interface. Return true on success,
- * or false if it wasn't possible to queue it.
- * sgi_source must be zero for any non-SGI interrupts.
- */
- bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- u64 elrsr = vgic_get_elrsr(vcpu);
- unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
- struct vgic_lr vlr;
- int lr;
- /* Sanitize the input... */
- BUG_ON(sgi_source_id & ~7);
- BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
- BUG_ON(irq >= dist->nr_irqs);
- kvm_debug("Queue IRQ%d\n", irq);
- /* Do we have an active interrupt for the same CPUID? */
- for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
- vlr = vgic_get_lr(vcpu, lr);
- if (vlr.irq == irq && vlr.source == sgi_source_id) {
- kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
- vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
- return true;
- }
- }
- /* Try to use another LR for this interrupt */
- lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
- if (lr >= vgic->nr_lr)
- return false;
- kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
- vlr.irq = irq;
- vlr.source = sgi_source_id;
- vlr.state = 0;
- vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
- return true;
- }
- static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
- {
- if (!vgic_can_sample_irq(vcpu, irq))
- return true; /* level interrupt, already queued */
- if (vgic_queue_irq(vcpu, 0, irq)) {
- if (vgic_irq_is_edge(vcpu, irq)) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- } else {
- vgic_irq_set_queued(vcpu, irq);
- }
- return true;
- }
- return false;
- }
- /*
- * Fill the list registers with pending interrupts before running the
- * guest.
- */
- static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
- {
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long *pa_percpu, *pa_shared;
- int i, vcpu_id;
- int overflow = 0;
- int nr_shared = vgic_nr_shared_irqs(dist);
- vcpu_id = vcpu->vcpu_id;
- pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
- pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
- bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
- VGIC_NR_PRIVATE_IRQS);
- bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
- nr_shared);
- /*
- * We may not have any pending interrupt, or the interrupts
- * may have been serviced from another vcpu. In all cases,
- * move along.
- */
- if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
- goto epilog;
- /* SGIs */
- for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
- if (!queue_sgi(vcpu, i))
- overflow = 1;
- }
- /* PPIs */
- for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
- if (!vgic_queue_hwirq(vcpu, i))
- overflow = 1;
- }
- /* SPIs */
- for_each_set_bit(i, pa_shared, nr_shared) {
- if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
- overflow = 1;
- }
- epilog:
- if (overflow) {
- vgic_enable_underflow(vcpu);
- } else {
- vgic_disable_underflow(vcpu);
- /*
- * We're about to run this VCPU, and we've consumed
- * everything the distributor had in store for
- * us. Claim we don't have anything pending. We'll
- * adjust that if needed while exiting.
- */
- clear_bit(vcpu_id, dist->irq_pending_on_cpu);
- }
- }
- static int process_queued_irq(struct kvm_vcpu *vcpu,
- int lr, struct vgic_lr vlr)
- {
- int pending = 0;
- /*
- * If the IRQ was EOIed (called from vgic_process_maintenance) or it
- * went from active to non-active (called from vgic_sync_hwirq) it was
- * also ACKed and we we therefore assume we can clear the soft pending
- * state (should it had been set) for this interrupt.
- *
- * Note: if the IRQ soft pending state was set after the IRQ was
- * acked, it actually shouldn't be cleared, but we have no way of
- * knowing that unless we start trapping ACKs when the soft-pending
- * state is set.
- */
- vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
- /*
- * Tell the gic to start sampling this interrupt again.
- */
- vgic_irq_clear_queued(vcpu, vlr.irq);
- /* Any additional pending interrupt? */
- if (vgic_irq_is_edge(vcpu, vlr.irq)) {
- BUG_ON(!(vlr.state & LR_HW));
- pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
- } else {
- if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
- vgic_cpu_irq_set(vcpu, vlr.irq);
- pending = 1;
- } else {
- vgic_dist_irq_clear_pending(vcpu, vlr.irq);
- vgic_cpu_irq_clear(vcpu, vlr.irq);
- }
- }
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- vlr.state = 0;
- vlr.hwirq = 0;
- vgic_set_lr(vcpu, lr, vlr);
- return pending;
- }
- static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
- {
- u32 status = vgic_get_interrupt_status(vcpu);
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct kvm *kvm = vcpu->kvm;
- int level_pending = 0;
- kvm_debug("STATUS = %08x\n", status);
- if (status & INT_STATUS_EOI) {
- /*
- * Some level interrupts have been EOIed. Clear their
- * active bit.
- */
- u64 eisr = vgic_get_eisr(vcpu);
- unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
- int lr;
- for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
- WARN_ON(vlr.state & LR_STATE_MASK);
- /*
- * kvm_notify_acked_irq calls kvm_set_irq()
- * to reset the IRQ level, which grabs the dist->lock
- * so we call this before taking the dist->lock.
- */
- kvm_notify_acked_irq(kvm, 0,
- vlr.irq - VGIC_NR_PRIVATE_IRQS);
- spin_lock(&dist->lock);
- level_pending |= process_queued_irq(vcpu, lr, vlr);
- spin_unlock(&dist->lock);
- }
- }
- if (status & INT_STATUS_UNDERFLOW)
- vgic_disable_underflow(vcpu);
- /*
- * In the next iterations of the vcpu loop, if we sync the vgic state
- * after flushing it, but before entering the guest (this happens for
- * pending signals and vmid rollovers), then make sure we don't pick
- * up any old maintenance interrupts here.
- */
- vgic_clear_eisr(vcpu);
- return level_pending;
- }
- /*
- * Save the physical active state, and reset it to inactive.
- *
- * Return true if there's a pending forwarded interrupt to queue.
- */
- static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- bool level_pending;
- if (!(vlr.state & LR_HW))
- return false;
- if (vlr.state & LR_STATE_ACTIVE)
- return false;
- spin_lock(&dist->lock);
- level_pending = process_queued_irq(vcpu, lr, vlr);
- spin_unlock(&dist->lock);
- return level_pending;
- }
- /* Sync back the VGIC state after a guest run */
- static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- u64 elrsr;
- unsigned long *elrsr_ptr;
- int lr, pending;
- bool level_pending;
- level_pending = vgic_process_maintenance(vcpu);
- /* Deal with HW interrupts, and clear mappings for empty LRs */
- for (lr = 0; lr < vgic->nr_lr; lr++) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
- BUG_ON(vlr.irq >= dist->nr_irqs);
- }
- /* Check if we still have something up our sleeve... */
- elrsr = vgic_get_elrsr(vcpu);
- elrsr_ptr = u64_to_bitmask(&elrsr);
- pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
- if (level_pending || pending < vgic->nr_lr)
- set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
- }
- void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- if (!irqchip_in_kernel(vcpu->kvm))
- return;
- spin_lock(&dist->lock);
- __kvm_vgic_flush_hwstate(vcpu);
- spin_unlock(&dist->lock);
- }
- void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
- {
- if (!irqchip_in_kernel(vcpu->kvm))
- return;
- __kvm_vgic_sync_hwstate(vcpu);
- }
- int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- if (!irqchip_in_kernel(vcpu->kvm))
- return 0;
- return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
- }
- void vgic_kick_vcpus(struct kvm *kvm)
- {
- struct kvm_vcpu *vcpu;
- int c;
- /*
- * We've injected an interrupt, time to find out who deserves
- * a good kick...
- */
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (kvm_vgic_vcpu_pending_irq(vcpu))
- kvm_vcpu_kick(vcpu);
- }
- }
- static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
- {
- int edge_triggered = vgic_irq_is_edge(vcpu, irq);
- /*
- * Only inject an interrupt if:
- * - edge triggered and we have a rising edge
- * - level triggered and we change level
- */
- if (edge_triggered) {
- int state = vgic_dist_irq_is_pending(vcpu, irq);
- return level > state;
- } else {
- int state = vgic_dist_irq_get_level(vcpu, irq);
- return level != state;
- }
- }
- static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
- struct irq_phys_map *map,
- unsigned int irq_num, bool level)
- {
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int edge_triggered, level_triggered;
- int enabled;
- bool ret = true, can_inject = true;
- trace_vgic_update_irq_pending(cpuid, irq_num, level);
- if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
- return -EINVAL;
- spin_lock(&dist->lock);
- vcpu = kvm_get_vcpu(kvm, cpuid);
- edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
- level_triggered = !edge_triggered;
- if (!vgic_validate_injection(vcpu, irq_num, level)) {
- ret = false;
- goto out;
- }
- if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
- cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
- if (cpuid == VCPU_NOT_ALLOCATED) {
- /* Pretend we use CPU0, and prevent injection */
- cpuid = 0;
- can_inject = false;
- }
- vcpu = kvm_get_vcpu(kvm, cpuid);
- }
- kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
- if (level) {
- if (level_triggered)
- vgic_dist_irq_set_level(vcpu, irq_num);
- vgic_dist_irq_set_pending(vcpu, irq_num);
- } else {
- if (level_triggered) {
- vgic_dist_irq_clear_level(vcpu, irq_num);
- if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
- vgic_dist_irq_clear_pending(vcpu, irq_num);
- vgic_cpu_irq_clear(vcpu, irq_num);
- if (!compute_pending_for_cpu(vcpu))
- clear_bit(cpuid, dist->irq_pending_on_cpu);
- }
- }
- ret = false;
- goto out;
- }
- enabled = vgic_irq_is_enabled(vcpu, irq_num);
- if (!enabled || !can_inject) {
- ret = false;
- goto out;
- }
- if (!vgic_can_sample_irq(vcpu, irq_num)) {
- /*
- * Level interrupt in progress, will be picked up
- * when EOId.
- */
- ret = false;
- goto out;
- }
- if (level) {
- vgic_cpu_irq_set(vcpu, irq_num);
- set_bit(cpuid, dist->irq_pending_on_cpu);
- }
- out:
- spin_unlock(&dist->lock);
- if (ret) {
- /* kick the specified vcpu */
- kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
- }
- return 0;
- }
- static int vgic_lazy_init(struct kvm *kvm)
- {
- int ret = 0;
- if (unlikely(!vgic_initialized(kvm))) {
- /*
- * We only provide the automatic initialization of the VGIC
- * for the legacy case of a GICv2. Any other type must
- * be explicitly initialized once setup with the respective
- * KVM device call.
- */
- if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
- return -EBUSY;
- mutex_lock(&kvm->lock);
- ret = vgic_init(kvm);
- mutex_unlock(&kvm->lock);
- }
- return ret;
- }
- /**
- * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
- * @kvm: The VM structure pointer
- * @cpuid: The CPU for PPIs
- * @irq_num: The IRQ number that is assigned to the device. This IRQ
- * must not be mapped to a HW interrupt.
- * @level: Edge-triggered: true: to trigger the interrupt
- * false: to ignore the call
- * Level-sensitive true: raise the input signal
- * false: lower the input signal
- *
- * The GIC is not concerned with devices being active-LOW or active-HIGH for
- * level-sensitive interrupts. You can think of the level parameter as 1
- * being HIGH and 0 being LOW and all devices being active-HIGH.
- */
- int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
- bool level)
- {
- struct irq_phys_map *map;
- int ret;
- ret = vgic_lazy_init(kvm);
- if (ret)
- return ret;
- map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
- if (map)
- return -EINVAL;
- return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
- }
- /**
- * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
- * @kvm: The VM structure pointer
- * @cpuid: The CPU for PPIs
- * @map: Pointer to a irq_phys_map structure describing the mapping
- * @level: Edge-triggered: true: to trigger the interrupt
- * false: to ignore the call
- * Level-sensitive true: raise the input signal
- * false: lower the input signal
- *
- * The GIC is not concerned with devices being active-LOW or active-HIGH for
- * level-sensitive interrupts. You can think of the level parameter as 1
- * being HIGH and 0 being LOW and all devices being active-HIGH.
- */
- int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
- struct irq_phys_map *map, bool level)
- {
- int ret;
- ret = vgic_lazy_init(kvm);
- if (ret)
- return ret;
- return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
- }
- static irqreturn_t vgic_maintenance_handler(int irq, void *data)
- {
- /*
- * We cannot rely on the vgic maintenance interrupt to be
- * delivered synchronously. This means we can only use it to
- * exit the VM, and we perform the handling of EOIed
- * interrupts on the exit path (see vgic_process_maintenance).
- */
- return IRQ_HANDLED;
- }
- static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
- int virt_irq)
- {
- if (virt_irq < VGIC_NR_PRIVATE_IRQS)
- return &vcpu->arch.vgic_cpu.irq_phys_map_list;
- else
- return &vcpu->kvm->arch.vgic.irq_phys_map_list;
- }
- /**
- * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
- * @vcpu: The VCPU pointer
- * @virt_irq: The virtual irq number
- * @irq: The Linux IRQ number
- *
- * Establish a mapping between a guest visible irq (@virt_irq) and a
- * Linux irq (@irq). On injection, @virt_irq will be associated with
- * the physical interrupt represented by @irq. This mapping can be
- * established multiple times as long as the parameters are the same.
- *
- * Returns a valid pointer on success, and an error pointer otherwise
- */
- struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
- int virt_irq, int irq)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
- struct irq_phys_map *map;
- struct irq_phys_map_entry *entry;
- struct irq_desc *desc;
- struct irq_data *data;
- int phys_irq;
- desc = irq_to_desc(irq);
- if (!desc) {
- kvm_err("%s: no interrupt descriptor\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- data = irq_desc_get_irq_data(desc);
- while (data->parent_data)
- data = data->parent_data;
- phys_irq = data->hwirq;
- /* Create a new mapping */
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return ERR_PTR(-ENOMEM);
- spin_lock(&dist->irq_phys_map_lock);
- /* Try to match an existing mapping */
- map = vgic_irq_map_search(vcpu, virt_irq);
- if (map) {
- /* Make sure this mapping matches */
- if (map->phys_irq != phys_irq ||
- map->irq != irq)
- map = ERR_PTR(-EINVAL);
- /* Found an existing, valid mapping */
- goto out;
- }
- map = &entry->map;
- map->virt_irq = virt_irq;
- map->phys_irq = phys_irq;
- map->irq = irq;
- list_add_tail_rcu(&entry->entry, root);
- out:
- spin_unlock(&dist->irq_phys_map_lock);
- /* If we've found a hit in the existing list, free the useless
- * entry */
- if (IS_ERR(map) || map != &entry->map)
- kfree(entry);
- return map;
- }
- static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
- int virt_irq)
- {
- struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
- struct irq_phys_map_entry *entry;
- struct irq_phys_map *map;
- rcu_read_lock();
- list_for_each_entry_rcu(entry, root, entry) {
- map = &entry->map;
- if (map->virt_irq == virt_irq) {
- rcu_read_unlock();
- return map;
- }
- }
- rcu_read_unlock();
- return NULL;
- }
- static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
- {
- struct irq_phys_map_entry *entry;
- entry = container_of(rcu, struct irq_phys_map_entry, rcu);
- kfree(entry);
- }
- /**
- * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
- * @vcpu: The VCPU pointer
- * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
- *
- * Remove an existing mapping between virtual and physical interrupts.
- */
- int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct irq_phys_map_entry *entry;
- struct list_head *root;
- if (!map)
- return -EINVAL;
- root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
- spin_lock(&dist->irq_phys_map_lock);
- list_for_each_entry(entry, root, entry) {
- if (&entry->map == map) {
- list_del_rcu(&entry->entry);
- call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
- break;
- }
- }
- spin_unlock(&dist->irq_phys_map_lock);
- return 0;
- }
- static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
- {
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct irq_phys_map_entry *entry;
- spin_lock(&dist->irq_phys_map_lock);
- list_for_each_entry(entry, root, entry) {
- list_del_rcu(&entry->entry);
- call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
- }
- spin_unlock(&dist->irq_phys_map_lock);
- }
- void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
- {
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- kfree(vgic_cpu->pending_shared);
- kfree(vgic_cpu->active_shared);
- kfree(vgic_cpu->pend_act_shared);
- vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
- vgic_cpu->pending_shared = NULL;
- vgic_cpu->active_shared = NULL;
- vgic_cpu->pend_act_shared = NULL;
- }
- static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
- {
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
- int sz = nr_longs * sizeof(unsigned long);
- vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
- if (!vgic_cpu->pending_shared
- || !vgic_cpu->active_shared
- || !vgic_cpu->pend_act_shared) {
- kvm_vgic_vcpu_destroy(vcpu);
- return -ENOMEM;
- }
- /*
- * Store the number of LRs per vcpu, so we don't have to go
- * all the way to the distributor structure to find out. Only
- * assembly code should use this one.
- */
- vgic_cpu->nr_lr = vgic->nr_lr;
- return 0;
- }
- /**
- * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
- *
- * No memory allocation should be performed here, only static init.
- */
- void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
- {
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
- }
- /**
- * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
- *
- * The host's GIC naturally limits the maximum amount of VCPUs a guest
- * can use.
- */
- int kvm_vgic_get_max_vcpus(void)
- {
- return vgic->max_gic_vcpus;
- }
- void kvm_vgic_destroy(struct kvm *kvm)
- {
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i;
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vgic_vcpu_destroy(vcpu);
- vgic_free_bitmap(&dist->irq_enabled);
- vgic_free_bitmap(&dist->irq_level);
- vgic_free_bitmap(&dist->irq_pending);
- vgic_free_bitmap(&dist->irq_soft_pend);
- vgic_free_bitmap(&dist->irq_queued);
- vgic_free_bitmap(&dist->irq_cfg);
- vgic_free_bytemap(&dist->irq_priority);
- if (dist->irq_spi_target) {
- for (i = 0; i < dist->nr_cpus; i++)
- vgic_free_bitmap(&dist->irq_spi_target[i]);
- }
- kfree(dist->irq_sgi_sources);
- kfree(dist->irq_spi_cpu);
- kfree(dist->irq_spi_mpidr);
- kfree(dist->irq_spi_target);
- kfree(dist->irq_pending_on_cpu);
- kfree(dist->irq_active_on_cpu);
- vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
- dist->irq_sgi_sources = NULL;
- dist->irq_spi_cpu = NULL;
- dist->irq_spi_target = NULL;
- dist->irq_pending_on_cpu = NULL;
- dist->irq_active_on_cpu = NULL;
- dist->nr_cpus = 0;
- }
- /*
- * Allocate and initialize the various data structures. Must be called
- * with kvm->lock held!
- */
- int vgic_init(struct kvm *kvm)
- {
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int nr_cpus, nr_irqs;
- int ret, i, vcpu_id;
- if (vgic_initialized(kvm))
- return 0;
- nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
- if (!nr_cpus) /* No vcpus? Can't be good... */
- return -ENODEV;
- /*
- * If nobody configured the number of interrupts, use the
- * legacy one.
- */
- if (!dist->nr_irqs)
- dist->nr_irqs = VGIC_NR_IRQS_LEGACY;
- nr_irqs = dist->nr_irqs;
- ret = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
- ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
- if (ret)
- goto out;
- dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
- dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
- dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
- GFP_KERNEL);
- dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
- GFP_KERNEL);
- dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
- GFP_KERNEL);
- if (!dist->irq_sgi_sources ||
- !dist->irq_spi_cpu ||
- !dist->irq_spi_target ||
- !dist->irq_pending_on_cpu ||
- !dist->irq_active_on_cpu) {
- ret = -ENOMEM;
- goto out;
- }
- for (i = 0; i < nr_cpus; i++)
- ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
- nr_cpus, nr_irqs);
- if (ret)
- goto out;
- ret = kvm->arch.vgic.vm_ops.init_model(kvm);
- if (ret)
- goto out;
- kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
- ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
- if (ret) {
- kvm_err("VGIC: Failed to allocate vcpu memory\n");
- break;
- }
- /*
- * Enable and configure all SGIs to be edge-triggere and
- * configure all PPIs as level-triggered.
- */
- for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
- if (i < VGIC_NR_SGIS) {
- /* SGIs */
- vgic_bitmap_set_irq_val(&dist->irq_enabled,
- vcpu->vcpu_id, i, 1);
- vgic_bitmap_set_irq_val(&dist->irq_cfg,
- vcpu->vcpu_id, i,
- VGIC_CFG_EDGE);
- } else if (i < VGIC_NR_PRIVATE_IRQS) {
- /* PPIs */
- vgic_bitmap_set_irq_val(&dist->irq_cfg,
- vcpu->vcpu_id, i,
- VGIC_CFG_LEVEL);
- }
- }
- vgic_enable(vcpu);
- }
- out:
- if (ret)
- kvm_vgic_destroy(kvm);
- return ret;
- }
- static int init_vgic_model(struct kvm *kvm, int type)
- {
- switch (type) {
- case KVM_DEV_TYPE_ARM_VGIC_V2:
- vgic_v2_init_emulation(kvm);
- break;
- #ifdef CONFIG_KVM_ARM_VGIC_V3
- case KVM_DEV_TYPE_ARM_VGIC_V3:
- vgic_v3_init_emulation(kvm);
- break;
- #endif
- default:
- return -ENODEV;
- }
- if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
- return -E2BIG;
- return 0;
- }
- /**
- * kvm_vgic_early_init - Earliest possible vgic initialization stage
- *
- * No memory allocation should be performed here, only static init.
- */
- void kvm_vgic_early_init(struct kvm *kvm)
- {
- spin_lock_init(&kvm->arch.vgic.lock);
- spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
- INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
- }
- int kvm_vgic_create(struct kvm *kvm, u32 type)
- {
- int i, vcpu_lock_idx = -1, ret;
- struct kvm_vcpu *vcpu;
- mutex_lock(&kvm->lock);
- if (irqchip_in_kernel(kvm)) {
- ret = -EEXIST;
- goto out;
- }
- /*
- * This function is also called by the KVM_CREATE_IRQCHIP handler,
- * which had no chance yet to check the availability of the GICv2
- * emulation. So check this here again. KVM_CREATE_DEVICE does
- * the proper checks already.
- */
- if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
- ret = -ENODEV;
- goto out;
- }
- /*
- * Any time a vcpu is run, vcpu_load is called which tries to grab the
- * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
- * that no other VCPUs are run while we create the vgic.
- */
- ret = -EBUSY;
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!mutex_trylock(&vcpu->mutex))
- goto out_unlock;
- vcpu_lock_idx = i;
- }
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (vcpu->arch.has_run_once)
- goto out_unlock;
- }
- ret = 0;
- ret = init_vgic_model(kvm, type);
- if (ret)
- goto out_unlock;
- kvm->arch.vgic.in_kernel = true;
- kvm->arch.vgic.vgic_model = type;
- kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
- kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
- kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
- kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
- out_unlock:
- for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
- vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
- mutex_unlock(&vcpu->mutex);
- }
- out:
- mutex_unlock(&kvm->lock);
- return ret;
- }
- static int vgic_ioaddr_overlap(struct kvm *kvm)
- {
- phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
- phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
- if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
- return 0;
- if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
- (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
- return -EBUSY;
- return 0;
- }
- static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
- phys_addr_t addr, phys_addr_t size)
- {
- int ret;
- if (addr & ~KVM_PHYS_MASK)
- return -E2BIG;
- if (addr & (SZ_4K - 1))
- return -EINVAL;
- if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
- return -EEXIST;
- if (addr + size < addr)
- return -EINVAL;
- *ioaddr = addr;
- ret = vgic_ioaddr_overlap(kvm);
- if (ret)
- *ioaddr = VGIC_ADDR_UNDEF;
- return ret;
- }
- /**
- * kvm_vgic_addr - set or get vgic VM base addresses
- * @kvm: pointer to the vm struct
- * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
- * @addr: pointer to address value
- * @write: if true set the address in the VM address space, if false read the
- * address
- *
- * Set or get the vgic base addresses for the distributor and the virtual CPU
- * interface in the VM physical address space. These addresses are properties
- * of the emulated core/SoC and therefore user space initially knows this
- * information.
- */
- int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
- {
- int r = 0;
- struct vgic_dist *vgic = &kvm->arch.vgic;
- int type_needed;
- phys_addr_t *addr_ptr, block_size;
- phys_addr_t alignment;
- mutex_lock(&kvm->lock);
- switch (type) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
- addr_ptr = &vgic->vgic_dist_base;
- block_size = KVM_VGIC_V2_DIST_SIZE;
- alignment = SZ_4K;
- break;
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
- addr_ptr = &vgic->vgic_cpu_base;
- block_size = KVM_VGIC_V2_CPU_SIZE;
- alignment = SZ_4K;
- break;
- #ifdef CONFIG_KVM_ARM_VGIC_V3
- case KVM_VGIC_V3_ADDR_TYPE_DIST:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
- addr_ptr = &vgic->vgic_dist_base;
- block_size = KVM_VGIC_V3_DIST_SIZE;
- alignment = SZ_64K;
- break;
- case KVM_VGIC_V3_ADDR_TYPE_REDIST:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
- addr_ptr = &vgic->vgic_redist_base;
- block_size = KVM_VGIC_V3_REDIST_SIZE;
- alignment = SZ_64K;
- break;
- #endif
- default:
- r = -ENODEV;
- goto out;
- }
- if (vgic->vgic_model != type_needed) {
- r = -ENODEV;
- goto out;
- }
- if (write) {
- if (!IS_ALIGNED(*addr, alignment))
- r = -EINVAL;
- else
- r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
- block_size);
- } else {
- *addr = *addr_ptr;
- }
- out:
- mutex_unlock(&kvm->lock);
- return r;
- }
- int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
- {
- int r;
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR: {
- u64 __user *uaddr = (u64 __user *)(long)attr->addr;
- u64 addr;
- unsigned long type = (unsigned long)attr->attr;
- if (copy_from_user(&addr, uaddr, sizeof(addr)))
- return -EFAULT;
- r = kvm_vgic_addr(dev->kvm, type, &addr, true);
- return (r == -ENODEV) ? -ENXIO : r;
- }
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 val;
- int ret = 0;
- if (get_user(val, uaddr))
- return -EFAULT;
- /*
- * We require:
- * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
- * - at most 1024 interrupts
- * - a multiple of 32 interrupts
- */
- if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
- val > VGIC_MAX_IRQS ||
- (val & 31))
- return -EINVAL;
- mutex_lock(&dev->kvm->lock);
- if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
- ret = -EBUSY;
- else
- dev->kvm->arch.vgic.nr_irqs = val;
- mutex_unlock(&dev->kvm->lock);
- return ret;
- }
- case KVM_DEV_ARM_VGIC_GRP_CTRL: {
- switch (attr->attr) {
- case KVM_DEV_ARM_VGIC_CTRL_INIT:
- r = vgic_init(dev->kvm);
- return r;
- }
- break;
- }
- }
- return -ENXIO;
- }
- int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
- {
- int r = -ENXIO;
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR: {
- u64 __user *uaddr = (u64 __user *)(long)attr->addr;
- u64 addr;
- unsigned long type = (unsigned long)attr->attr;
- r = kvm_vgic_addr(dev->kvm, type, &addr, false);
- if (r)
- return (r == -ENODEV) ? -ENXIO : r;
- if (copy_to_user(uaddr, &addr, sizeof(addr)))
- return -EFAULT;
- break;
- }
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
- break;
- }
- }
- return r;
- }
- int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
- {
- if (vgic_find_range(ranges, 4, offset))
- return 0;
- else
- return -ENXIO;
- }
- static void vgic_init_maintenance_interrupt(void *info)
- {
- enable_percpu_irq(vgic->maint_irq, 0);
- }
- static int vgic_cpu_notify(struct notifier_block *self,
- unsigned long action, void *cpu)
- {
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- vgic_init_maintenance_interrupt(NULL);
- break;
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- disable_percpu_irq(vgic->maint_irq);
- break;
- }
- return NOTIFY_OK;
- }
- static struct notifier_block vgic_cpu_nb = {
- .notifier_call = vgic_cpu_notify,
- };
- static const struct of_device_id vgic_ids[] = {
- { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
- { .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, },
- { .compatible = "arm,gic-400", .data = vgic_v2_probe, },
- { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
- {},
- };
- int kvm_vgic_hyp_init(void)
- {
- const struct of_device_id *matched_id;
- const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
- const struct vgic_params **);
- struct device_node *vgic_node;
- int ret;
- vgic_node = of_find_matching_node_and_match(NULL,
- vgic_ids, &matched_id);
- if (!vgic_node) {
- kvm_err("error: no compatible GIC node found\n");
- return -ENODEV;
- }
- vgic_probe = matched_id->data;
- ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
- if (ret)
- return ret;
- ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
- "vgic", kvm_get_running_vcpus());
- if (ret) {
- kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
- return ret;
- }
- ret = __register_cpu_notifier(&vgic_cpu_nb);
- if (ret) {
- kvm_err("Cannot register vgic CPU notifier\n");
- goto out_free_irq;
- }
- on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
- return 0;
- out_free_irq:
- free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
- return ret;
- }
- int kvm_irq_map_gsi(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *entries,
- int gsi)
- {
- return 0;
- }
- int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
- {
- return pin;
- }
- int kvm_set_irq(struct kvm *kvm, int irq_source_id,
- u32 irq, int level, bool line_status)
- {
- unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
- trace_kvm_set_irq(irq, level, irq_source_id);
- BUG_ON(!vgic_initialized(kvm));
- return kvm_vgic_inject_irq(kvm, 0, spi, level);
- }
- /* MSI not implemented yet */
- int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id,
- int level, bool line_status)
- {
- return 0;
- }
|