- /*
- * KVM paravirt_ops implementation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright IBM Corporation, 2007
- * Authors: Anthony Liguori <aliguori@us.ibm.com>
- */
- #include <linux/context_tracking.h>
- #include <linux/module.h>
- #include <linux/kernel.h>
- #include <linux/kvm_para.h>
- #include <linux/cpu.h>
- #include <linux/mm.h>
- #include <linux/highmem.h>
- #include <linux/hardirq.h>
- #include <linux/notifier.h>
- #include <linux/reboot.h>
- #include <linux/hash.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/kprobes.h>
- #include <linux/debugfs.h>
- #include <linux/nmi.h>
- #include <asm/timer.h>
- #include <asm/cpu.h>
- #include <asm/traps.h>
- #include <asm/desc.h>
- #include <asm/tlbflush.h>
- #include <asm/idle.h>
- #include <asm/apic.h>
- #include <asm/apicdef.h>
- #include <asm/hypervisor.h>
- #include <asm/kvm_guest.h>
- static int kvmapf = 1;
- static int parse_no_kvmapf(char *arg)
- {
- kvmapf = 0;
- return 0;
- }
- early_param("no-kvmapf", parse_no_kvmapf);
- static int steal_acc = 1;
- static int parse_no_stealacc(char *arg)
- {
- steal_acc = 0;
- return 0;
- }
- early_param("no-steal-acc", parse_no_stealacc);
- static int kvmclock_vsyscall = 1;
- static int parse_no_kvmclock_vsyscall(char *arg)
- {
- kvmclock_vsyscall = 0;
- return 0;
- }
- early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
- static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
- static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
- static int has_steal_clock = 0;
- /*
- * No need for any "IO delay" on KVM
- */
- static void kvm_io_delay(void)
- {
- }
- #define KVM_TASK_SLEEP_HASHBITS 8
- #define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
- struct kvm_task_sleep_node {
- struct hlist_node link;
- wait_queue_head_t wq;
- u32 token;
- int cpu;
- bool halted;
- };
- static struct kvm_task_sleep_head {
- spinlock_t lock;
- struct hlist_head list;
- } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
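- /*
- * Sleeping tasks are looked up by async-PF token via a small
- * open-chained hash table: hash_32() folds the token into one of
- * 256 buckets, and each bucket has its own spinlock so wait/wake
- * pairs on unrelated tokens do not contend.
- */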
- static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
- u32 token)
- {
- struct hlist_node *p;
- hlist_for_each(p, &b->list) {
- struct kvm_task_sleep_node *n =
- hlist_entry(p, typeof(*n), link);
- if (n->token == token)
- return n;
- }
- return NULL;
- }
- void kvm_async_pf_task_wait(u32 token)
- {
- u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
- struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
- struct kvm_task_sleep_node n, *e;
- DEFINE_WAIT(wait);
- rcu_irq_enter();
- spin_lock(&b->lock);
- e = _find_apf_task(b, token);
- if (e) {
- /* A dummy entry exists -> the wake-up was delivered ahead of the PF */
- hlist_del(&e->link);
- kfree(e);
- spin_unlock(&b->lock);
- rcu_irq_exit();
- return;
- }
- n.token = token;
- n.cpu = smp_processor_id();
- n.halted = is_idle_task(current) || preempt_count() > 1;
- init_waitqueue_head(&n.wq);
- hlist_add_head(&n.link, &b->list);
- spin_unlock(&b->lock);
- for (;;) {
- if (!n.halted)
- prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
- if (hlist_unhashed(&n.link))
- break;
- rcu_irq_exit();
- if (!n.halted) {
- local_irq_enable();
- schedule();
- local_irq_disable();
- } else {
- /*
- * We cannot reschedule. So halt.
- */
- native_safe_halt();
- local_irq_disable();
- }
- rcu_irq_enter();
- }
- if (!n.halted)
- finish_wait(&n.wq, &wait);
- rcu_irq_exit();
- return;
- }
- EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
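- /*
- * Two subtleties in the wait path above: if a dummy node for this
- * token is already hashed, the "page ready" wake-up raced ahead of
- * the "page not present" fault and we return immediately; and a
- * context that cannot schedule (the idle task, or a nested atomic
- * context) spins in halt-until-kicked instead of sleeping on the
- * waitqueue.
- */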
- static void apf_task_wake_one(struct kvm_task_sleep_node *n)
- {
- hlist_del_init(&n->link);
- if (n->halted)
- smp_send_reschedule(n->cpu);
- else if (waitqueue_active(&n->wq))
- wake_up(&n->wq);
- }
- static void apf_task_wake_all(void)
- {
- int i;
- for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
- struct hlist_node *p, *next;
- struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
- spin_lock(&b->lock);
- hlist_for_each_safe(p, next, &b->list) {
- struct kvm_task_sleep_node *n =
- hlist_entry(p, typeof(*n), link);
- if (n->cpu == smp_processor_id())
- apf_task_wake_one(n);
- }
- spin_unlock(&b->lock);
- }
- }
- void kvm_async_pf_task_wake(u32 token)
- {
- u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
- struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
- struct kvm_task_sleep_node *n;
- if (token == ~0) {
- apf_task_wake_all();
- return;
- }
- again:
- spin_lock(&b->lock);
- n = _find_apf_task(b, token);
- if (!n) {
- /*
- * The async PF has not been handled yet.
- * Add a dummy entry for the token.
- */
- n = kzalloc(sizeof(*n), GFP_ATOMIC);
- if (!n) {
- /*
- * Allocation failed! Busy-wait while another CPU
- * handles the async PF.
- */
- spin_unlock(&b->lock);
- cpu_relax();
- goto again;
- }
- n->token = token;
- n->cpu = smp_processor_id();
- init_waitqueue_head(&n->wq);
- hlist_add_head(&n->link, &b->list);
- } else
- apf_task_wake_one(n);
- spin_unlock(&b->lock);
- return;
- }
- EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
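- /*
- * On the wake side, a token of ~0 asks this CPU to wake every task
- * it has parked.  For a single token, finding no sleeper means the
- * fault has not arrived yet, so a dummy node is hashed for
- * kvm_async_pf_task_wait() to find and consume.
- */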
- u32 kvm_read_and_reset_pf_reason(void)
- {
- u32 reason = 0;
- if (__this_cpu_read(apf_reason.enabled)) {
- reason = __this_cpu_read(apf_reason.reason);
- __this_cpu_write(apf_reason.reason, 0);
- }
- return reason;
- }
- EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
- NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
- dotraplinkage void
- do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
- {
- enum ctx_state prev_state;
- switch (kvm_read_and_reset_pf_reason()) {
- default:
- trace_do_page_fault(regs, error_code);
- break;
- case KVM_PV_REASON_PAGE_NOT_PRESENT:
- /* The page was swapped out by the host. */
- prev_state = exception_enter();
- exit_idle();
- kvm_async_pf_task_wait((u32)read_cr2());
- exception_exit(prev_state);
- break;
- case KVM_PV_REASON_PAGE_READY:
- rcu_irq_enter();
- exit_idle();
- kvm_async_pf_task_wake((u32)read_cr2());
- rcu_irq_exit();
- break;
- }
- }
- NOKPROBE_SYMBOL(do_async_page_fault);
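- /*
- * Page-fault dispatch: the host writes a reason code into the
- * per-CPU apf_reason area before injecting the fault and passes the
- * async-PF token in CR2 in place of a faulting address.  A reason
- * of zero means an ordinary page fault, which falls through to
- * trace_do_page_fault().
- */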
- static void __init paravirt_ops_setup(void)
- {
- pv_info.name = "KVM";
- /*
- * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
- * guest kernel works like a bare metal kernel with additional
- * features, and paravirt_enabled is about features that are
- * missing.
- */
- pv_info.paravirt_enabled = 0;
- if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
- pv_cpu_ops.io_delay = kvm_io_delay;
- #ifdef CONFIG_X86_IO_APIC
- no_timer_check = 1;
- #endif
- }
- static void kvm_register_steal_time(void)
- {
- int cpu = smp_processor_id();
- struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
- if (!has_steal_clock)
- return;
- memset(st, 0, sizeof(*st));
- wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
- pr_info("kvm-stealtime: cpu %d, msr %llx\n",
- cpu, (unsigned long long) slow_virt_to_phys(st));
- }
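- /*
- * Registration hands the host the guest-physical address of the
- * per-CPU steal_time area, with KVM_MSR_ENABLED set in the low
- * bits; from then on the host updates the structure whenever this
- * vCPU is scheduled out.  slow_virt_to_phys() walks the page
- * tables, so this works even where __pa() would not.
- */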
- static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
- static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
- {
- /*
- * This relies on __test_and_clear_bit to modify the memory
- * in a way that is atomic with respect to the local CPU.
- * The hypervisor only accesses this memory from the local CPU so
- * there's no need for lock or memory barriers.
- * An optimization barrier is implied in apic write.
- */
- if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
- return;
- apic_write(APIC_EOI, APIC_EOI_ACK);
- }
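- /*
- * PV EOI protocol: before injecting an interrupt the host may set
- * KVM_PV_EOI_BIT in this per-CPU word to say that the EOI can be
- * signalled by clearing the bit alone.  When the bit is found set,
- * the trapping APIC_EOI register write is skipped entirely, saving
- * a VM exit per interrupt.
- */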
- static void kvm_guest_cpu_init(void)
- {
- if (!kvm_para_available())
- return;
- if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
- u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
- #ifdef CONFIG_PREEMPT
- pa |= KVM_ASYNC_PF_SEND_ALWAYS;
- #endif
- wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
- __this_cpu_write(apf_reason.enabled, 1);
- printk(KERN_INFO"KVM setup async PF for cpu %d\n",
- smp_processor_id());
- }
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
- unsigned long pa;
- /* Sufficient alignment is implied by the type's size, but make it explicit. */
- BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
- __this_cpu_write(kvm_apic_eoi, 0);
- pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
- | KVM_MSR_ENABLED;
- wrmsrl(MSR_KVM_PV_EOI_EN, pa);
- }
- if (has_steal_clock)
- kvm_register_steal_time();
- }
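- /*
- * kvm_guest_cpu_init() runs on each CPU as it comes up and arms the
- * per-CPU MSRs.  Each MSR takes a guest-physical address with an
- * enable flag in the low bits.  On preemptible kernels
- * KVM_ASYNC_PF_SEND_ALWAYS is added so async page faults can be
- * delivered even while the vCPU runs in kernel mode.
- */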
- static void kvm_pv_disable_apf(void)
- {
- if (!__this_cpu_read(apf_reason.enabled))
- return;
- wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
- __this_cpu_write(apf_reason.enabled, 0);
- printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
- smp_processor_id());
- }
- static void kvm_pv_guest_cpu_reboot(void *unused)
- {
- /*
- * We disable PV EOI before loading a new kernel via kexec,
- * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
- * The new kernel can re-enable it when it boots.
- */
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- kvm_disable_steal_time();
- }
- static int kvm_pv_reboot_notify(struct notifier_block *nb,
- unsigned long code, void *unused)
- {
- if (code == SYS_RESTART)
- on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
- return NOTIFY_DONE;
- }
- static struct notifier_block kvm_pv_reboot_nb = {
- .notifier_call = kvm_pv_reboot_notify,
- };
- static u64 kvm_steal_clock(int cpu)
- {
- u64 steal;
- struct kvm_steal_time *src;
- int version;
- src = &per_cpu(steal_time, cpu);
- do {
- version = src->version;
- rmb();
- steal = src->steal;
- rmb();
- } while ((version & 1) || (version != src->version));
- return steal;
- }
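- /*
- * The loop above is effectively a seqcount read side: the host
- * makes ->version odd before updating ->steal and even again
- * afterwards, so the reader retries until it sees the same even
- * version across both reads, guaranteeing an untorn snapshot of
- * the 64-bit steal counter.
- */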
- void kvm_disable_steal_time(void)
- {
- if (!has_steal_clock)
- return;
- wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
- }
- #ifdef CONFIG_SMP
- static void __init kvm_smp_prepare_boot_cpu(void)
- {
- kvm_guest_cpu_init();
- native_smp_prepare_boot_cpu();
- kvm_spinlock_init();
- }
- static void kvm_guest_cpu_online(void *dummy)
- {
- kvm_guest_cpu_init();
- }
- static void kvm_guest_cpu_offline(void *dummy)
- {
- kvm_disable_steal_time();
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- apf_task_wake_all();
- }
- static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
- {
- int cpu = (unsigned long)hcpu;
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- case CPU_ONLINE_FROZEN:
- smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
- }
- static struct notifier_block kvm_cpu_notifier = {
- .notifier_call = kvm_cpu_notify,
- };
- #endif
- static void __init kvm_apf_trap_init(void)
- {
- set_intr_gate(14, async_page_fault);
- }
- void __init kvm_guest_init(void)
- {
- int i;
- if (!kvm_para_available())
- return;
- paravirt_ops_setup();
- register_reboot_notifier(&kvm_pv_reboot_nb);
- for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
- spin_lock_init(&async_pf_sleepers[i].lock);
- if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
- x86_init.irqs.trap_init = kvm_apf_trap_init;
- if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
- has_steal_clock = 1;
- pv_time_ops.steal_clock = kvm_steal_clock;
- }
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- apic_set_eoi_write(kvm_guest_apic_eoi_write);
- if (kvmclock_vsyscall)
- kvm_setup_vsyscall_timeinfo();
- #ifdef CONFIG_SMP
- smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
- register_cpu_notifier(&kvm_cpu_notifier);
- #else
- kvm_guest_cpu_init();
- #endif
- /*
- * Hard lockup detection is enabled by default. Disable it, as guests
- * can get false positives too easily, for example if the host is
- * overcommitted.
- */
- hardlockup_detector_disable();
- }
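- /*
- * kvm_guest_init() runs once on the boot CPU during early setup.
- * Each optional feature is negotiated with kvm_para_has_feature(),
- * which reads the KVM_CPUID_FEATURES leaf, before the corresponding
- * paravirt or x86_init hook is patched in.
- */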
- static noinline uint32_t __kvm_cpuid_base(void)
- {
- if (boot_cpu_data.cpuid_level < 0)
- return 0; /* So we don't blow up on old processors */
- if (cpu_has_hypervisor)
- return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
- return 0;
- }
- static inline uint32_t kvm_cpuid_base(void)
- {
- static int kvm_cpuid_base = -1;
- if (kvm_cpuid_base == -1)
- kvm_cpuid_base = __kvm_cpuid_base();
- return kvm_cpuid_base;
- }
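- /*
- * The CPUID scan is done once and cached: __kvm_cpuid_base() probes
- * the hypervisor leaf range for the "KVMKVMKVM" signature, and the
- * static local in kvm_cpuid_base() memoizes the result so that
- * kvm_para_available() and kvm_arch_para_features() stay cheap.
- */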
- bool kvm_para_available(void)
- {
- return kvm_cpuid_base() != 0;
- }
- EXPORT_SYMBOL_GPL(kvm_para_available);
- unsigned int kvm_arch_para_features(void)
- {
- return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
- }
- static uint32_t __init kvm_detect(void)
- {
- return kvm_cpuid_base();
- }
- const struct hypervisor_x86 x86_hyper_kvm __refconst = {
- .name = "KVM",
- .detect = kvm_detect,
- .x2apic_available = kvm_para_available,
- };
- EXPORT_SYMBOL_GPL(x86_hyper_kvm);
- static __init int activate_jump_labels(void)
- {
- if (has_steal_clock) {
- static_key_slow_inc(&paravirt_steal_enabled);
- if (steal_acc)
- static_key_slow_inc(&paravirt_steal_rq_enabled);
- }
- return 0;
- }
- arch_initcall(activate_jump_labels);
- #ifdef CONFIG_PARAVIRT_SPINLOCKS
- /* Kick a CPU by its APIC ID. Used to wake up a halted vCPU. */
- static void kvm_kick_cpu(int cpu)
- {
- int apicid;
- unsigned long flags = 0;
- apicid = per_cpu(x86_cpu_to_apicid, cpu);
- kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
- }
- #ifdef CONFIG_QUEUED_SPINLOCKS
- #include <asm/qspinlock.h>
- static void kvm_wait(u8 *ptr, u8 val)
- {
- unsigned long flags;
- if (in_nmi())
- return;
- local_irq_save(flags);
- if (READ_ONCE(*ptr) != val)
- goto out;
- /*
- * Halt until it's our turn and we are kicked. Note that we do a safe
- * halt in the IRQ-enabled case to avoid hanging when the lock state is
- * overwritten in the IRQ spinlock slowpath and no spurious interrupt
- * occurs to save us.
- */
- if (arch_irqs_disabled_flags(flags))
- halt();
- else
- safe_halt();
- out:
- local_irq_restore(flags);
- }
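- /*
- * kvm_wait() is the pv qspinlock wait hook: with interrupts off it
- * re-checks the lock byte to close the race with a concurrent kick,
- * then halts until kvm_kick_cpu() from the lock holder (or, in the
- * safe_halt() case, any interrupt) wakes this vCPU.  NMI context
- * bails out early since halting there would be unsafe.
- */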
- #else /* !CONFIG_QUEUED_SPINLOCKS */
- enum kvm_contention_stat {
- TAKEN_SLOW,
- TAKEN_SLOW_PICKUP,
- RELEASED_SLOW,
- RELEASED_SLOW_KICKED,
- NR_CONTENTION_STATS
- };
- #ifdef CONFIG_KVM_DEBUG_FS
- #define HISTO_BUCKETS 30
- static struct kvm_spinlock_stats
- {
- u32 contention_stats[NR_CONTENTION_STATS];
- u32 histo_spin_blocked[HISTO_BUCKETS+1];
- u64 time_blocked;
- } spinlock_stats;
- static u8 zero_stats;
- static inline void check_zero(void)
- {
- u8 ret;
- u8 old;
- old = READ_ONCE(zero_stats);
- if (unlikely(old)) {
- ret = cmpxchg(&zero_stats, old, 0);
- /* This ensures only one CPU resets the stats */
- if (ret == old)
- memset(&spinlock_stats, 0, sizeof(spinlock_stats));
- }
- }
- static inline void add_stats(enum kvm_contention_stat var, u32 val)
- {
- check_zero();
- spinlock_stats.contention_stats[var] += val;
- }
- static inline u64 spin_time_start(void)
- {
- return sched_clock();
- }
- static void __spin_time_accum(u64 delta, u32 *array)
- {
- unsigned index;
- index = ilog2(delta);
- check_zero();
- if (index < HISTO_BUCKETS)
- array[index]++;
- else
- array[HISTO_BUCKETS]++;
- }
- static inline void spin_time_accum_blocked(u64 start)
- {
- u32 delta;
- delta = sched_clock() - start;
- __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
- spinlock_stats.time_blocked += delta;
- }
- static struct dentry *d_spin_debug;
- static struct dentry *d_kvm_debug;
- static struct dentry *kvm_init_debugfs(void)
- {
- d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
- if (!d_kvm_debug)
- printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
- return d_kvm_debug;
- }
- static int __init kvm_spinlock_debugfs(void)
- {
- struct dentry *d_kvm;
- d_kvm = kvm_init_debugfs();
- if (d_kvm == NULL)
- return -ENOMEM;
- d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);
- debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
- debugfs_create_u32("taken_slow", 0444, d_spin_debug,
- &spinlock_stats.contention_stats[TAKEN_SLOW]);
- debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
- &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
- debugfs_create_u32("released_slow", 0444, d_spin_debug,
- &spinlock_stats.contention_stats[RELEASED_SLOW]);
- debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
- &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
- debugfs_create_u64("time_blocked", 0444, d_spin_debug,
- &spinlock_stats.time_blocked);
- debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
- spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
- return 0;
- }
- fs_initcall(kvm_spinlock_debugfs);
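- /*
- * With CONFIG_KVM_DEBUG_FS the contention counters and the log2
- * histogram of blocked time end up under the debugfs mount,
- * typically /sys/kernel/debug/kvm-guest/spinlocks/.  Writing a
- * non-zero value to zero_stats makes the next stats update reset
- * everything via check_zero().
- */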
- #else /* !CONFIG_KVM_DEBUG_FS */
- static inline void add_stats(enum kvm_contention_stat var, u32 val)
- {
- }
- static inline u64 spin_time_start(void)
- {
- return 0;
- }
- static inline void spin_time_accum_blocked(u64 start)
- {
- }
- #endif /* CONFIG_KVM_DEBUG_FS */
- struct kvm_lock_waiting {
- struct arch_spinlock *lock;
- __ticket_t want;
- };
- /* cpus 'waiting' on a spinlock to become available */
- static cpumask_t waiting_cpus;
- /* Track spinlock on which a cpu is waiting */
- static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
- __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
- {
- struct kvm_lock_waiting *w;
- int cpu;
- u64 start;
- unsigned long flags;
- __ticket_t head;
- if (in_nmi())
- return;
- w = this_cpu_ptr(&klock_waiting);
- cpu = smp_processor_id();
- start = spin_time_start();
- /*
- * Make sure an interrupt handler can't upset things in a
- * partially setup state.
- */
- local_irq_save(flags);
- /*
- * The ordering protocol on this is that the "lock" pointer
- * may only be set non-NULL if the "want" ticket is correct.
- * If we're updating "want", we must first clear "lock".
- */
- w->lock = NULL;
- smp_wmb();
- w->want = want;
- smp_wmb();
- w->lock = lock;
- add_stats(TAKEN_SLOW, 1);
- /*
- * This uses set_bit, which is atomic, but we should not rely on its
- * reordering guarantees, so a barrier is needed after this call.
- */
- cpumask_set_cpu(cpu, &waiting_cpus);
- barrier();
- /*
- * Mark entry to slowpath before doing the pickup test to make
- * sure we don't deadlock with an unlocker.
- */
- __ticket_enter_slowpath(lock);
- /* Make sure enter_slowpath, which is atomic, does not cross the read */
- smp_mb__after_atomic();
- /*
- * Check again to make sure the lock didn't become free while
- * we weren't looking.
- */
- head = READ_ONCE(lock->tickets.head);
- if (__tickets_equal(head, want)) {
- add_stats(TAKEN_SLOW_PICKUP, 1);
- goto out;
- }
- /*
- * Halt until it's our turn and we are kicked. Note that we do a safe
- * halt in the IRQ-enabled case to avoid hanging when the lock state is
- * overwritten in the IRQ spinlock slowpath and no spurious interrupt
- * occurs to save us.
- */
- if (arch_irqs_disabled_flags(flags))
- halt();
- else
- safe_halt();
- out:
- cpumask_clear_cpu(cpu, &waiting_cpus);
- w->lock = NULL;
- local_irq_restore(flags);
- spin_time_accum_blocked(start);
- }
- PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
- /* Kick the vCPU waiting on @lock->head to reach value @ticket. */
- static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
- {
- int cpu;
- add_stats(RELEASED_SLOW, 1);
- for_each_cpu(cpu, &waiting_cpus) {
- const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
- if (READ_ONCE(w->lock) == lock &&
- READ_ONCE(w->want) == ticket) {
- add_stats(RELEASED_SLOW_KICKED, 1);
- kvm_kick_cpu(cpu);
- break;
- }
- }
- }
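- /*
- * The unlock side scans waiting_cpus for the CPU whose (lock, want)
- * pair matches the ticket being released; at most one waiter can
- * match, so the scan stops at the first hit and kicks that vCPU out
- * of halt with the KVM_HC_KICK_CPU hypercall.
- */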
- #endif /* !CONFIG_QUEUED_SPINLOCKS */
- /*
- * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
- */
- void __init kvm_spinlock_init(void)
- {
- if (!kvm_para_available())
- return;
- /* Does the host kernel support KVM_FEATURE_PV_UNHALT? */
- if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
- return;
- #ifdef CONFIG_QUEUED_SPINLOCKS
- __pv_init_lock_hash();
- pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
- pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
- pv_lock_ops.wait = kvm_wait;
- pv_lock_ops.kick = kvm_kick_cpu;
- #else /* !CONFIG_QUEUED_SPINLOCKS */
- pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
- pv_lock_ops.unlock_kick = kvm_unlock_kick;
- #endif
- }
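- /*
- * Depending on the configuration this wires up one of two
- * paravirtual spinlock flavours: the queued-spinlock slow path
- * (wait/kick) or the older ticketlock slow path
- * (lock_spinning/unlock_kick).  Both rely on KVM_HC_KICK_CPU to
- * wake halted vCPUs.
- */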
- static __init int kvm_spinlock_init_jump(void)
- {
- if (!kvm_para_available())
- return 0;
- if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
- return 0;
- static_key_slow_inc(&paravirt_ticketlocks_enabled);
- printk(KERN_INFO "KVM setup paravirtual spinlock\n");
- return 0;
- }
- early_initcall(kvm_spinlock_init_jump);
- #endif /* CONFIG_PARAVIRT_SPINLOCKS */