Current File : /usr/lib/modules/5.15.0-43-generic/build/include/linux/kvm_host.h

/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/ftrace.h>
#include <linux/instrumentation.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by KVM;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)
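
/*
 * Illustrative sketch (an assumption for documentation purposes, not an
 * API defined here): a consumer holding a cached generation should treat
 * both a generation mismatch and the in-progress flag as "stale":
 *
 *	static bool example_memslot_gen_is_stale(struct kvm_memslots *slots,
 *						 u64 cached_gen)
 *	{
 *		u64 gen = slots->generation;
 *
 *		return cached_gen != gen ||
 *		       (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
 *	}
 */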

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can use bits 52 ~ 62 to indicate an error pfn,
 * and bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host failed to
 * translate it to a pfn.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be translated to a
 * pfn - either it is not in any slot or the translation failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
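
/*
 * Illustrative usage of the predicates above (a sketch, not code from
 * this header): callers that resolve a gfn typically reject failures
 * before using anything derived from the pfn:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn))	// covers error pfns and noslot pfns
 *		return -EFAULT;
 */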

/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and their own kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK           GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP      BIT(8)
#define KVM_REQUEST_WAIT           BIT(9)
/*
 * Architecture-independent vcpu->requests bit members.
 * Bits 5-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK           2
#define KVM_REQ_UNHALT            3
#define KVM_REQ_VM_BUGGED         (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE     8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)
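
/*
 * Sketch of how an architecture builds its own requests on top of
 * KVM_REQUEST_ARCH_BASE (the request names below are hypothetical):
 *
 *	#define KVM_REQ_EXAMPLE_EVENT	KVM_ARCH_REQ(0)
 *	#define KVM_REQ_EXAMPLE_SYNC	\
 *		KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 */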

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;
	bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	pte_t pte;
	bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there
	 * is a 'struct page' for it; with the mem= kernel parameter some
	 * memory can be used as guest memory without being managed by the
	 * host kernel).
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}
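
/*
 * Illustrative map/use/unmap pairing (a sketch; the gpa value and the
 * error handling are assumptions):
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 *		return -EFAULT;
 *	... access guest memory through map.hva ...
 *	kvm_vcpu_unmap(vcpu, &map, true);	// true: mark the page dirty
 */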

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index in kvm->vcpus array */
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	struct rcuwait wait;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU relax intercept or pause loop exit optimization.
	 * in_spin_loop: set when a vcpu takes a pause loop exit or has its
	 *  cpu relax intercepted.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct kvm_vcpu_stat stat;
	char stats_id[KVM_STATS_NAME_SIZE];
	struct kvm_dirty_ring dirty_ring;

	/*
	 * The index of the most recently used memslot by this vCPU. It's ok
	 * if this becomes stale due to memslot changes since we always check
	 * it is a valid slot.
	 */
	int last_used_slot;
};

/*
 * Start accounting time towards a guest.
 * Must be called before entering guest context.
 */
static __always_inline void guest_timing_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe to assume that it's the
	 * stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();
}

/*
 * Enter guest context and enter an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep. All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_enter_irqoff(void)
{
	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition the CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

/*
 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
 * guest_state_enter_irqoff().
 */
static __always_inline void guest_enter_irqoff(void)
{
	guest_timing_enter_irqoff();
	guest_context_enter_irqoff();
}

/**
 * guest_state_enter_irqoff - Fixup state when entering a guest
 *
 * Entry to a guest will enable interrupts, but the kernel state is interrupts
 * disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code before entering a guest.
 * Must be called with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_enter_irqoff() before this.
 *
 * Note: this is analogous to exit_to_user_mode().
 */
static __always_inline void guest_state_enter_irqoff(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	guest_context_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/*
 * Exit guest context and exit an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep. All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_exit_irqoff(void)
{
	context_tracking_guest_exit();
}

/*
 * Stop accounting time towards a guest.
 * Must be called after exiting guest context.
 */
static __always_inline void guest_timing_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

/*
 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
 * guest_timing_exit_irqoff().
 */
static __always_inline void guest_exit_irqoff(void)
{
	guest_context_exit_irqoff();
	guest_timing_exit_irqoff();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

/**
 * guest_state_exit_irqoff - Establish state when returning from guest mode
 *
 * Entry from a guest disables interrupts, but guest mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific code after exiting a guest.
 * Must be invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_exit_irqoff() after this.
 *
 * Note: this is analogous to enter_from_user_mode().
 */
static __always_inline void guest_state_exit_irqoff(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	guest_context_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}
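
/*
 * Illustrative ordering of the entry/exit helpers above (a sketch of
 * what arch code is expected to do, per the comments on each helper):
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	... run the guest ...
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */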

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support bitmaps that are too long.
 * This number must be chosen so that such limits are never exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t last_used_slot;
	int used_slots;
	struct kvm_memory_slot memslots[];
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */

	struct mutex slots_lock;

	/*
	 * Protects the arch-specific fields of struct kvm_memory_slots in
	 * use by the VM. To be used under the slots_lock (above) or in a
	 * kvm->srcu critical section where acquiring the slots_lock would
	 * lead to deadlock with the synchronize_srcu in
	 * install_new_memslots.
	 */
	struct mutex slots_arch_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/* Used to wait for completion of MMU notifiers.  */
	spinlock_t mn_invalidate_lock;
	unsigned long mn_active_invalidate_count;
	struct rcuwait mn_memslots_update_rcuwait;

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
	unsigned long mmu_notifier_range_start;
	unsigned long mmu_notifier_range_end;
#endif
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
	bool vm_bugged;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
	struct notifier_block pm_notifier;
#endif
	char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,           \
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_bugged(struct kvm *kvm)
{
	kvm->vm_bugged = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED);
}

#define KVM_BUG(cond, kvm, fmt...)				\
({								\
	int __ret = (cond);					\
								\
	if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

#define KVM_BUG_ON(cond, kvm)					\
({								\
	int __ret = (cond);					\
								\
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})
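
/*
 * Typical (illustrative) use: abort an operation and taint the whole VM
 * rather than continuing with state KVM cannot trust ('state_is_sane'
 * below is a hypothetical condition):
 *
 *	if (KVM_BUG_ON(!state_is_sane, kvm))
 *		return -EIO;
 */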

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
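
/*
 * Illustrative iteration over all online vCPUs (a sketch):
 *
 *	int idx;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(idx, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */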

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

#define kvm_for_each_memslot(memslot, slots)				\
	for (memslot = &slots->memslots[0];				\
	     memslot < slots->memslots + slots->used_slots; memslot++)	\
		if (WARN_ON_ONCE(!memslot->npages)) {			\
		} else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
			lockdep_is_held(&kvm->slots_lock) ||
			!refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	if (index < 0)
		return NULL;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = get_user(v, __uaddr);				\
	__ret;								\
})

#define kvm_get_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

#define __kvm_put_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(v, __uaddr);				\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})
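
/*
 * Illustrative use of the typed guest accessors above (the 'gpa' value
 * is an assumption): the access width is taken from the type of 'v'.
 *
 *	u32 v;
 *
 *	if (kvm_get_guest(kvm, gpa, v))		// 4-byte read
 *		return -EFAULT;
 *	if (kvm_put_guest(kvm, gpa, v + 1))	// 4-byte write + mark dirty
 *		return -EFAULT;
 */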

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
		struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
				   unsigned long end);
void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
				   unsigned long end);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset,
					unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.waitp;
#else
	return &vcpu->wait;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual
 * interrupt controller is dynamically instantiated, so this is not
 * always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			       struct kvm *kvm, int irq_source_id,
			       int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * Returns a pointer to the memslot at slot_index if it contains gfn.
 * Otherwise returns NULL.
 */
static inline struct kvm_memory_slot *
try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	if (slot_index < 0 || slot_index >= slots->used_slots)
		return NULL;

	/*
	 * slot_index can come from vcpu->last_used_slot which is not kept
	 * in sync with userspace-controllable memslot deletion. So use nospec
	 * to prevent the CPU from speculating past the end of memslots[].
	 */
	slot_index = array_index_nospec(slot_index, slots->used_slots);
	slot = &slots->memslots[slot_index];

	if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
		return slot;
	else
		return NULL;
}

/*
 * Returns a pointer to the memslot that contains gfn and records the index of
 * the slot in index. Otherwise returns NULL.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index)
{
	int start = 0, end = slots->used_slots;
	struct kvm_memory_slot *memslots = slots->memslots;
	struct kvm_memory_slot *slot;

	if (unlikely(!slots->used_slots))
		return NULL;

	while (start < end) {
		int slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	slot = try_get_memslot(slots, start, gfn);
	if (slot) {
		*index = start;
		return slot;
	}

	return NULL;
}
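
/*
 * Worked example (illustrative): with used_slots == 3 and base_gfn values
 * { 0x300, 0x100, 0x0 } (descending, per the sort order noted above), a
 * lookup of gfn 0x180 converges on start == 1; try_get_memslot() then
 * confirms that 0x180 lies inside [0x100, 0x100 + npages).
 */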

/*
 * __gfn_to_memslot() and its descendants are here because it is called from
 * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot()
 * itself isn't here as an inline because that would bloat other code too much.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int slot_index = atomic_read(&slots->last_used_slot);

	slot = try_get_memslot(slots, slot_index, gfn);
	if (slot)
		return slot;

	slot = search_memslots(slots, gfn, &slot_index);
	if (slot) {
		atomic_set(&slots->last_used_slot, slot_index);
		return slot;
	}

	return NULL;
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	/*
	 * The index was checked originally in search_memslots.  To avoid
	 * that a malicious guest builds a Spectre gadget out of e.g. page
	 * table walks, do not let the processor speculate loads outside
	 * the guest's registered memslots.
	 */
	unsigned long offset = gfn - slot->base_gfn;
	offset = array_index_nospec(offset, slot->npages);
	return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
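
/*
 * Worked example (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT ==
 * 12): gpa_to_gfn(0x12345678) == 0x12345, and gfn_to_gpa(0x12345) ==
 * 0x12345000, the page-aligned base of the original address.
 */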

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	const struct _kvm_stats_desc *desc;
	enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
	struct kvm_stats_desc desc;
	char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)		       \
	.flags = type | unit | base |					       \
		 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |	       \
		 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |	       \
		 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),	       \
	.exponent = exp,						       \
	.size = sz,							       \
	.bucket_size = bsz

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vm_stat, generic.stat)   \
		},							       \
		.name = #stat,						       \
	}
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
		},							       \
		.name = #stat,						       \
	}
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vm_stat, stat)	       \
		},							       \
		.name = #stat,						       \
	}
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		       \
	{								       \
		{							       \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
			.offset = offsetof(struct kvm_vcpu_stat, stat)	       \
		},							       \
		.name = #stat,						       \
	}
/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz)		       \
	SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent)	       \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE,		       \
		unit, base, exponent, 1, 0)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent)		       \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT,			       \
		unit, base, exponent, 1, 0)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent)		       \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK,			       \
		unit, base, exponent, 1, 0)
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz)     \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST,		       \
		unit, base, exponent, sz, bsz)
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz)	       \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST,		       \
		unit, base, exponent, sz, 0)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name)					       \
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE,		       \
		KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name)				       \
	STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE,		       \
		KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name)				       \
	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,		       \
		KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanosecond */
#define STATS_DESC_TIME_NSEC(SCOPE, name)				       \
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,	       \
		KVM_STATS_BASE_POW10, -9)
/* Linear histogram for time in nanosecond */
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz)		       \
	STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,	       \
		KVM_STATS_BASE_POW10, -9, sz, bsz)
/* Logarithmic histogram for time in nanosecond */
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz)			       \
	STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,	       \
		KVM_STATS_BASE_POW10, -9, sz)

#define KVM_GENERIC_VM_STATS()						       \
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush),		       \
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)

#define KVM_GENERIC_VCPU_STATS()					       \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll),		       \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll),		       \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid),		       \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup),			       \
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns),	       \
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns),		       \
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns),		       \
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist,     \
			HALT_POLL_HIST_COUNT),				       \
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist,	       \
			HALT_POLL_HIST_COUNT),				       \
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,	       \
			HALT_POLL_HIST_COUNT)

extern struct dentry *kvm_debugfs_dir;

ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
		       const struct _kvm_stats_desc *desc,
		       void *stats, size_t size_stats,
		       char __user *user_buffer, size_t size, loff_t *offset);

/**
 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the linear histogram's bucket
 * @bucket_size: the size (width) of a bucket
 */
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
						u64 value, size_t bucket_size)
{
	size_t index = div64_u64(value, bucket_size);

	index = min(index, size - 1);
	++data[index];
}

/**
 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the logarithmic histogram's bucket
 */
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
	size_t index = fls64(value);

	index = min(index, size - 1);
	++data[index];
}
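
/*
 * Worked example (illustrative): with bucket_size == 1000 and size == 4,
 * kvm_stats_linear_hist_update() puts value 2500 in bucket 2 and clamps
 * any value >= 3000 into the last bucket (index 3).  For the log
 * histogram, value 5 has fls64(5) == 3 and therefore lands in bucket 3.
 */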

#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize)		       \
	kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
#define KVM_STATS_LOG_HIST_UPDATE(array, value)				       \
	kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)


extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
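
/*
 * Illustrative retry pattern (a sketch of the usual caller, with the
 * page-fault details elided):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = __gfn_to_pfn_memslot(...);	// may sleep / fault pages in
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;			// range changed underneath us
 *	... install the mapping ...
 */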

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
					 unsigned long mmu_seq,
					 unsigned long hva)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_notifier_count is non-zero, then the range maintained by
	 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
	 * might be being invalidated. Note that it may include some false
	 * positives, due to shortcuts when handling concurrent invalidations.
	 */
	if (unlikely(kvm->mmu_notifier_count) &&
	    hva >= kvm->mmu_notifier_range_start &&
	    hva < kvm->mmu_notifier_range_end)
		return 1;
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to kvm_check_request's
		 * caller.  Paired with the smp_wmb in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
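
/*
 * Illustrative producer/consumer pairing (a sketch): a remote context
 * posts a request and kicks the vCPU; the vCPU consumes the request
 * before re-entering the guest.
 *
 *	// producer
 *	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, in the vcpu run loop
 *	if (kvm_request_pending(vcpu)) {
 *		if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
 *			... handle the request ...
 *	}
 */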

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock; any operations not suitable
	 * to perform while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device. It is
	 * called when the device file descriptor is closed. Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM. kvm->lock is held.
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
		!(memslot->flags & KVM_MEMSLOT_INVALID));
}
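
/*
 * Illustrative sketch: the generic gfn-visibility helper builds on this
 * check, roughly as
 *
 *	bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 *	{
 *		return kvm_is_visible_memslot(gfn_to_memslot(kvm, gfn));
 *	}
 */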

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
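
/*
 * The hooks above back the irq bypass consumer that KVM's irqfd code
 * registers with the irq bypass manager; a producer (e.g. VFIO) attaching
 * to the same token triggers the add_producer/del_producer callbacks.
 */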

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we woke up during the poll, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells whether we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
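
/*
 * Illustrative arch-side sketch (hypothetical handler): an implementation
 * services only the ioctls that are safe to run without taking vcpu->mutex
 * and punts everything else back to the generic, locked path:
 *
 *	long kvm_arch_vcpu_async_ioctl(struct file *filp,
 *				       unsigned int ioctl, unsigned long arg)
 *	{
 *		struct kvm_vcpu *vcpu = filp->private_data;
 *
 *		if (ioctl == KVM_INTERRUPT)
 *			return inject_interrupt(vcpu, arg);	// hypothetical
 *		return -ENOIOCTLCMD;
 *	}
 */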

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
					    unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);
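
/*
 * Illustrative usage sketch (my_worker_fn and do_background_work() are
 * hypothetical):
 *
 *	static int my_worker_fn(struct kvm *kvm, uintptr_t data)
 *	{
 *		while (!kthread_should_stop())
 *			do_background_work(kvm);
 *		return 0;
 *	}
 *
 *	r = kvm_vm_create_worker_thread(kvm, my_worker_fn, 0, "my-worker",
 *					&thread);
 *
 * The returned task is stopped with kthread_stop() when it is no longer
 * needed, typically on VM destruction.
 */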

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
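
/*
 * kvm_handle_signal_exit() is invoked from the common entry code
 * (xfer_to_guest_mode_handle_work()) when a pending signal forces the run
 * loop back to userspace; the caller then returns -EINTR with
 * KVM_EXIT_INTR already recorded in vcpu->run.
 */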

/*
 * This defines how many entries we want to keep reserved before we
 * kick the vcpu out to userspace, to avoid the dirty ring becoming
 * completely full.  This value can be tuned higher if e.g. PML is
 * enabled on the host.
 */
#define  KVM_DIRTY_RING_RSVD_ENTRIES  64

/* Max number of entries allowed for each kvm dirty ring */
#define  KVM_DIRTY_RING_MAX_ENTRIES  65536
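
/*
 * Illustrative sketch (simplified from the ring implementation): the ring
 * is treated as "soft full" once fewer than the reserved number of entries
 * remain, roughly
 *
 *	soft_limit = ring->size - KVM_DIRTY_RING_RSVD_ENTRIES;
 *	if (kvm_dirty_ring_used(ring) >= soft_limit)
 *		// exit to userspace with KVM_EXIT_DIRTY_RING_FULL
 *
 * so that pages dirtied while the exit is in flight (e.g. by PML flushes)
 * still have slots to land in.
 */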

#endif
