@@ -149,14 +149,6 @@ struct its_ite {
149149 u32 event_id ;
150150};
151151
152- struct vgic_translation_cache_entry {
153- struct list_head entry ;
154- phys_addr_t db ;
155- u32 devid ;
156- u32 eventid ;
157- struct vgic_irq * irq ;
158- };
159-
160152/**
161153 * struct vgic_its_abi - ITS abi ops and settings
162154 * @cte_esz: collection table entry size
@@ -568,96 +560,34 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
568560 struct vgic_irq * irq )
569561{
570562 unsigned long cache_key = vgic_its_cache_key (devid , eventid );
571- struct vgic_dist * dist = & kvm -> arch .vgic ;
572- struct vgic_translation_cache_entry * cte ;
573563 struct vgic_irq * old ;
574- unsigned long flags ;
575- phys_addr_t db ;
576564
577565 /* Do not cache a directly injected interrupt */
578566 if (irq -> hw )
579567 return ;
580568
581- raw_spin_lock_irqsave (& dist -> lpi_list_lock , flags );
582-
583- if (unlikely (list_empty (& dist -> lpi_translation_cache )))
584- goto out ;
585-
586- db = its -> vgic_its_base + GITS_TRANSLATER ;
587-
588- /* Always reuse the last entry (LRU policy) */
589- cte = list_last_entry (& dist -> lpi_translation_cache ,
590- typeof (* cte ), entry );
591-
592- /*
593- * Caching the translation implies having an extra reference
594- * to the interrupt, so drop the potential reference on what
595- * was in the cache, and increment it on the new interrupt.
596- */
597- if (cte -> irq )
598- vgic_put_irq (kvm , cte -> irq );
599-
600569 /*
601570 * The irq refcount is guaranteed to be nonzero while holding the
602571 * its_lock, as the ITE (and the reference it holds) cannot be freed.
603572 */
604573 lockdep_assert_held (& its -> its_lock );
605-
606- /*
607- * Yes, two references are necessary at the moment:
608- * - One for the global LPI translation cache
609- * - Another for the translation cache belonging to @its
610- *
611- * This will soon disappear.
612- */
613- vgic_get_irq_kref (irq );
614574 vgic_get_irq_kref (irq );
615575
616- cte -> db = db ;
617- cte -> devid = devid ;
618- cte -> eventid = eventid ;
619- cte -> irq = irq ;
620-
621- /* Move the new translation to the head of the list */
622- list_move (& cte -> entry , & dist -> lpi_translation_cache );
623- raw_spin_unlock_irqrestore (& dist -> lpi_list_lock , flags );
624-
625576 /*
626- * The per-ITS cache is a perfect cache, so it may already have an
627- * identical translation even if it were missing from the global
628- * cache. Ensure we don't leak a reference if that is the case.
577+ * We could have raced with another CPU caching the same
578+ * translation behind our back, ensure we don't leak a
579+ * reference if that is the case.
629580 */
630581 old = xa_store (& its -> translation_cache , cache_key , irq , GFP_KERNEL_ACCOUNT );
631582 if (old )
632583 vgic_put_irq (kvm , old );
633-
634- out :
635- raw_spin_unlock_irqrestore (& dist -> lpi_list_lock , flags );
636584}
637585
638586static void vgic_its_invalidate_cache (struct vgic_its * its )
639587{
640588 struct kvm * kvm = its -> dev -> kvm ;
641- struct vgic_dist * dist = & kvm -> arch .vgic ;
642- struct vgic_translation_cache_entry * cte ;
643- unsigned long flags , idx ;
644589 struct vgic_irq * irq ;
645-
646- raw_spin_lock_irqsave (& dist -> lpi_list_lock , flags );
647-
648- list_for_each_entry (cte , & dist -> lpi_translation_cache , entry ) {
649- /*
650- * If we hit a NULL entry, there is nothing after this
651- * point.
652- */
653- if (!cte -> irq )
654- break ;
655-
656- vgic_put_irq (kvm , cte -> irq );
657- cte -> irq = NULL ;
658- }
659-
660- raw_spin_unlock_irqrestore (& dist -> lpi_list_lock , flags );
590+ unsigned long idx ;
661591
662592 xa_for_each (& its -> translation_cache , idx , irq ) {
663593 xa_erase (& its -> translation_cache , idx );
@@ -1880,47 +1810,6 @@ static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
18801810 return ret ;
18811811}
18821812
1883- /* Default is 16 cached LPIs per vcpu */
1884- #define LPI_DEFAULT_PCPU_CACHE_SIZE 16
1885-
1886- void vgic_lpi_translation_cache_init (struct kvm * kvm )
1887- {
1888- struct vgic_dist * dist = & kvm -> arch .vgic ;
1889- unsigned int sz ;
1890- int i ;
1891-
1892- if (!list_empty (& dist -> lpi_translation_cache ))
1893- return ;
1894-
1895- sz = atomic_read (& kvm -> online_vcpus ) * LPI_DEFAULT_PCPU_CACHE_SIZE ;
1896-
1897- for (i = 0 ; i < sz ; i ++ ) {
1898- struct vgic_translation_cache_entry * cte ;
1899-
1900- /* An allocation failure is not fatal */
1901- cte = kzalloc (sizeof (* cte ), GFP_KERNEL_ACCOUNT );
1902- if (WARN_ON (!cte ))
1903- break ;
1904-
1905- INIT_LIST_HEAD (& cte -> entry );
1906- list_add (& cte -> entry , & dist -> lpi_translation_cache );
1907- }
1908- }
1909-
1910- void vgic_lpi_translation_cache_destroy (struct kvm * kvm )
1911- {
1912- struct vgic_dist * dist = & kvm -> arch .vgic ;
1913- struct vgic_translation_cache_entry * cte , * tmp ;
1914-
1915- vgic_its_invalidate_all_caches (kvm );
1916-
1917- list_for_each_entry_safe (cte , tmp ,
1918- & dist -> lpi_translation_cache , entry ) {
1919- list_del (& cte -> entry );
1920- kfree (cte );
1921- }
1922- }
1923-
19241813#define INITIAL_BASER_VALUE \
19251814 (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \
19261815 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \
@@ -1953,8 +1842,6 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
19531842 kfree (its );
19541843 return ret ;
19551844 }
1956-
1957- vgic_lpi_translation_cache_init (dev -> kvm );
19581845 }
19591846
19601847 mutex_init (& its -> its_lock );