qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 3/8] pci: Use pci_for_each_device_under_bus*()


From: Peter Xu
Subject: Re: [PATCH 3/8] pci: Use pci_for_each_device_under_bus*()
Date: Fri, 22 Oct 2021 10:19:37 +0800

On Thu, Oct 21, 2021 at 01:34:07PM +0200, Eric Auger wrote:
> Hi Peter,
> On 10/21/21 12:42 PM, Peter Xu wrote:
> > Replace all the call sites of existing pci_for_each_device*() where the bus
> > number is calculated from a PCIBus* already.  It should avoid the lookup of
> > the PCIBus again.
> >
> > Signed-off-by: Peter Xu <peterx@redhat.com>
> > ---
> >  hw/i386/acpi-build.c       |  5 ++---
> >  hw/pci/pcie.c              |  4 +---
> >  hw/ppc/spapr_pci.c         | 12 +++++-------
> >  hw/ppc/spapr_pci_nvlink2.c |  7 +++----
> >  hw/ppc/spapr_pci_vfio.c    |  4 ++--
> >  hw/s390x/s390-pci-bus.c    |  5 ++---
> >  hw/xen/xen_pt.c            |  4 ++--
> >  7 files changed, 17 insertions(+), 24 deletions(-)
> >
> > diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
> > index 81418b7911..a76b17ed92 100644
> > --- a/hw/i386/acpi-build.c
> > +++ b/hw/i386/acpi-build.c
> > @@ -2132,8 +2132,7 @@ dmar_host_bridges(Object *obj, void *opaque)
> >          PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
> >  
> >          if (bus && !pci_bus_bypass_iommu(bus)) {
> > -            pci_for_each_device(bus, pci_bus_num(bus), insert_scope,
> > -                                scope_blob);
> > +            pci_for_each_device_under_bus(bus, insert_scope, scope_blob);
> >          }
> >      }
> >  
> > @@ -2339,7 +2338,7 @@ ivrs_host_bridges(Object *obj, void *opaque)
> >          PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
> >  
> >          if (bus && !pci_bus_bypass_iommu(bus)) {
> > -            pci_for_each_device(bus, pci_bus_num(bus), insert_ivhd, ivhd_blob);
> > +            pci_for_each_device_under_bus(bus, insert_ivhd, ivhd_blob);
> >          }
> >      }
> >  
> > diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
> > index 6e95d82903..914a9bf3d1 100644
> > --- a/hw/pci/pcie.c
> > +++ b/hw/pci/pcie.c
> > @@ -694,9 +694,7 @@ void pcie_cap_slot_write_config(PCIDevice *dev,
> >          (!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) ||
> >          (old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) {
> >          PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
> > -        pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
> > -                            pcie_unplug_device, NULL);
> > -
> > +        pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL);
> >          pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
> >                                       PCI_EXP_SLTSTA_PDS);
> >          if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
> > diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
> > index 7430bd6314..5bfd4aa9e5 100644
> > --- a/hw/ppc/spapr_pci.c
> > +++ b/hw/ppc/spapr_pci.c
> > @@ -1317,8 +1317,7 @@ static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus,
> >                            RESOURCE_CELLS_SIZE));
> >  
> >      assert(bus);
> > -    pci_for_each_device_reverse(bus, pci_bus_num(bus),
> > -                                spapr_dt_pci_device_cb, &cbinfo);
> > +    pci_for_each_device_under_bus_reverse(bus, spapr_dt_pci_device_cb, &cbinfo);
> >      if (cbinfo.err) {
> >          return cbinfo.err;
> >      }
> > @@ -2306,8 +2305,8 @@ static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
> >          return;
> >      }
> >  
> > -    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
> > -                        spapr_phb_pci_enumerate_bridge, bus_no);
> > +    pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_enumerate_bridge,
> > +                                  bus_no);
> >      pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
> >  }
> >  
> > @@ -2316,9 +2315,8 @@ static void spapr_phb_pci_enumerate(SpaprPhbState *phb)
> >      PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
> >      unsigned int bus_no = 0;
> >  
> > -    pci_for_each_device(bus, pci_bus_num(bus),
> > -                        spapr_phb_pci_enumerate_bridge,
> > -                        &bus_no);
> > +    pci_for_each_device_under_bus(bus, spapr_phb_pci_enumerate_bridge,
> > +                                  &bus_no);
> >  
> >  }
> >  
> > diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
> > index 8ef9b40a18..7fb0cf4d04 100644
> > --- a/hw/ppc/spapr_pci_nvlink2.c
> > +++ b/hw/ppc/spapr_pci_nvlink2.c
> > @@ -164,8 +164,7 @@ static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev,
> >          return;
> >      }
> >  
> > -    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
> > -                        spapr_phb_pci_collect_nvgpu, opaque);
> > +    pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_collect_nvgpu, opaque);
> >  }
> >  
> >  void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
> > @@ -183,8 +182,8 @@ void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
> >      sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr;
> >  
> >      bus = PCI_HOST_BRIDGE(sphb)->bus;
> > -    pci_for_each_device(bus, pci_bus_num(bus),
> > -                        spapr_phb_pci_collect_nvgpu, sphb->nvgpus);
> > +    pci_for_each_device_under_bus(bus, spapr_phb_pci_collect_nvgpu,
> > +                                  sphb->nvgpus);
> >  
> >      if (sphb->nvgpus->err) {
> >          error_propagate(errp, sphb->nvgpus->err);
> > diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c
> > index f3b37df8ea..2a76b4e0b5 100644
> > --- a/hw/ppc/spapr_pci_vfio.c
> > +++ b/hw/ppc/spapr_pci_vfio.c
> > @@ -164,8 +164,8 @@ static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
> >  
> >  static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
> >  {
> > -       pci_for_each_device(bus, pci_bus_num(bus),
> > -                           spapr_phb_vfio_eeh_clear_dev_msix, NULL);
> > +       pci_for_each_device_under_bus(bus, spapr_phb_vfio_eeh_clear_dev_msix,
> > +                                     NULL);
> >  }
> >  
> >  static void spapr_phb_vfio_eeh_pre_reset(SpaprPhbState *sphb)
> > diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
> > index 6fafffb029..1b51a72838 100644
> > --- a/hw/s390x/s390-pci-bus.c
> > +++ b/hw/s390x/s390-pci-bus.c
> > @@ -1163,8 +1163,7 @@ static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
> >      }
> >  
> >      /* Assign numbers to all child bridges. The last is the highest number. */
> > -    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
> > -                        s390_pci_enumerate_bridge, s);
> > +    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
> >      pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
> >  }
> >  
> > @@ -1193,7 +1192,7 @@ static void s390_pcihost_reset(DeviceState *dev)
> >       * on every system reset, we also have to reassign numbers.
> >       */
> >      s->bus_no = 0;
> > -    pci_for_each_device(bus, pci_bus_num(bus), s390_pci_enumerate_bridge, s);
> > +    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
> >  }
> >  
> >  static void s390_pcihost_class_init(ObjectClass *klass, void *data)
> > diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
> > index ca0a98187e..027190fa44 100644
> > --- a/hw/xen/xen_pt.c
> > +++ b/hw/xen/xen_pt.c
> > @@ -615,8 +615,8 @@ static void xen_pt_region_update(XenPCIPassthroughState *s,
> >      }
> >  
> >      args.type = d->io_regions[bar].type;
> > -    pci_for_each_device(pci_get_bus(d), pci_dev_bus_num(d),
> > -                        xen_pt_check_bar_overlap, &args);
> > +    pci_for_each_device_under_bus(pci_get_bus(d),
> > +                                  xen_pt_check_bar_overlap, &args);
> >      if (args.rc) {
> >          XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS
> >                      ", len: 0x%"FMT_PCIBUS") is overlapped.\n",
> Maybe squash with the previous one?

Will do.

> 
> Besides
> Reviewed-by: Eric Auger <eric.auger@redhat.com>

Thanks,

-- 
Peter Xu




reply via email to

[Prev in Thread] Current Thread [Next in Thread]