diff --git a/config-arm-generic b/config-arm-generic index b87b5d928..b45703849 100644 --- a/config-arm-generic +++ b/config-arm-generic @@ -304,3 +304,8 @@ CONFIG_CMA_AREAS=7 # CONFIG_BMP085_SPI is not set # CONFIG_TI_DAC7512 is not set # CONFIG_SPI_ROCKCHIP is not set + +# EDAC +CONFIG_EDAC=y +CONFIG_EDAC_MM_EDAC=m +CONFIG_EDAC_LEGACY_SYSFS=y diff --git a/config-arm64 b/config-arm64 index 5ad900f8c..c6a9a9e97 100644 --- a/config-arm64 +++ b/config-arm64 @@ -164,3 +164,5 @@ CONFIG_SATA_AHCI_PLATFORM=y # CONFIG_DEBUG_RODATA is not set CONFIG_DEBUG_SECTION_MISMATCH=y + +CONFIG_EDAC_XGENE=y diff --git a/config-armv7-generic b/config-armv7-generic index bf9e95eb0..bf3f8c01e 100644 --- a/config-armv7-generic +++ b/config-armv7-generic @@ -559,11 +559,6 @@ CONFIG_CRYPTO_SHA512_ARM_NEON=m CONFIG_TI_PRIV_EDMA=y CONFIG_TI_EDMA=y -# EDAC -CONFIG_EDAC=y -CONFIG_EDAC_MM_EDAC=m -CONFIG_EDAC_LEGACY_SYSFS=y - # Watchdog # Mailbox diff --git a/kernel-arm64.patch b/kernel-arm64.patch index 6c618105b..eb5d07ffc 100644 --- a/kernel-arm64.patch +++ b/kernel-arm64.patch @@ -1,1387 +1,139 @@ -commit fdd2bbd9e18e813faa3880f820c1b674267eec0c -Author: Kyle McMartin -Date: Wed Feb 18 09:49:05 2015 -0500 - - fixes for xgene enet - -commit ac44fa9c24a21d78e8fff79c0dab3deea490d782 -Author: Kyle McMartin -Date: Tue Feb 17 12:04:33 2015 -0500 - - fixes for HEAD - -commit 7320bdde2a2abd1267f0a138ed1c19791be519c5 -Merge: 9d1c60d 796e1c5 -Author: Kyle McMartin -Date: Tue Feb 17 11:11:12 2015 -0500 - - Merge branch 'master' into devel - - Conflicts: - arch/arm64/kernel/efi.c - arch/arm64/kernel/pci.c - arch/arm64/mm/dma-mapping.c - arch/x86/pci/mmconfig-shared.c - drivers/ata/ahci_xgene.c - drivers/net/ethernet/apm/xgene/xgene_enet_hw.c - drivers/net/ethernet/apm/xgene/xgene_enet_main.c - drivers/tty/serial/8250/8250_dw.c - include/linux/pci.h - -commit 9d1c60d3f33dd3b331e1365dba5c8c5de50db77c -Author: Ivan Khoronzhuk -Date: Wed Feb 4 19:06:03 2015 +0200 - - firmware: dmi-sysfs: add SMBIOS entry point area attribute - - Some utils, like dmidecode and smbios, need to access SMBIOS entry - table area in order to get information like SMBIOS version, size, etc. - Currently it's done via /dev/mem. But for situation when /dev/mem - usage is disabled, the utils have to use dmi sysfs instead, which - doesn't represent SMBIOS entry. So this patch adds SMBIOS area to - dmi-sysfs in order to allow utils in question to work correctly with - dmi sysfs interface. - - Reviewed-by: Ard Biesheuvel - Signed-off-by: Ivan Khoronzhuk - -commit 9ede97bc136e217ea00406a3388c6082a6a8d049 -Author: Mark Salter -Date: Mon Feb 16 13:46:56 2015 -0500 - - ata: ahci_platform: DO NOT UPSTREAM Add HID for AMD seattle platform - - Add HID match to get modules working. The class matching works but not - for modules. Yet. - - Signed-off-by: Mark Salter - -commit 794db044849e4388586abda59a262d34f18f20fa -Author: Mark Langsdorf -Date: Thu Nov 13 21:46:59 2014 -0500 - - usb: make xhci platform driver use 64 bit or 32 bit DMA - - The xhci platform driver needs to work on systems that either only - support 64-bit DMA or only support 32-bit DMA. Attempt to set a - coherent dma mask for 64-bit DMA, and attempt again with 32-bit - DMA if that fails. - - Signed-off-by: Mark Langsdorf - -commit da4d6ecc4b7b5802b7caee5844933abaca6f7de4 -Author: Mark Salter -Date: Mon Nov 10 16:31:05 2014 -0500 - - iommu/arm-smmu: fix NULL dereference with ACPI PCI devices - - Fix a NULL dereference in find_mmu_master which occurs when - booting with ACPI. 
In that case, PCI bridges with not have - an of_node. Add a check for NULL of_node and bail out if that - is the case. - - Signed-off-by: Mark Salter - -commit eb81ac50a2193a52251c933c07ddc05a39717c7c -Author: Mark Salter -Date: Mon Nov 10 21:35:11 2014 -0500 - - DO NOT UPSTREAM - arm64: fix dma_ops for ACPI and PCI devices - - Commit 2189064795dc3fb4101e5: - - arm64: Implement set_arch_dma_coherent_ops() to replace bus notifiers - - removed the bus notifiers from dma-mapping.c. This patch - adds the notifier back for ACPI and PCI devices until a - better permanent solution is worked out. - - Signed-off-by: Mark Salter - -commit efaf1674559c92ed2a862cfc44d51967ccd99928 -Author: Mark Salter -Date: Thu Aug 14 12:32:13 2014 -0400 - - acpi: add utility to test for device dma coherency - - ACPI 5.1 adds a _CCA object to indicate memory coherency - of a bus master device. It is an integer with zero meaning - non-coherent and one meaning coherent. This attribute may - be inherited from a parent device. It may also be missing - entirely, in which case, an architecture-specific default - is assumed. - - This patch adds a utility function to parse a device handle - (and its parents) for a _CCA object and return the coherency - attribute if found. - - Signed-off-by: Mark Salter - -commit 907e55f6d08572382856382ea0726c377ced90fc -Author: Donald Dutile -Date: Sat Nov 22 12:08:53 2014 -0500 - - DO NOT UPSTREAM - arm64: kvm: Change vgic resource size error to info - - A new check was added to upstream to ensure a full - kernel page was allocated to the vgic. The check failed - kvm configuration if the condition wasn't met. An arm64 - kernel with 64K pagesize and certain early firmware will - fail this test. Change error to info & continue configuration - for now. - - Signed-off-by: Mark Salter - -commit 7572b5b209ececd0a7a7c94e8104b829b0f22d5e -Author: Wei Huang -Date: Sat Nov 22 10:38:45 2014 -0500 - - KVM/ACPI: Enable ACPI support for KVM virt GIC - - This patches enables ACPI support for KVM virtual GIC. KVM parses - ACPI table for virt GIC related information when DT table is not - present. This is done by retrieving the information defined in - generic_interrupt entry of MADT table. - - Note: Alexander Spyridakis from Virtual Open System posts a - _very_ similar patch to enable acpi-kvm. This patch borrows some - ideas from his patch. - - Signed-off-by: Wei Huang - [combined with subsequent patch to use acpi_disabled] - Signed-off-by: Mark Salter - -commit f40884650cda7b3f255a72f5b8ed554b1e3d660e -Author: Wei Huang -Date: Sat Nov 22 10:18:57 2014 -0500 - - KVM/ACPI: Enable ACPI support for virt arch timer - - This patches enables ACPI support for KVM virtual arch_timer. It - allows KVM to parse ACPI table for virt arch_timer PPI when DT table - is not present. This is done by retrieving the information from - arch_timer_ppi array in arm_arch_timer driver. 
- - Signed-off-by: Wei Huang - [combined with subsequent patch to use acpi_disabled] - Signed-off-by: Mark Salter - -commit 541a331980be3eec12baa65040ce7c2b354462af -Author: Mark Salter -Date: Tue Oct 7 12:54:08 2014 -0400 - - xgene acpi network - first cut - -commit a90c889901a6455730539cb7a0935df27f1a68de -Author: Mark Salter -Date: Mon Feb 16 16:39:38 2015 -0500 - - net: phy: amd: DO NOT UPSTREAM: Add support for A0 silicon xgbe phy - - From: Tom Lendacky - Support A0 silicon xgbe phy - - Signed-off-by: Mark Salter - -commit c72678575de9cfe30cfc2da2fb15b90692fd2068 -Author: Mark Salter -Date: Mon Feb 16 16:37:15 2015 -0500 - - net: amd: DO NOT UPSTREAM: Add xgbe-a0 driver - - From: Tom Lendacky - Add support for A0 silicon xgbe driver - - Signed-off-by: Mark Salter - -commit 72b6be9435d46fe4a3e09e5e01317f8a0951624d -Author: Graeme Gregory -Date: Fri Jul 26 17:55:02 2013 +0100 - - virtio-mmio: add ACPI probing - - Added the match table and pointers for ACPI probing to the driver. - - Signed-off-by: Graeme Gregory - -commit bcb68db63adaf54843fa5c8a559ba48034886576 -Author: Graeme Gregory -Date: Wed Jul 24 11:29:48 2013 +0100 - - net: smc91x: add ACPI probing support. - - Add device ID LINA0003 for this device and add the match table. - - As its a platform device it needs no other code and will be probed in by - acpi_platform once device ID is added. - - Signed-off-by: Graeme Gregory - -commit 2df0d5a82afba0e0d9c55977ae7e7483e0f4080f -Author: Mark Salter -Date: Sun Sep 14 09:44:44 2014 -0400 - - Revert "ahci_xgene: Skip the PHY and clock initialization if already configured by the firmware." - - This reverts commit 0bed13bebd6c99d097796d2ca6c4f10fb5b2eabc. - - Temporarily revert for backwards compatibility with rh-0.12-1 firmware - -commit 52e7166eb5ecf60fcc1d557ecfe8447fd83c55d9 -Author: Mark Salter -Date: Mon Aug 11 13:46:43 2014 -0400 - - xgene: add support for ACPI-probed serial port - -commit 862142c487e37cea64a9fef9fbd37816395de142 -Author: Mark Salter -Date: Sat Aug 9 12:01:20 2014 -0400 - - sata/xgene: support acpi probing - - Signed-off-by: Mark Salter - -commit 7c26e9688ab62d89c5027127482dc40095a9d5ba -Author: Mark Salter -Date: Thu Sep 18 15:05:23 2014 -0400 - - arm64: add sev to parking protocol - - Parking protocol wakes secondary cores with an interrupt. - This patch adds an additional sev() to send an event. This - is a temporary hack for APM Mustang board and not intended - for upstream. - - Signed-off-by: Mark Salter - -commit be1439218c1ce70298d976f5fad95b85644ae0fa -Author: Mark Salter -Date: Tue Sep 9 22:59:48 2014 -0400 - - arm64: add parking protocol support - - This is a first-cut effort at parking protocol support. It is - very much a work in progress (as is the spec it is based on). - This code deviates from the current spec in a number of ways - to work around current firmware issues and issues with kernels - using 64K page sizes. - - caveat utilitor - - Signed-off-by: Mark Salter - -commit 8a0834f58a9484d5983b349779efe2921db23c30 -Author: Mark Salter -Date: Mon Oct 13 20:49:43 2014 -0400 - - arm64/perf: add ACPI support - - Add ACPI support to perf_event driver. This involves getting - the irq info from the MADT and registering a platform device - when booting with ACPI. This requires updated firmware for - Mustang and FM for PMU driver to get correct info from ACPI. 
- - Signed-off-by: Mark Salter - -commit 0a8be018986d895852300cb33de2d316e1029f77 -Author: Suravee Suthikulpanit -Date: Mon Feb 9 00:20:04 2015 +0800 - - ata: ahci_platform: Add ACPI _CLS matching - - This patch adds ACPI supports for AHCI platform driver, which uses _CLS - method to match the device. - - The following is an example of ASL structure in DSDT for a SATA controller, - which contains _CLS package to be matched by the ahci_platform driver: - - Device (AHC0) // AHCI Controller - { - Name(_HID, "AMDI0600") - Name (_CCA, 1) - Name (_CLS, Package (3) - { - 0x01, // Base Class: Mass Storage - 0x06, // Sub-Class: serial ATA - 0x01, // Interface: AHCI - }) - Name (_CRS, ResourceTemplate () - { - Memory32Fixed (ReadWrite, 0xE0300000, 0x00010000) - Interrupt (ResourceConsumer, Level, ActiveHigh, Exclusive,,,) { 387 } - }) - } - - Also, since ATA driver should not require PCI support for ATA_ACPI, - this patch removes dependency in the driver/ata/Kconfig. - - Acked-by: Tejun Heo - Signed-off-by: Suravee Suthikulpanit - -commit c89b443a5d643c81be1eef7817ea5e91f9d7e2fd -Author: Suravee Suthikulpanit -Date: Mon Feb 9 00:20:03 2015 +0800 - - ACPI / scan: Add support for ACPI _CLS device matching - - Device drivers typically use ACPI _HIDs/_CIDs listed in struct device_driver - acpi_match_table to match devices. However, for generic drivers, we do - not want to list _HID for all supported devices, and some device classes - do not have _CID (e.g. SATA, USB). Instead, we can leverage ACPI _CLS, - which specifies PCI-defined class code (i.e. base-class, subclass and - programming interface). - - This patch adds support for matching ACPI devices using the _CLS method. - - Signed-off-by: Suravee Suthikulpanit - -commit ca42ac9e3bcde0394316ed697a05146730ed8c06 -Author: Mark Salter -Date: Mon Sep 8 17:04:28 2014 -0400 - - acpi/arm64: NOT FOR UPSTREAM - remove EXPERT dependency - - For convenience to keep existing configs working, remove - CONFIG_EXPERT dependency from ACPI for ARM64. This shouldn't - go upstream just yet. - - Signed-off-by: Mark Salter - -commit 574138b1f1aacdb83c362a44bc20e7bc4c9ddfa3 -Author: Mark Salter -Date: Mon Jan 19 18:15:16 2015 -0500 - - DO NOT UPSTREAM - pci/xgene: Provide fixup for ACPI MCFG support - - Xgene doesn't decode bus bits of mmconfig region and only - supports devfn 0 of bus 0. For other buses/devices, some - internal registers need to be poked. This patch provides - a fixup to support ACPI MCFG tables. This is a horrible - hack allowing the hardware to be used for PCI testing, but - it is not intended to be a long term patch. - - Signed-off-by: Mark Salter - -commit 2963b36f72691836ffe87beb2a1819dcf46c8b5f -Author: Mark Salter -Date: Mon Jan 19 17:43:54 2015 -0500 - - DO NOT UPSTREAM - provide hook for MCFG fixups - - This is a temprary mechanism needed by at least one early - arm64 hardware platform with broken MCFG support. This is - not intended for upstream and will go away as soon as newer - hardware with fully compliant ECAM becomes available. - - Signed-off-by: Mark Salter - -commit e1dee689c6b97065d8de205f78218cb348c7f600 -Author: Mark Salter -Date: Wed Feb 11 14:46:03 2015 -0500 - - arm64/pci/acpi: initial support for ACPI probing of PCI - - Signed-off-by: Mark Salter - -commit 507ba9b44cb9454d45b79f93e2ac4f7e45233f98 -Author: Tomasz Nowicki -Date: Wed Nov 19 17:04:51 2014 +0100 - - pci, acpi: Share ACPI PCI config space accessors. - - MMCFG can be used perfectly for all architectures which support ACPI. 
- ACPI mandates MMCFG to describe PCI config space ranges which means - we should use MMCONFIG accessors by default. - - Signed-off-by: Tomasz Nowicki - Tested-by: Hanjun Guo - -commit cc00e957e27e141416df5a48cc2bda4437992c67 -Author: Tomasz Nowicki -Date: Wed Nov 19 17:04:50 2014 +0100 - - x86, acpi, pci: mmconfig_64.c becomes default implementation for arch agnostic low-level direct PCI config space accessors via MMCONFIG. - - Note that x86 32bits machines still have its own low-level direct - PCI config space accessors. - - Signed-off-by: Tomasz Nowicki - -commit 18343755a531c6dafdfd390bef7e62f2137e6836 -Author: Tomasz Nowicki -Date: Wed Nov 19 17:04:49 2014 +0100 - - x86, acpi, pci: mmconfig_{32,64}.c code refactoring - remove code duplication. - - mmconfig_64.c version is going to be default implementation for arch - agnostic low-level direct PCI config space accessors via MMCONFIG. - However, now it initialize raw_pci_ext_ops pointer which is used in - x86 specific code only. Moreover, mmconfig_32.c is doing the same thing - at the same time. - - Move it to mmconfig_shared.c so it becomes common for both and - mmconfig_64.c turns out to be purely arch agnostic. - - Signed-off-by: Tomasz Nowicki - Tested-by: Hanjun Guo - -commit 36db151289ae6e5cb09a86c23127a9cd6e10129c -Author: Tomasz Nowicki -Date: Wed Nov 19 17:04:48 2014 +0100 - - x86, acpi, pci: Move PCI config space accessors. - - We are going to use mmio_config_{} name convention across all architectures. - Currently it belongs to asm/pci_x86.h header which should be included - only for x86 specific files. From now on, those accessors are in asm/pci.h - header which can be included in non-architecture code much easier. - - Signed-off-by: Tomasz Nowicki - Tested-by: Hanjun Guo - -commit 72208d51b9b656ac97fc632b015af87e702bd352 -Author: Tomasz Nowicki -Date: Wed Nov 19 17:04:47 2014 +0100 - - x86, acpi, pci: Move arch-agnostic MMCFG code out of arch/x86/ directory - - MMCFG table seems to be architecture independent and it makes sense - to share common code across all architectures. The ones that may need - architectural specific actions have default prototype (__weak). - - Signed-off-by: Tomasz Nowicki - Tested-by: Hanjun Guo - -commit 4620b275253d117ba30e996bfc890bcc472165ba -Author: Tomasz Nowicki -Date: Wed Nov 19 17:04:46 2014 +0100 - - x86, acpi, pci: Reorder logic of pci_mmconfig_insert() function - - This patch is the first step of MMCONFIG refactoring process. - - Code that uses pci_mmcfg_lock will be moved to common file and become - accessible for all architectures. pci_mmconfig_insert() cannot be moved - so easily since it is mixing generic mmcfg code with x86 specific logic - inside of mutual exclusive block guarded by pci_mmcfg_lock. - - To get rid of that constraint we reorder actions as fallow: - 1. mmconfig entry allocation can be done at first, does not need lock - 2. insertion to iomem_resource has its own lock, no need to wrap it into mutex - 3. 
insertion to mmconfig list can be done as the final stage in separate - function (candidate for further factoring) - - Signed-off-by: Tomasz Nowicki - Tested-by: Hanjun Guo - -commit 1686ea5a6feed1c661173e66a2996fc6286cd7c3 -Author: Suravee Suthikulanit -Date: Tue Jan 20 23:49:46 2015 -0600 - - DO NOT UPSTREAM YET: Introducing ACPI support for GICv2m - - Signed-off-by: Mark Salter - -commit be2b8706d9bf4e69cc3fd63709859a148c0fdb6e -Author: Suravee Suthikulpanit -Date: Tue Feb 3 12:55:57 2015 -0500 - - DO NOT UPSTREAM YET: Clean up GIC irq domain for ACPI - - Instead of using the irq_default_domain, define the acpi_irq_domain. - This still have the same assumption that ACPI only support a single - GIC domain. - - Also, rename acpi_gic_init() to acpi_irq_init() - - From: Suravee Suthikulpanit - [Add extern declaration for gicv2m_acpi_init()] - [Do not rename acpi_gic_init()] - Signed-off-by: Mark Salter - -commit 6eb83df4e91d30b66fbfa16292e09677f683ee93 -Author: Marc Zyngier -Date: Thu Jan 8 17:06:12 2015 +0000 - - PCI/MSI: Drop domain field from msi_controller - - The only two users of that field are not using the msi_controller - structure anymore, so drop it altogether. - - Signed-off-by: Marc Zyngier - -commit 8326736fc8b61b9422756c0bf0e4c5dd9dc1ce80 -Author: Marc Zyngier -Date: Thu Jan 8 17:06:11 2015 +0000 - - irqchip: gicv3-its: Get rid of struct msi_controller - - The GICv3 ITS only uses the msi_controller structure as a way - to match the PHB with its MSI HW, and thus the msi_domain. - But now that we can directly associate an msi_domain with a device, - there is no use keeping this msi_controller around. - - Just remove all traces of msi_controller from the driver. - - Signed-off-by: Marc Zyngier - -commit fded15d5b5e0492f64564824111f2f901911b5de -Author: Marc Zyngier -Date: Thu Jan 8 17:06:10 2015 +0000 - - irqchip: GICv2m: Get rid of struct msi_controller - - GICv2m only uses the msi_controller structure as a way to match - the PHB with its MSI HW, and thus the msi_domain. But now that - we can directly associate an msi_domain with a device, there is - no use keeping this msi_controller around. - - Just remove all traces of msi_controller from the driver. - - Signed-off-by: Marc Zyngier - -commit 4b34322f05a85bb863f3d013776588677e326767 -Author: Marc Zyngier -Date: Thu Jan 8 17:06:09 2015 +0000 - - PCI/MSI: Let pci_msi_get_domain use struct device's msi_domain - - Now that we can easily find which MSI domain a PCI device is - using, use dev_get_msi_domain as a way to retrieve the information. - - The original code is still used as a fallback. - - Signed-off-by: Marc Zyngier - -commit cc60a2e8d9efe415509b532a6fbb04ce1f43b1c5 -Author: Marc Zyngier -Date: Thu Jan 8 17:06:08 2015 +0000 - - PCI/MSI: of: Allow msi_domain lookup using the PHB node - - A number of platforms do not need to use the msi-parent property, - as the host bridge itself provides the MSI controller. - - Allow this configuration by performing an irq domain lookup based - on the PHB node if it doesn't have a valid msi-parent property. - - Signed-off-by: Marc Zyngier - -commit af6b972b6cabcda796b88b52111d2f5883006fbc -Author: Marc Zyngier -Date: Thu Jan 8 17:06:07 2015 +0000 - - PCI/MSI: of: Add support for OF-provided msi_domain - - In order to populate the PHB msi_domain, use the "msi-parent" - attribute to lookup a corresponding irq domain. If found, - this is our MSI domain. - - This gets plugged into the core PCI code. 
- - Signed-off-by: Marc Zyngier - -commit 3e4b7b8d1bc9927144be8e123dfcc790e7ff1f46 -Author: Marc Zyngier -Date: Thu Jan 8 17:06:06 2015 +0000 - - PCI/MSI: Add hooks to populate the msi_domain field - - In order to be able to populate the device msi_domain field, - add the necesary hooks to propagate the PHB msi_domain across - secondary busses to devices. - - So far, nobody populates the initial msi_domain. - - Signed-off-by: Marc Zyngier - -commit e44063e72aecf5273d2b2a72afdef227cf34b92e -Author: Marc Zyngier -Date: Thu Jan 8 17:06:05 2015 +0000 - - device core: Introduce per-device MSI domain pointer - - As MSI-type features are creeping into non-PCI devices, it is - starting to make sense to give our struct device some form of - support for this, by allowing a pointer to an MSI irq domain to - be set/retrieved. - - Signed-off-by: Marc Zyngier - -commit 2b88e0efc0079e2914b8d29eda7178d319bc451d -Author: Al Stone -Date: Mon Feb 2 20:45:49 2015 +0800 - - arm64: ACPI: additions of ACPI documentation for arm64 - - Two more documentation files are also being added: - (1) A verbatim copy of the "Why ACPI on ARM?" blog posting by Grant Likely, - which is also summarized in arm-acpi.txt, and - - (2) A section by section review of the ACPI spec (acpi_object_usage.txt) - to note recommendations and prohibitions on the use of the numerous - ACPI tables and objects. This sets out the current expectations of - the firmware by Linux very explicitly (or as explicitly as I can, for - now). - - CC: Suravee Suthikulpanit - CC: Yi Li - CC: Mark Langsdorf - CC: Ashwin Chaugule - Signed-off-by: Al Stone - Signed-off-by: Hanjun Guo - -commit 11898311c598c099fff3be2fc80296ba0ce04760 -Author: Graeme Gregory -Date: Mon Feb 2 20:45:48 2015 +0800 - - Documentation: ACPI for ARM64 - - Add documentation for the guidelines of how to use ACPI - on ARM64. - - Reviewed-by: Suravee Suthikulpanit - Reviewed-by: Yi Li - Reviewed-by: Mark Langsdorf - Reviewed-by: Ashwin Chaugule - Signed-off-by: Graeme Gregory - Signed-off-by: Al Stone - Signed-off-by: Hanjun Guo - -commit 15c86f7418a04b2d6f10f6c7c22c09b2eb4f184a -Author: Graeme Gregory -Date: Mon Feb 2 20:45:47 2015 +0800 - - ARM64 / ACPI: Enable ARM64 in Kconfig - - Add Kconfigs to build ACPI on ARM64, and make ACPI available on ARM64. - - acpi_idle driver is x86/IA64 dependent now, so make CONFIG_ACPI_PROCESSOR - depend on X86 || IA64, and implement it on ARM64 in the future. - - CC: Rafael J. Wysocki - CC: Catalin Marinas - CC: Will Deacon - Reviewed-by: Grant Likely - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Graeme Gregory - Signed-off-by: Al Stone - Signed-off-by: Hanjun Guo - -commit 7f6b100b64905fcb75d6daadf794a20f6dce2f1c -Author: Mark Salter -Date: Tue Feb 3 10:51:16 2015 -0500 - - acpi: fix acpi_os_ioremap for arm64 - - The acpi_os_ioremap() function may be used to map normal RAM or IO - regions. The current implementation simply uses ioremap_cache(). This - will work for some architectures, but arm64 ioremap_cache() cannot be - used to map IO regions which don't support caching. So for arm64, use - ioremap() for non-RAM regions. 
- - CC: Rafael J Wysocki - Signed-off-by: Mark Salter - -commit 49e6c38cd04dbc34d1582209cf54ed6d0810f10b -Author: Al Stone -Date: Mon Feb 2 20:45:46 2015 +0800 - - ARM64 / ACPI: Select ACPI_REDUCED_HARDWARE_ONLY if ACPI is enabled on ARM64 - - ACPI reduced hardware mode is disabled by default, but ARM64 - can only run properly in ACPI hardware reduced mode, so select - ACPI_REDUCED_HARDWARE_ONLY if ACPI is enabled on ARM64. - - CC: Catalin Marinas - CC: Will Deacon - Reviewed-by: Grant Likely - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Al Stone - Signed-off-by: Hanjun Guo - -commit 2b40359067fcd5b12fcd9c0b50064bcc6d4ed8a6 -Author: Hanjun Guo -Date: Mon Feb 2 20:45:45 2015 +0800 - - clocksource / arch_timer: Parse GTDT to initialize arch timer - - Using the information presented by GTDT (Generic Timer Description Table) - to initialize the arch timer (not memory-mapped). - - CC: Daniel Lezcano - Originally-by: Amit Daniel Kachhap - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Hanjun Guo - -commit 19b24b7ca4887f2e6d600d380a59d32478545499 -Author: Tomasz Nowicki -Date: Mon Feb 2 20:45:44 2015 +0800 - - irqchip: Add GICv2 specific ACPI boot support - - ACPI kernel uses MADT table for proper GIC initialization. It needs to - parse GIC related subtables, collect CPU interface and distributor - addresses and call driver initialization function (which is hardware - abstraction agnostic). In a similar way, FDT initialize GICv1/2. - - NOTE: This commit allow to initialize GICv1/2 basic functionality. - While now simple GICv2 init call is used, any further GIC features - require generic infrastructure for proper ACPI irqchip initialization. - That mechanism and stacked irqdomains to support GICv2 MSI/vitalization - extension, GICv3/4 and its ITS are considered as next steps. - - CC: Jason Cooper - CC: Marc Zyngier - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Tomasz Nowicki - Signed-off-by: Hanjun Guo - -commit 5a8765560eac0563826a7abf83f5bca2ce158ec7 -Author: Hanjun Guo -Date: Mon Feb 2 20:45:43 2015 +0800 - - ARM64 / ACPI: Introduce ACPI_IRQ_MODEL_GIC and register device's gsi - - Introduce ACPI_IRQ_MODEL_GIC which is needed for ARM64 as GIC is - used, and then register device's gsi with the core IRQ subsystem. - - acpi_register_gsi() is similar to DT based irq_of_parse_and_map(), - since gsi is unique in the system, so use hwirq number directly - for the mapping. - - We are going to implement stacked domains when GICv2m, GICv3, ITS - support are added. - - CC: Marc Zyngier - Originally-by: Amit Daniel Kachhap - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Hanjun Guo - -commit e81cf7fc6beaad3f9532a7db22d91e43b7fc1ae5 -Author: Hanjun Guo -Date: Mon Feb 2 20:45:42 2015 +0800 - - ACPI / processor: Make it possible to get CPU hardware ID via GICC - - Introduce a new function map_gicc_mpidr() to allow MPIDRs to be obtained - from the GICC Structure introduced by ACPI 5.1. - - MPIDR is the CPU hardware ID as local APIC ID on x86 platform, so we use - MPIDR not the GIC CPU interface ID to identify CPUs. 
- - Further steps would typedef a phys_id_t for in arch code(with - appropriate size and a corresponding invalid value, say ~0) and use that - instead of an int in drivers/acpi/processor_core.c to store phys_id, then - no need for mpidr packing. - - CC: Rafael J. Wysocki - CC: Catalin Marinas - CC: Will Deacon - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Hanjun Guo - -commit 1887acdfd84b02c68bf17db11742a2c6b1461d14 -Author: Hanjun Guo -Date: Mon Feb 2 20:45:41 2015 +0800 - - ARM64 / ACPI: Parse MADT for SMP initialization - - MADT contains the information for MPIDR which is essential for - SMP initialization, parse the GIC cpu interface structures to - get the MPIDR value and map it to cpu_logical_map(), and add - enabled cpu with valid MPIDR into cpu_possible_map. - - ACPI 5.1 only has two explicit methods to boot up SMP, PSCI and - Parking protocol, but the Parking protocol is only specified for - ARMv7 now, so make PSCI as the only way for the SMP boot protocol - before some updates for the ACPI spec or the Parking protocol spec. - - Parking protocol patches for SMP boot will be sent to upstream when - the new version of Parking protocol is ready. - - CC: Lorenzo Pieralisi - CC: Catalin Marinas - CC: Will Deacon - CC: Mark Rutland - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Hanjun Guo - Signed-off-by: Tomasz Nowicki - -commit ce773743a3ea42fa3fcdb8a19c8883b0555abc87 -Author: Hanjun Guo -Date: Mon Feb 2 20:45:40 2015 +0800 - - ACPI / table: Print GIC information when MADT is parsed - - When MADT is parsed, print GIC information to make the boot - log look pretty: - - ACPI: GICC (acpi_id[0x0000] address[00000000e112f000] MPIDR[0x0] enabled) - ACPI: GICC (acpi_id[0x0001] address[00000000e112f000] MPIDR[0x1] enabled) - ... - ACPI: GICC (acpi_id[0x0201] address[00000000e112f000] MPIDR[0x201] enabled) - - These information will be very helpful to bring up early systems to - see if acpi_id and MPIDR are matched or not as spec defined. - - CC: Rafael J. Wysocki - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Hanjun Guo - Signed-off-by: Tomasz Nowicki - -commit 093da4679d8c27c8e688ad527c63cf9cf908e21c -Author: Graeme Gregory -Date: Mon Feb 2 20:45:39 2015 +0800 - - ARM64 / ACPI: Get PSCI flags in FADT for PSCI init - - There are two flags: PSCI_COMPLIANT and PSCI_USE_HVC. When set, - the former signals to the OS that the firmware is PSCI compliant. - The latter selects the appropriate conduit for PSCI calls by - toggling between Hypervisor Calls (HVC) and Secure Monitor Calls - (SMC). - - FADT table contains such information in ACPI 5.1, FADT table was - parsed in ACPI table init and copy to struct acpi_gbl_FADT, so - use the flags in struct acpi_gbl_FADT for PSCI init. - - Since ACPI 5.1 doesn't support self defined PSCI function IDs, - which means that only PSCI 0.2+ is supported in ACPI. 
- - CC: Lorenzo Pieralisi - CC: Catalin Marinas - CC: Will Deacon - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Graeme Gregory - Signed-off-by: Tomasz Nowicki - Signed-off-by: Hanjun Guo - -commit 029a9833a4e8a876bdac71fe7560718bca3a7312 -Author: Graeme Gregory -Date: Mon Feb 2 20:45:38 2015 +0800 - - ARM64 / ACPI: If we chose to boot from acpi then disable FDT - - If the early boot methods of acpi are happy that we have valid ACPI - tables and acpi=force has been passed, then do not unflat devicetree - effectively disabling further hardware probing from DT. - - CC: Catalin Marinas - CC: Will Deacon - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Graeme Gregory - Signed-off-by: Hanjun Guo - -commit 1d9af820ab2bc576e3d1ddc6e1797049f46d32ba -Author: Hanjun Guo -Date: Mon Feb 2 20:45:37 2015 +0800 - - ARM64 / ACPI: Disable ACPI if FADT revision is less than 5.1 - - FADT Major.Minor version was introduced in ACPI 5.1, it is the same - as ACPI version. - - In ACPI 5.1, some major gaps are fixed for ARM, such as updates in - MADT table for GIC and SMP init, without those updates, we can not - get the MPIDR for SMP init, and GICv2/3 related init information, so - we can't boot arm64 ACPI properly with table versions predating 5.1. - - If firmware provides ACPI tables with ACPI version less than 5.1, - OS has no way to retrieve the configuration data that is necessary - to init SMP boot protocol and the GIC properly, so disable ACPI if - we get an FADT table with version less that 5.1. - - CC: Lorenzo Pieralisi - CC: Catalin Marinas - CC: Will Deacon - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Hanjun Guo - -commit 1bcb26c31cbc9db1645a62a71b8bce9302d778fc -Author: Hanjun Guo -Date: Mon Feb 2 20:45:36 2015 +0800 - - dt / chosen: Add linux,uefi-stub-generated-dtb property - - When system supporting both DT and ACPI but firmware providing - no dtb, we can use this linux,uefi-stub-generated-dtb property - to let kernel know that we can try ACPI configuration data even - if no "acpi=force" is passed in early parameters. - - CC: Mark Rutland - CC: Jonathan Corbet - CC: Catalin Marinas - CC: Will Deacon - CC: Leif Lindholm - CC: Grant Likely - CC: Matt Fleming - Signed-off-by: Hanjun Guo - -commit db0a9f442cefc5eb655fd25baaf725c7f69440c7 -Author: Al Stone -Date: Mon Feb 2 20:45:35 2015 +0800 - - ARM64 / ACPI: Introduce early_param for "acpi" and pass acpi=force to enable ACPI - - Introduce two early parameters "off" and "force" for "acpi", acpi=off - will be the default behavior for ARM64, so introduce acpi=force to - enable ACPI on ARM64. - - Disable ACPI before early parameters parsed, and enable it to pass - "acpi=force" if people want use ACPI on ARM64. This ensures DT be - the prefer one if ACPI table and DT both are provided at this moment. - - CC: Catalin Marinas - CC: Will Deacon - CC: Rafael J. 
Wysocki - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Al Stone - Signed-off-by: Graeme Gregory - Signed-off-by: Hanjun Guo - -commit 4d8a88fa316ced237776e33106aaf11696f458a0 -Author: Hanjun Guo -Date: Mon Feb 2 20:45:34 2015 +0800 - - ARM64 / ACPI: Introduce PCI stub functions for ACPI - - CONFIG_ACPI depends CONFIG_PCI on x86 and ia64, in ARM64 server - world we will have PCIe in most cases, but some of them may not, - make CONFIG_ACPI depend CONFIG_PCI on ARM64 will satisfy both. - - With that case, we need some arch dependent PCI functions to - access the config space before the PCI root bridge is created, and - pci_acpi_scan_root() to create the PCI root bus. So introduce - some stub function here to make ACPI core compile and revisit - them later when implemented on ARM64. - - CC: Liviu Dudau - CC: Catalin Marinas - CC: Will Deacon - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Hanjun Guo - -commit b74ac8899a0e305d32808f735953b465991b8f17 -Author: Graeme Gregory -Date: Mon Feb 2 20:45:33 2015 +0800 - - ACPI / sleep: Introduce sleep_arm.c - - ACPI 5.1 does not currently support S states for ARM64 hardware but - ACPI code will call acpi_target_system_state() for device power - managment, so introduce sleep_arm.c to allow other drivers to function - until S states are defined. - - CC: Rafael J. Wysocki - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Graeme Gregory - Signed-off-by: Tomasz Nowicki - Signed-off-by: Hanjun Guo - -commit bfbe33ec40d51e51d0544fa1273ecf3dd4142a92 -Author: Al Stone -Date: Mon Feb 2 20:45:32 2015 +0800 - - ARM64 / ACPI: Get RSDP and ACPI boot-time tables - - As we want to get ACPI tables to parse and then use the information - for system initialization, we should get the RSDP (Root System - Description Pointer) first, it then locates Extended Root Description - Table (XSDT) which contains all the 64-bit physical address that - pointer to other boot-time tables. - - Introduce acpi.c and its related head file in this patch to provide - fundamental needs of extern variables and functions for ACPI core, - and then get boot-time tables as needed. - - asm/acenv.h for arch specific ACPICA environments and - implementation, It is needed unconditionally by ACPI core; - - asm/acpi.h for arch specific variables and functions needed by - ACPI driver core; - - acpi.c for ARM64 related ACPI implementation for ACPI driver - core; - - acpi_boot_table_init() is introduced to get RSDP and boot-time tables, - it will be called in setup_arch() before paging_init(), so we should - use eary_memremap() mechanism here to get the RSDP and all the table - pointers. - - CC: Catalin Marinas - CC: Will Deacon - Tested-by: Suravee Suthikulpanit - Tested-by: Yijing Wang - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Tested-by: Timur Tabi - Signed-off-by: Al Stone - Signed-off-by: Graeme Gregory - Signed-off-by: Tomasz Nowicki - Signed-off-by: Hanjun Guo - -commit db306fcca88084b45084a72ac7a9491994e3a1eb -Author: Mark Salter -Date: Mon Feb 2 20:45:31 2015 +0800 - - arm64: allow late use of early_ioremap - - Commit 0e63ea48b4d8 (arm64/efi: add missing call to early_ioremap_reset()) - added a missing call to early_ioremap_reset(). 
This triggers a BUG if code - tries using early_ioremap() after the early_ioremap_reset(). This is a - problem for some ACPI code which needs short-lived temporary mappings - after paging_init() but before acpi_early_init() in start_kernel(). This - patch adds definitions for the __late_set_fixmap() and __late_clear_fixmap() - which avoids the BUG by allowing later use of early_ioremap(). - - CC: Leif Lindholm - CC: Ard Biesheuvel - Tested-by: Suravee Suthikulpanit - Tested-by: Mark Langsdorf - Tested-by: Jon Masters - Signed-off-by: Mark Salter - Signed-off-by: Hanjun Guo - -commit df7dbcea6c185ceb3d62626e1e98024c0742b658 -Author: Graeme Gregory -Date: Mon Feb 2 20:45:29 2015 +0800 - - acpi: add arm64 to the platforms that use ioremap - - Now with the base changes to the arm memory mapping it is safe - to convert to using ioremap to map in the tables after - acpi_gbl_permanent_mmap is set. - - CC: Rafael J Wysocki - Signed-off-by: Al Stone - Signed-off-by: Graeme Gregory - Signed-off-by: Hanjun Guo - -commit 3f61bd4dc0405314173e5b4e129e79745ec994b9 -Author: Mark Salter -Date: Tue Sep 30 17:19:24 2014 -0400 - - arm64: avoid need for console= to enable serial console - - Tell kernel to prefer one of the serial ports on platforms - pl011, 8250, or sbsa uarts. console= on command line will - override these assumed preferences. This is just a hack to - get the behavior we want from SPCR table support. Once SPCR - is supported, we can drop this. - - Signed-off-by: Mark Salter - -commit 830714ea6390e53a92751b338dea8863aabaf81f -Author: Mark Salter -Date: Fri Nov 21 23:21:30 2014 -0500 - - DO NOT UPSTREAM - tty/pl011: make ttyAMA0 the active console device - - The pl011 uart driver doesn't register itself as a console - until device_initcall time. This allows the virtual console - driver to register the active console if no console= is - given on the cmdline. This patch allows ttyAMA0 to take - over the active console device role from any existing - console device if no console= is given on the cmdline. - - This is just a temporary hack until SPCR table is supported. - - Signed-off-by: Mark Salter - -commit a1c79144bc02623b27edafa950eacb304e1b6548 -Author: Mark Salter -Date: Wed Nov 19 10:08:29 2014 -0500 - - tty/sbsauart: DO NOT UPSTREAM - make ttySBSA the active console device - - The sbsauart driver doesn't register itself as a console - until module_initcall time. This allows the virtual console - driver to register the active console if no console= is - given on the cmdline. This patch allows ttySBSA to take - over the active console device role from any existing - console device if no console= is given on the cmdline. - - This is just a temprary hack until SPCR table is supported. - - Signed-off-by: Mark Salter - -commit 92c29f8cd602b302e6b56823ce056df05e9116cb -Author: Graeme Gregory -Date: Wed Aug 13 13:47:18 2014 +0100 - - tty: SBSA compatible UART - - This is a subset of pl011 UART which does not supprt DMA or baud rate - changing. It does, however, provide earlycon support (i.e., using - "earlycon=ttySBSA" on the kernel command line). - - It is specified in the Server Base System Architecture document from - ARM. - - Signed-off-by: Graeme Gregory - -commit 6c38227252804421b94861cd06473fd608d44276 -Author: Mark Salter -Date: Sat Nov 8 22:25:48 2014 -0500 - - arm64: use UEFI for reboot - - Wire in support for UEFI reboot. We want UEFI reboot to have - highest priority for capsule support. 
- - Signed-off-by: Mark Salter - -commit 116f11ce2a8538873fb787d42e69518e4dfb07fa -Author: Mark Salter -Date: Sat Nov 8 15:25:41 2014 -0500 - - arm64: use UEFI as last resort for poweroff - - Wire in support for poweroff via UEFI. - - Signed-off-by: Mark Salter - -commit 5dfa57078318e948a199b9f00855c0ae2dd7e2ca -Author: Mark Salter -Date: Thu Jul 17 13:34:50 2014 -0400 - - ahci_xgene: add errata workaround for ATA_CMD_SMART - - commit 2a0bdff6b958d1b2: - - ahci_xgene: fix the dma state machine lockup for the IDENTIFY DEVICE PIO mode command. - - added a workaround for X-Gene AHCI controller errata. This was done - for all ATA_CMD_ID_ATA commands. The errata also appears to affect - ATA_CMD_SMART commands as well. This was discovered when running - smartd or just smartctl -x. This patch adds a dma engine restart for - ATA_CMD_SMART commands which clears up the issues seen with smartd. - - Signed-off-by: Mark Salter - -commit 8dc5d6c4d782adc8a5f83f02ce7fd848eff129ca -Author: Kyle McMartin -Date: Tue May 13 22:25:26 2014 -0400 - - arm64: don't set READ_IMPLIES_EXEC for EM_AARCH64 ELF objects - - Currently, we're accidentally ending up with executable stacks on - AArch64 when the ABI says we shouldn't be, and relying on glibc to fix - things up for us when we're loaded. However, SELinux will deny us - mucking with the stack, and hit us with execmem AVCs. - - The reason this is happening is somewhat complex: - - fs/binfmt_elf.c:load_elf_binary() - - initializes executable_stack = EXSTACK_DEFAULT implying the - architecture should make up its mind. - - does a pile of loading goo - - runs through the program headers, looking for PT_GNU_STACK - and setting (or unsetting) executable_stack if it finds it. - - This is our first problem, we won't generate these unless an - executable stack is explicitly requested. - - - more ELF loading goo - - sets whether we're a compat task or not (TIF_32BIT) based on compat.h - - for compat reasons (pre-GNU_STACK) checks if the READ_IMPLIES_EXEC - flag should be set for ancient toolchains - - Here's our second problem, we test if read_implies_exec based on - stk != EXSTACK_DISABLE_X, which is true since stk == EXSTACK_DEFAULT. - - So we set current->personality |= READ_IMPLIES_EXEC like a broken - legacy toolchain would want. - - - Now we call setup_arg_pages to set up the stack... - - fs/exec.c:setup_arg_pages() - - lots of magic happens here - - vm_flags gets initialized to VM_STACK_FLAGS - - Here's our third problem, VM_STACK_FLAGS on arm64 is - VM_DEFAULT_DATA_FLAG which tests READ_IMPLIES_EXEC and sets VM_EXEC - if it's true. So we end up with an executable stack mapping, since we - don't have executable_stack set (it's still EXSTACK_DEFAULT at this - point) to unset it anywhere. - - Bang. execstack AVC when the program starts running. - - The easiest way I can see to fix this is to test if we're a legacy task - and fix it up there. But that's not as simple as it sounds, because - the 32-bit ABI depends on what revision of the CPU we've enabled (not - that it matters since we're ARMv8...) Regardless, in the compat case, - set READ_IMPLIES_EXEC if we've found a GNU_STACK header which explicitly - requested it as in arch/arm/kernel/elf.c:arm_elf_read_implies_exec(). 
- - Signed-off-by: Kyle McMartin - Signed-off-by: Donald Dutile - - Documentation/ABI/testing/sysfs-firmware-dmi | 10 + - Documentation/arm/uefi.txt | 3 + - Documentation/arm64/acpi_object_usage.txt | 592 +++++ - Documentation/arm64/arm-acpi.txt | 506 ++++ - Documentation/arm64/why_use_acpi.txt | 231 ++ - Documentation/kernel-parameters.txt | 3 +- - arch/arm64/Kconfig | 10 + - arch/arm64/include/asm/acenv.h | 18 + - arch/arm64/include/asm/acpi.h | 120 + - arch/arm64/include/asm/cpu_ops.h | 1 + - arch/arm64/include/asm/elf.h | 3 +- - arch/arm64/include/asm/fixmap.h | 3 + - arch/arm64/include/asm/pci.h | 66 + - arch/arm64/include/asm/psci.h | 3 +- - arch/arm64/include/asm/smp.h | 10 +- - arch/arm64/kernel/Makefile | 4 +- - arch/arm64/kernel/acpi.c | 397 +++ - arch/arm64/kernel/cpu_ops.c | 6 +- - arch/arm64/kernel/efi.c | 37 + - arch/arm64/kernel/pci.c | 424 +++- - arch/arm64/kernel/perf_event.c | 102 + - arch/arm64/kernel/psci.c | 78 +- - arch/arm64/kernel/setup.c | 80 +- - arch/arm64/kernel/smp.c | 2 +- - arch/arm64/kernel/smp_parking_protocol.c | 110 + - arch/arm64/kernel/time.c | 7 + - arch/arm64/mm/dma-mapping.c | 6 + - arch/x86/include/asm/pci.h | 42 + - arch/x86/include/asm/pci_x86.h | 72 - - arch/x86/pci/Makefile | 5 +- - arch/x86/pci/acpi.c | 1 + - arch/x86/pci/init.c | 1 + - arch/x86/pci/mmconfig-shared.c | 198 +- - arch/x86/pci/mmconfig_32.c | 11 +- - arch/x86/pci/mmconfig_64.c | 153 -- - drivers/acpi/Kconfig | 3 +- - drivers/acpi/Makefile | 5 + - drivers/acpi/bus.c | 4 + - drivers/acpi/mmconfig.c | 414 +++ - drivers/acpi/osl.c | 6 +- - drivers/acpi/processor_core.c | 37 + - drivers/acpi/scan.c | 75 +- - drivers/acpi/sleep_arm.c | 28 + - drivers/acpi/tables.c | 43 + - drivers/acpi/utils.c | 26 + - drivers/ata/Kconfig | 2 +- - drivers/ata/ahci_platform.c | 12 + - drivers/ata/ahci_xgene.c | 27 +- - drivers/clocksource/arm_arch_timer.c | 135 +- - drivers/firmware/dmi-sysfs.c | 42 + - drivers/firmware/dmi_scan.c | 26 + - drivers/firmware/efi/libstub/fdt.c | 8 + - drivers/iommu/arm-smmu.c | 8 +- - drivers/irqchip/irq-gic-v2m.c | 148 +- - drivers/irqchip/irq-gic-v3-its.c | 33 +- - drivers/irqchip/irq-gic-v3.c | 10 + - drivers/irqchip/irq-gic.c | 125 +- - drivers/irqchip/irqchip.c | 3 + - drivers/net/ethernet/amd/Makefile | 1 + - drivers/net/ethernet/amd/xgbe-a0/Makefile | 8 + - drivers/net/ethernet/amd/xgbe-a0/xgbe-common.h | 1142 +++++++++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-dcb.c | 269 ++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-debugfs.c | 373 +++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-desc.c | 636 +++++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-dev.c | 2964 ++++++++++++++++++++++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-drv.c | 2204 ++++++++++++++++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-ethtool.c | 616 +++++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-main.c | 643 +++++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-mdio.c | 312 +++ - drivers/net/ethernet/amd/xgbe-a0/xgbe-ptp.c | 284 +++ - drivers/net/ethernet/amd/xgbe-a0/xgbe.h | 868 +++++++ - drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 69 +- - drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 111 +- - drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 4 +- - drivers/net/ethernet/smsc/smc91x.c | 10 + - drivers/net/phy/Makefile | 1 + - drivers/net/phy/amd-xgbe-phy-a0.c | 1829 +++++++++++++ - drivers/pci/host/pci-xgene.c | 248 ++ - drivers/pci/msi.c | 3 +- - drivers/pci/of.c | 20 + - drivers/pci/pci-acpi.c | 36 + - drivers/pci/probe.c | 33 + - drivers/tty/Kconfig | 6 + - drivers/tty/Makefile | 1 + - drivers/tty/sbsauart.c | 358 +++ - 
drivers/tty/serial/8250/8250_dw.c | 6 +- - drivers/tty/serial/amba-pl011.c | 8 + - drivers/usb/host/xhci-plat.c | 15 +- - drivers/virtio/virtio_mmio.c | 12 +- - include/acpi/acnames.h | 1 + - include/acpi/acpi_bus.h | 2 + - include/acpi/acpi_io.h | 3 + - include/asm-generic/vmlinux.lds.h | 7 + - include/kvm/arm_vgic.h | 20 +- - include/linux/acpi.h | 26 + - include/linux/clocksource.h | 6 + - include/linux/device.h | 21 + - include/linux/dmi.h | 3 + - include/linux/irqchip/arm-gic-acpi.h | 31 + - include/linux/irqchip/arm-gic.h | 7 + - include/linux/mmconfig.h | 86 + - include/linux/mod_devicetable.h | 6 + - include/linux/msi.h | 4 +- - include/linux/pci-acpi.h | 3 + - include/linux/pci.h | 11 +- - kernel/irq/msi.c | 24 + - virt/kvm/arm/arch_timer.c | 107 +- - virt/kvm/arm/vgic-v2.c | 86 +- - virt/kvm/arm/vgic-v3.c | 8 +- - virt/kvm/arm/vgic.c | 32 +- - 110 files changed, 17386 insertions(+), 733 deletions(-) + Documentation/ABI/testing/sysfs-firmware-dmi | 10 + + Documentation/arm64/acpi_object_usage.txt | 593 ++++ + Documentation/arm64/arm-acpi.txt | 505 ++++ + .../devicetree/bindings/edac/apm-xgene-edac.txt | 83 + + Documentation/kernel-parameters.txt | 3 +- + MAINTAINERS | 8 + + arch/arm64/Kconfig | 13 + + arch/arm64/boot/dts/apm/apm-storm.dtsi | 98 + + arch/arm64/include/asm/acenv.h | 18 + + arch/arm64/include/asm/acpi.h | 99 + + arch/arm64/include/asm/cpu_ops.h | 1 + + arch/arm64/include/asm/edac.h | 38 + + arch/arm64/include/asm/efi.h | 28 +- + arch/arm64/include/asm/elf.h | 3 +- + arch/arm64/include/asm/fixmap.h | 3 + + arch/arm64/include/asm/irq.h | 13 + + arch/arm64/include/asm/mmu.h | 2 + + arch/arm64/include/asm/pci.h | 66 + + arch/arm64/include/asm/psci.h | 3 +- + arch/arm64/include/asm/smp.h | 10 +- + arch/arm64/kernel/Makefile | 4 +- + arch/arm64/kernel/acpi.c | 432 +++ + arch/arm64/kernel/cpu_ops.c | 6 +- + arch/arm64/kernel/efi.c | 223 +- + arch/arm64/kernel/pci.c | 424 ++- + arch/arm64/kernel/perf_event.c | 102 + + arch/arm64/kernel/psci.c | 78 +- + arch/arm64/kernel/setup.c | 44 +- + arch/arm64/kernel/smp.c | 2 +- + arch/arm64/kernel/smp_parking_protocol.c | 110 + + arch/arm64/kernel/time.c | 7 + + arch/arm64/mm/dma-mapping.c | 98 + + arch/arm64/mm/mmu.c | 14 +- + arch/ia64/Kconfig | 1 + + arch/ia64/kernel/acpi.c | 2 +- + arch/x86/Kconfig | 4 + + arch/x86/include/asm/pci_x86.h | 34 +- + arch/x86/kernel/acpi/boot.c | 2 +- + arch/x86/pci/Makefile | 5 +- + arch/x86/pci/acpi.c | 3 +- + arch/x86/pci/mmconfig-shared.c | 346 +-- + arch/x86/pci/mmconfig_32.c | 47 +- + arch/x86/pci/mmconfig_64.c | 153 - + arch/x86/pci/numachip.c | 27 +- + drivers/acpi/Kconfig | 7 +- + drivers/acpi/Makefile | 3 +- + drivers/acpi/acpi_processor.c | 7 +- + drivers/acpi/acpica/acutils.h | 3 + + drivers/acpi/acpica/nsxfname.c | 21 +- + drivers/acpi/acpica/utids.c | 71 + + drivers/acpi/bus.c | 3 + + drivers/acpi/internal.h | 4 + + drivers/acpi/mcfg.c | 140 + + drivers/acpi/osl.c | 6 +- + drivers/acpi/processor_core.c | 60 +- + drivers/acpi/scan.c | 17 +- + drivers/acpi/tables.c | 52 +- + drivers/acpi/utils.c | 26 + + drivers/ata/Kconfig | 2 +- + drivers/ata/ahci_platform.c | 9 + + drivers/ata/ahci_xgene.c | 27 +- + drivers/clocksource/arm_arch_timer.c | 135 +- + drivers/edac/Kconfig | 9 +- + drivers/edac/Makefile | 2 + + drivers/edac/xgene_edac.c | 2132 ++++++++++++++ + drivers/firmware/dmi-sysfs.c | 42 + + drivers/firmware/dmi_scan.c | 26 + + drivers/firmware/efi/libstub/arm-stub.c | 59 - + drivers/firmware/efi/libstub/efistub.h | 4 - + drivers/firmware/efi/libstub/fdt.c | 62 +- + 
drivers/iommu/arm-smmu.c | 8 +- + drivers/irqchip/irq-gic-v2m.c | 148 +- + drivers/irqchip/irq-gic-v3-its.c | 35 +- + drivers/irqchip/irq-gic-v3.c | 10 + + drivers/irqchip/irq-gic.c | 125 +- + drivers/irqchip/irqchip.c | 3 + + drivers/net/ethernet/amd/Makefile | 1 + + drivers/net/ethernet/amd/xgbe-a0/Makefile | 8 + + drivers/net/ethernet/amd/xgbe-a0/xgbe-common.h | 1142 ++++++++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-dcb.c | 269 ++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-debugfs.c | 373 +++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-desc.c | 636 +++++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-dev.c | 2964 ++++++++++++++++++++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-drv.c | 2204 +++++++++++++++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-ethtool.c | 616 ++++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-main.c | 643 +++++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-mdio.c | 312 +++ + drivers/net/ethernet/amd/xgbe-a0/xgbe-ptp.c | 284 ++ + drivers/net/ethernet/amd/xgbe-a0/xgbe.h | 868 ++++++ + drivers/net/ethernet/smsc/smc91x.c | 10 + + drivers/net/phy/Makefile | 1 + + drivers/net/phy/amd-xgbe-phy-a0.c | 1829 ++++++++++++ + drivers/pci/Kconfig | 7 + + drivers/pci/Makefile | 5 + + drivers/pci/ecam.c | 361 +++ + drivers/pci/host/pci-xgene.c | 156 ++ + drivers/pci/msi.c | 3 +- + drivers/pci/of.c | 20 + + drivers/pci/pci-acpi.c | 36 + + drivers/pci/pci.c | 26 +- + drivers/pci/probe.c | 33 + + drivers/tty/Kconfig | 6 + + drivers/tty/Makefile | 1 + + drivers/tty/sbsauart.c | 358 +++ + drivers/tty/serial/8250/8250_dw.c | 14 +- + drivers/tty/serial/amba-pl011.c | 8 + + drivers/usb/host/xhci-plat.c | 15 +- + drivers/virtio/virtio_mmio.c | 12 +- + drivers/xen/pci.c | 6 +- + include/acpi/acnames.h | 1 + + include/acpi/acpi_bus.h | 2 + + include/acpi/acpi_io.h | 4 + + include/acpi/actypes.h | 4 +- + include/acpi/processor.h | 6 +- + include/asm-generic/vmlinux.lds.h | 7 + + include/kvm/arm_vgic.h | 20 +- + include/linux/acpi.h | 8 +- + include/linux/acpi_irq.h | 10 + + include/linux/clocksource.h | 6 + + include/linux/device.h | 20 + + include/linux/dmi.h | 3 + + include/linux/ecam.h | 81 + + include/linux/irqchip/arm-gic-acpi.h | 32 + + include/linux/irqchip/arm-gic.h | 7 + + include/linux/mod_devicetable.h | 1 + + include/linux/msi.h | 4 +- + include/linux/pci-acpi.h | 3 + + include/linux/pci.h | 3 + + kernel/irq/msi.c | 24 + + scripts/mod/devicetable-offsets.c | 1 + + scripts/mod/file2alias.c | 13 +- + virt/kvm/arm/arch_timer.c | 107 +- + virt/kvm/arm/vgic-v2.c | 86 +- + virt/kvm/arm/vgic-v3.c | 8 +- + virt/kvm/arm/vgic.c | 32 +- + 135 files changed, 19707 insertions(+), 1036 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-firmware-dmi b/Documentation/ABI/testing/sysfs-firmware-dmi index c78f9ab..3a9ffe8 100644 @@ -1404,25 +156,12 @@ index c78f9ab..3a9ffe8 100644 DMI is structured as a large table of entries, where each entry has a common header indicating the type and length of the entry, as well as a firmware-provided -diff --git a/Documentation/arm/uefi.txt b/Documentation/arm/uefi.txt -index d60030a..5f86eae 100644 ---- a/Documentation/arm/uefi.txt -+++ b/Documentation/arm/uefi.txt -@@ -60,5 +60,8 @@ linux,uefi-mmap-desc-ver | 32-bit | Version of the mmap descriptor format. - -------------------------------------------------------------------------------- - linux,uefi-stub-kern-ver | string | Copy of linux_banner from build. - -------------------------------------------------------------------------------- -+linux,uefi-stub-generated-dtb | bool | Indication for no DTB provided by -+ | | firmware. 
-+-------------------------------------------------------------------------------- - - For verbose debug messages, specify 'uefi_debug' on the kernel command line. diff --git a/Documentation/arm64/acpi_object_usage.txt b/Documentation/arm64/acpi_object_usage.txt new file mode 100644 -index 0000000..2c4f733 +index 0000000..96e2273 --- /dev/null +++ b/Documentation/arm64/acpi_object_usage.txt -@@ -0,0 +1,592 @@ +@@ -0,0 +1,593 @@ +ACPI Tables +----------- +The expectations of individual ACPI tables are discussed in the list that @@ -1514,8 +253,10 @@ index 0000000..2c4f733 + +ERST Section 18.5 (signature == "ERST") + == Error Record Serialization Table == -+ Must be supplied if RAS support is provided by the platform. It -+ is recommended this table be supplied. ++ On a platform supports RAS, this table must be supplied if it is not ++ UEFI-based; if it is UEFI-based, this table may be supplied. When this ++ table is not present, UEFI run time service will be utilized to save ++ and retrieve hardware error information to and from a persistent store. + +ETDT Signature Reserved (signature == "ETDT") + == Event Timer Description Table == @@ -1584,7 +325,7 @@ index 0000000..2c4f733 + +MADT Section 5.2.12 (signature == "APIC") + == Multiple APIC Description Table == -+ Required for arm64. Only the GIC interrupt controller structures ++ Required for arm64. Only the GIC interrupt controller structures + should be used (types 0xA - 0xE). + +MCFG Signature Reserved (signature == "MCFG") @@ -1741,7 +482,7 @@ index 0000000..2c4f733 + + In either case, submit the _DSD definition along with + any driver patches for discussion, especially when -+ device properties are used. A driver will not be ++ device properties are used. A driver will not be + considered complete without a corresponding _DSD + description. Once approved by kernel maintainers, + the UUID or device properties must then be registered @@ -1794,7 +535,7 @@ index 0000000..2c4f733 + +_MAT 6.2.10 Optional; see also the MADT. + -+_MLS 6.1.7 Optional, but highly recommended for use in ++_MLS 6.1.7 Optional, but highly recommended for use in + internationalization. + +_OFF 7.1.2 It is recommended to define this method for any device @@ -1813,7 +554,7 @@ index 0000000..2c4f733 + device (e.g., \_SB.DEV0._OSC), or both. When used + as a global method, only capabilities published in + the ACPI specification are allowed. When used as -+ a device-specifc method, the process described for ++ a device-specific method, the process described for + using _DSD MUST be used to create an _OSC definition; + out-of-process use of _OSC is not allowed. That is, + submit the device-specific _OSC usage description as @@ -1848,7 +589,7 @@ index 0000000..2c4f733 +_PRW 7.2.13 Use as needed; power management specific. + +_PRx 7.2.8-11 Use as needed; power management specific. If _PR0 is -+ defined, _PR3 must also be defined. ++ defined, _PR3 must also be defined. + +_PSC 7.2.6 Use as needed; power management specific. 
+ @@ -2014,17 +755,16 @@ index 0000000..2c4f733 + + -- Section 17: NUMA support (prototypes have been submitted for + review) -+ diff --git a/Documentation/arm64/arm-acpi.txt b/Documentation/arm64/arm-acpi.txt new file mode 100644 -index 0000000..275524e +index 0000000..7d6e636 --- /dev/null +++ b/Documentation/arm64/arm-acpi.txt -@@ -0,0 +1,506 @@ +@@ -0,0 +1,505 @@ +ACPI on ARMv8 Servers +--------------------- +ACPI can be used for ARMv8 general purpose servers designed to follow -+the ARM SBSA (Server Base System Architecture) [0] and SBBR (Server ++the ARM SBSA (Server Base System Architecture) [0] and SBBR (Server +Base Boot Requirements) [1] specifications. Please note that the SBBR +can be retrieved simply by visiting [1], but the SBSA is currently only +available to those with an ARM login due to ARM IP licensing concerns. @@ -2059,10 +799,10 @@ index 0000000..275524e + +-- ACPI’s bytecode (AML) allows the platform to encode hardware behavior, + while DT explicitly does not support this. For hardware vendors, being -+ able to encode behavior is a key tool used in supporting operating ++ able to encode behavior is a key tool used in supporting operating + system releases on new hardware. + -+-- ACPI’s OSPM defines a power management model that constrains what the ++-- ACPI’s OSPM defines a power management model that constrains what the + platform is allowed to do into a specific model, while still providing + flexibility in hardware design. + @@ -2217,7 +957,7 @@ index 0000000..275524e +-------------- +Drivers should determine their probe() type by checking for a null +value for ACPI_HANDLE, or checking .of_node, or other information in -+the device structure. This is detailed further in the "Driver ++the device structure. This is detailed further in the "Driver +Recommendations" section. + +In non-driver code, if the presence of ACPI needs to be detected at @@ -2279,7 +1019,7 @@ index 0000000..275524e +only use the _DSD Device Properties UUID [5]: + + -- UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 -+ ++ + -- http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf + +The UEFI Forum provides a mechanism for registering device properties [4] @@ -2369,15 +1109,15 @@ index 0000000..275524e + +Clocks +------ -+ACPI makes the assumption that clocks are initialized by the firmware -- ++ACPI makes the assumption that clocks are initialized by the firmware -- +UEFI, in this case -- to some working value before control is handed over +to the kernel. This has implications for devices such as UARTs, or SoC-driven +LCD displays, for example. + +When the kernel boots, the clocks are assumed to be set to reasonable +working values. If for some reason the frequency needs to change -- e.g., -+throttling for power management -- the device driver should expect that -+process to be abstracted out into some ACPI method that can be invoked ++throttling for power management -- the device driver should expect that ++process to be abstracted out into some ACPI method that can be invoked +(please see the ACPI specification for further recommendations on standard +methods to be expected). The only exceptions to this are CPU clocks where +CPPC provides a much richer interface than ACPI methods. If the clocks @@ -2417,7 +1157,7 @@ index 0000000..275524e + ... +} + -+static int device_probe(stuct platform_device *pdev) ++static int device_probe(struct platform_device *pdev) +{ + ... 
+ struct device_node node = pdev->dev.of_node; @@ -2507,7 +1247,7 @@ index 0000000..275524e + from formatting) is also in Documentation/arm64/why_use_acpi.txt. + +[3] AMD ACPI for Seattle platform documentation: -+ http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2012/10/Seattle_ACPI_Guide.pdf ++ http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2012/10/Seattle_ACPI_Guide.pdf + +[4] http://www.uefi.org/acpi -- please see the link for the "ACPI _DSD Device + Property Registry Instructions" @@ -2526,243 +1266,94 @@ index 0000000..275524e +Hanjun Guo + +Grant Likely , for the "Why ACPI on ARM?" section -+ -diff --git a/Documentation/arm64/why_use_acpi.txt b/Documentation/arm64/why_use_acpi.txt +diff --git a/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt new file mode 100644 -index 0000000..9bb583e +index 0000000..ce8c30e --- /dev/null -+++ b/Documentation/arm64/why_use_acpi.txt -@@ -0,0 +1,231 @@ -+Why ACPI on ARM? -+---------------- -+Copyright (c) 2015, Linaro, Ltd. -+Author: Grant Likely ++++ b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt +@@ -0,0 +1,83 @@ ++* APM X-Gene SoC EDAC nodes + -+Why are we doing ACPI on ARM? That question has been asked many times, but -+we haven’t yet had a good summary of the most important reasons for wanting -+ACPI on ARM. This article is an attempt to state the rationale clearly. ++EDAC nodes are defined to describe on-chip error detection and correction. ++There are four types of EDAC: + -+During an email conversation late last year, Catalin Marinas asked for -+a summary of exactly why we want ACPI on ARM, Dong Wei replied with the -+following list: -+> 1. Support multiple OSes, including Linux and Windows -+> 2. Support device configurations -+> 3. Support dynamic device configurations (hot add/removal) -+> 4. Support hardware abstraction through control methods -+> 5. Support power management -+> 6. Support thermal management -+> 7. Support RAS interfaces ++ memory controller - Memory controller ++ PMD (L1/L2) - Processor module unit (PMD) L1/L2 cache ++ L3 - CPU L3 cache ++ SoC - SoC IP such as SATA, Ethernet, and etc + -+The above list is certainly true in that all of them need to be supported. -+However, that list doesn’t give the rationale for choosing ACPI. We already -+have DT mechanisms for doing most of the above, and can certainly create -+new bindings for anything that is missing. So, if it isn’t an issue of -+functionality, then how does ACPI differ from DT and why is ACPI a better -+fit for general purpose ARM servers? ++The following section describes the memory controller DT node binding. + -+The difference is in the support model. To explain what I mean, I’m first -+going to expand on each of the items above and discuss the similarities and -+differences between ACPI and DT. Then, with that as the groundwork, I’ll -+discuss how ACPI is a better fit for the general purpose hardware support -+model. ++Required properties: ++- compatible : Shall be "apm,xgene-edac-mc". ++- reg : First resource shall be the PCP resource. ++ Second resource shall be the CSW resource. ++ Third resource shall be the MCB-A resource. ++ Fourth resource shall be the MCB-B resource. ++ Fifth resource shall be the MCU resource. ++- interrupts : Interrupt-specifier for MCU error IRQ(s). + ++The following section describes the L1/L2 DT node binding. + -+Device Configurations -+--------------------- -+2. Support device configurations -+3. 
Support dynamic device configurations (hot add/removal) ++- compatible : Shall be "apm,xgene-edac-pmd". ++- reg : First resource shall be the PCP resource. ++ Second resource shall be the PMD resource. ++ Third resource shall be the PMD efuse resource. ++- interrupts : Interrupt-specifier for PMD error IRQ(s). + -+From day one, DT was about device configurations. There isn’t any significant -+difference between ACPI & DT here. In fact, the majority of ACPI tables are -+completely analogous to DT descriptions. With the exception of the DSDT and -+SSDT tables, most ACPI tables are merely flat data used to describe hardware. ++The following section describes the L3 DT node binding. + -+DT platforms have also supported dynamic configuration and hotplug for years. -+There isn’t a lot here that differentiates between ACPI and DT. The biggest -+difference is that dynamic changes to the ACPI namespace can be triggered by -+ACPI methods, whereas for DT changes are received as messages from firmware -+and have been very much platform specific (e.g. IBM pSeries does this) ++- compatible : Shall be "apm,xgene-edac-l3". ++- reg : First resource shall be the PCP resource. ++ Second resource shall be the L3 resource. ++- interrupts : Interrupt-specifier for L3 error IRQ(s). + ++The following section describes the SoC DT node binding. + -+Power Management -+---------------- -+4. Support hardware abstraction through control methods -+5. Support power management -+6. Support thermal management ++- compatible : Shall be "apm,xgene-edac-soc"". ++- reg : First resource shall be the PCP resource. ++ Second resource shall be the SoC resource. ++ Third resource shall be the register bus resource. ++- interrupts : Interrupt-specifier for SoC error IRQ(s). + -+Power, thermal, and clock management can all be dealt with as a group. ACPI -+defines a power management model (OSPM) that both the platform and the OS -+conform to. The OS implements the OSPM state machine, but the platform can -+provide state change behaviour in the form of bytecode methods. Methods can -+access hardware directly or hand off PM operations to a coprocessor. The OS -+really doesn’t have to care about the details as long as the platform obeys -+the rules of the OSPM model. ++Example: ++ edacmc0: edacmc0@7e800000 { ++ compatible = "apm,xgene-edac-mc"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e200000 0x0 0x1000>, ++ <0x0 0x7e700000 0x0 0x1000>, ++ <0x0 0x7e720000 0x0 0x1000>, ++ <0x0 0x7e800000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; + -+With DT, the kernel has device drivers for each and every component in the -+platform, and configures them using DT data. DT itself doesn’t have a PM model. -+Rather the PM model is an implementation detail of the kernel. Device drivers -+use DT data to decide how to handle PM state changes. We have clock, pinctrl, -+and regulator frameworks in the kernel for working out runtime PM. However, -+this only works when all the drivers and support code have been merged into -+the kernel. When the kernel’s PM model doesn’t work for new hardware, then we -+change the model. This works very well for mobile/embedded because the vendor -+controls the kernel. We can change things when we need to, but we also struggle -+with getting board support mainlined. 
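+
+As a simplified sketch of that DT-side division of labour (the driver and
+binding names here are invented for the example): the driver is handed its
+resources from DT data and then sequences the state changes itself through
+the clk and regulator frameworks.
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+static int widget_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	struct regulator *vdd;
+	int ret;
+
+	/* both handles are resolved from properties of the DT node */
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(vdd))
+		return PTR_ERR(vdd);
+
+	/* the driver, not the firmware, owns the power-up sequencing */
+	ret = regulator_enable(vdd);
+	if (ret)
+		return ret;
+
+	return clk_prepare_enable(clk);
+}
+
+Under ACPI/OSPM the equivalent sequencing sits behind the platform's own
+methods, so the driver does not have to carry that knowledge.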
++ edacl3: edacl3@7e600000 { ++ compatible = "apm,xgene-edac-l3"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e600000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; + -+This difference has a big impact when it comes to OS support. Engineers from -+hardware vendors, Microsoft, and most vocally Red Hat have all told me bluntly -+that rebuilding the kernel doesn’t work for enterprise OS support. Their model -+is based around a fixed OS release that ideally boots out-of-the-box. It may -+still need additional device drivers for specific peripherals/features, but -+from a system view, the OS works. When additional drivers are provided -+separately, those drivers fit within the existing OSPM model for power -+management. This is where ACPI has a technical advantage over DT. The ACPI -+OSPM model and it’s bytecode gives the HW vendors a level of abstraction -+under their control, not the kernel’s. When the hardware behaves differently -+from what the OS expects, the vendor is able to change the behaviour without -+changing the HW or patching the OS. ++ edacpmd0: edacpmd0@7c000000 { ++ compatible = "apm,xgene-edac-pmd"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7c000000 0x0 0x200000>, ++ <0x0 0x1054a000 0x0 0x10>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; + -+At this point you’d be right to point out that it is harder to get the whole -+system working correctly when behaviour is split between the kernel and the -+platform. The OS must trust that the platform doesn’t violate the OSPM model. -+All manner of bad things happen if it does. That is exactly why the DT model -+doesn’t encode behaviour: It is easier to make changes and fix bugs when -+everything is within the same code base. We don’t need a platform/kernel -+split when we can modify the kernel. -+ -+However, the enterprise folks don’t have that luxury. The platform/kernel -+split isn’t a design choice. It is a characteristic of the market. Hardware -+and OS vendors each have their own product timetables, and they don’t line -+up. The timeline for getting patches into the kernel and flowing through into -+OS releases puts OS support far downstream from the actual release of hardware. -+Hardware vendors simply cannot wait for OS support to come online to be able to -+release their products. They need to be able to work with available releases, -+and make their hardware behave in the way the OS expects. The advantage of ACPI -+OSPM is that it defines behaviour and limits what the hardware is allowed to do -+without involving the kernel. -+ -+What remains is sorting out how we make sure everything works. How do we make -+sure there is enough cross platform testing to ensure new hardware doesn’t -+ship broken and that new OS releases don’t break on old hardware? Those are -+the reasons why a UEFI/ACPI firmware summit is being organized, it’s why the -+UEFI forum holds plugfests 3 times a year, and it is why we’re working on -+FWTS and LuvOS. -+ -+ -+Reliability, Availability & Serviceability (RAS) -+------------------------------------------------ -+7. Support RAS interfaces -+ -+This isn’t a question of whether or not DT can support RAS. Of course it can. -+Rather it is a matter of RAS bindings already existing for ACPI, including a -+usage model. We’ve barely begun to explore this on DT. This item doesn’t make -+ACPI technically superior to DT, but it certainly makes it more mature. -+ -+ -+Multiplatform Support -+--------------------- -+1. 
Support multiple OSes, including Linux and Windows -+ -+I’m tackling this item last because I think it is the most contentious for -+those of us in the Linux world. I wanted to get the other issues out of the -+way before addressing it. -+ -+The separation between hardware vendors and OS vendors in the server market -+is new for ARM. For the first time ARM hardware and OS release cycles are -+completely decoupled from each other, and neither are expected to have specific -+knowledge of the other (ie. the hardware vendor doesn’t control the choice of -+OS). ARM and their partners want to create an ecosystem of independent OSes -+and hardware platforms that don’t explicitly require the former to be ported -+to the latter. -+ -+Now, one could argue that Linux is driving the potential market for ARM -+servers, and therefore Linux is the only thing that matters, but hardware -+vendors don’t see it that way. For hardware vendors it is in their best -+interest to support as wide a choice of OSes as possible in order to catch -+the widest potential customer base. Even if the majority choose Linux, some -+will choose BSD, some will choose Windows, and some will choose something -+else. Whether or not we think this is foolish is beside the point; it isn’t -+something we have influence over. -+ -+During early ARM server planning meetings between ARM, its partners and other -+industry representatives (myself included) we discussed this exact point. -+Before us were two options, DT and ACPI. As one of the Linux people in the -+room, I advised that ACPI’s closed governance model was a show stopper for -+Linux and that DT is the working interface. Microsoft on the other hand made -+it abundantly clear that ACPI was the only interface that they would support. -+For their part, the hardware vendors stated the platform abstraction behaviour -+of ACPI is a hard requirement for their support model and that they would not -+close the door on either Linux or Windows. -+ -+However, the one thing that all of us could agree on was that supporting -+multiple interfaces doesn’t help anyone: It would require twice as much -+effort on defining bindings (once for Linux-DT and once for Windows-ACPI) -+and it would require firmware to describe everything twice. Eventually we -+reached the compromise to use ACPI, but on the condition of opening the -+governance process to give Linux engineers equal influence over the -+specification. The fact that we now have a much better seat at the ACPI -+table, for both ARM and x86, is a direct result of these early ARM server -+negotiations. We are no longer second class citizens in the ACPI world and -+are actually driving much of the recent development. -+ -+I know that this line of thought is more about market forces rather than a -+hard technical argument between ACPI and DT, but it is an equally significant -+one. Agreeing on a single way of doing things is important. The ARM server -+ecosystem is better for the agreement to use the same interface for all -+operating systems. This is what is meant by standards compliant. The standard -+is a codification of the mutually agreed interface. It provides confidence -+that all vendors are using the same rules for interoperability. -+ -+ -+Summary -+------- -+To summarize, here is the short form rationale for ACPI on ARM: -+ -+-- ACPI’s bytecode allows the platform to encode behaviour. DT explicitly -+ does not support this. 
For hardware vendors, being able to encode behaviour -+ is an important tool for supporting operating system releases on new -+ hardware. -+ -+-- ACPI’s OSPM defines a power management model that constrains what the -+ platform is allowed into a specific model while still having flexibility -+ in hardware design. -+ -+-- For enterprise use-cases, ACPI has extablished bindings, such as for RAS, -+ which are used in production. DT does not. Yes, we can define those bindings -+ but doing so means ARM and x86 will use completely different code paths in -+ both firmware and the kernel. -+ -+-- Choosing a single interface for platform/OS abstraction is important. It -+ is not reasonable to require vendors to implement both DT and ACPI if they -+ want to support multiple operating systems. Agreeing on a single interface -+ instead of being fragmented into per-OS interfaces makes for better -+ interoperability overall. -+ -+-- The ACPI governance process works well and we’re at the same table as HW -+ vendors and other OS vendors. In fact, there is no longer any reason to -+ feel that ACPI is a Windows thing or that we are playing second fiddle to -+ Microsoft. The move of ACPI governance into the UEFI forum has significantly -+ opened up the processes, and currently, a large portion of the changes being -+ made to ACPI is being driven by Linux. -+ -+At the beginning of this article I made the statement that the difference -+is in the support model. For servers, responsibility for hardware behaviour -+cannot be purely the domain of the kernel, but rather is split between the -+platform and the kernel. ACPI frees the OS from needing to understand all -+the minute details of the hardware so that the OS doesn’t need to be ported -+to each and every device individually. It allows the hardware vendors to take -+responsibility for PM behaviour without depending on an OS release cycle which -+it is not under their control. -+ -+ACPI is also important because hardware and OS vendors have already worked -+out how to use it to support the general purpose ecosystem. The infrastructure -+is in place, the bindings are in place, and the process is in place. DT does -+exactly what we need it to when working with vertically integrated devices, -+but we don’t have good processes for supporting what the server vendors need. -+We could potentially get there with DT, but doing so doesn’t buy us anything. -+ACPI already does what the hardware vendors need, Microsoft won’t collaborate -+with us on DT, and the hardware vendors would still need to provide two -+completely separate firmware interface; one for Linux and one for Windows. 
++ edacsoc: edacsoc@7e930000 { ++ compatible = "apm,xgene-edac-soc"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e930000 0x0 0x1000>, ++ <0x0 0x7e000000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>, ++ <0x0 0x27 0x4>; ++ }; + diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index bfcb1a6..d6c35a7 100644 @@ -2785,8 +1376,27 @@ index bfcb1a6..d6c35a7 100644 See also Documentation/power/runtime_pm.txt, pci=noacpi +diff --git a/MAINTAINERS b/MAINTAINERS +index 0e1abe8..fb54e14 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -3714,6 +3714,14 @@ W: bluesmoke.sourceforge.net + S: Maintained + F: drivers/edac/sb_edac.c + ++EDAC-XGENE ++APPLIED MICRO (APM) X-GENE SOC EDAC ++M: Loc Ho ++M: Feng Kan ++S: Supported ++F: drivers/edac/xgene_edac.c ++F: Documentation/devicetree/bindings/edac/apm-xgene-edac.txt ++ + EDIROL UA-101/UA-1000 DRIVER + M: Clemens Ladisch + L: alsa-devel@alsa-project.org (moderated for non-subscribers) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 676454a..6ef7874 100644 +index 1b8e973..3bdd120 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1,5 +1,6 @@ @@ -2796,18 +1406,28 @@ index 676454a..6ef7874 100644 select ARCH_BINFMT_ELF_RANDOMIZE_PIE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_GCOV_PROFILE_ALL -@@ -194,6 +195,10 @@ config PCI_DOMAINS_GENERIC +@@ -22,6 +23,7 @@ config ARM64 + select COMMON_CLK + select CPU_PM if (SUSPEND || CPU_IDLE) + select DCACHE_WORD_ACCESS ++ select EDAC_SUPPORT + select GENERIC_ALLOCATOR + select GENERIC_CLOCKEVENTS + select GENERIC_CLOCKEVENTS_BROADCAST if SMP +@@ -248,6 +250,12 @@ config PCI_DOMAINS_GENERIC config PCI_SYSCALL def_bool PCI +config PCI_MMCONFIG + def_bool y ++ select PCI_ECAM ++ select PCI_ECAM_GENERIC + depends on PCI && ACPI + source "drivers/pci/Kconfig" source "drivers/pci/pcie/Kconfig" source "drivers/pci/hotplug/Kconfig" -@@ -384,6 +389,9 @@ config SMP +@@ -438,6 +446,9 @@ config SMP If you don't know what to do here, say N. 
@@ -2817,7 +1437,7 @@ index 676454a..6ef7874 100644 config SCHED_MC bool "Multi-core scheduler support" depends on SMP -@@ -658,6 +666,8 @@ source "drivers/Kconfig" +@@ -712,6 +723,8 @@ source "drivers/Kconfig" source "drivers/firmware/Kconfig" @@ -2826,6 +1446,115 @@ index 676454a..6ef7874 100644 source "fs/Kconfig" source "arch/arm64/kvm/Kconfig" +diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi +index a857794..fc1c545 100644 +--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi ++++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi +@@ -513,6 +513,104 @@ + interrupts = <0x0 0x4f 0x4>; + }; + ++ edacmc0: edacmc0@7e800000 { ++ compatible = "apm,xgene-edac-mc"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e200000 0x0 0x1000>, ++ <0x0 0x7e700000 0x0 0x1000>, ++ <0x0 0x7e720000 0x0 0x1000>, ++ <0x0 0x7e800000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacmc1: edacmc1@7e840000 { ++ compatible = "apm,xgene-edac-mc"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e200000 0x0 0x1000>, ++ <0x0 0x7e700000 0x0 0x1000>, ++ <0x0 0x7e720000 0x0 0x1000>, ++ <0x0 0x7e840000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacmc2: edacmc2@7e880000 { ++ compatible = "apm,xgene-edac-mc"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e200000 0x0 0x1000>, ++ <0x0 0x7e700000 0x0 0x1000>, ++ <0x0 0x7e720000 0x0 0x1000>, ++ <0x0 0x7e880000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacmc3: edacmc3@7e8c0000 { ++ compatible = "apm,xgene-edac-mc"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e200000 0x0 0x1000>, ++ <0x0 0x7e700000 0x0 0x1000>, ++ <0x0 0x7e720000 0x0 0x1000>, ++ <0x0 0x7e8c0000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacpmd0: edacpmd0@7c000000 { ++ compatible = "apm,xgene-edac-pmd"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7c000000 0x0 0x200000>, ++ <0x0 0x1054a000 0x0 0x10>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacpmd1: edacpmd1@7c200000 { ++ compatible = "apm,xgene-edac-pmd"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7c200000 0x0 0x200000>, ++ <0x0 0x1054a000 0x0 0x10>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacpmd2: edacpmd2@7c400000 { ++ compatible = "apm,xgene-edac-pmd"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7c400000 0x0 0x200000>, ++ <0x0 0x1054a000 0x0 0x10>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacpmd3: edacpmd3@7c600000 { ++ compatible = "apm,xgene-edac-pmd"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7c600000 0x0 0x200000>, ++ <0x0 0x1054a000 0x0 0x10>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacl3: edacl3@7e600000 { ++ compatible = "apm,xgene-edac-l3"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e600000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>; ++ }; ++ ++ edacsoc: edacsoc@7e930000 { ++ compatible = "apm,xgene-edac-soc"; ++ reg = <0x0 0x78800000 0x0 0x1000>, ++ <0x0 0x7e930000 0x0 0x1000>, ++ <0x0 0x7e000000 0x0 0x1000>; ++ interrupts = <0x0 0x20 0x4>, ++ <0x0 0x21 0x4>, ++ <0x0 0x27 0x4>; ++ }; ++ + phy1: phy@1f21a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f21a000 0x0 0x100>; diff --git a/arch/arm64/include/asm/acenv.h b/arch/arm64/include/asm/acenv.h new file mode 100644 index 0000000..b49166f @@ -2852,10 +1581,10 @@ index 0000000..b49166f +#endif /* _ASM_ACENV_H */ diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h new file 
mode 100644 -index 0000000..b4d1971 +index 0000000..0f7e976 --- /dev/null +++ b/arch/arm64/include/asm/acpi.h -@@ -0,0 +1,120 @@ +@@ -0,0 +1,99 @@ +/* + * Copyright (C) 2013-2014, Linaro Ltd. + * Author: Al Stone @@ -2870,13 +1599,28 @@ index 0000000..b4d1971 +#ifndef _ASM_ACPI_H +#define _ASM_ACPI_H + ++#include +#include + -+#include ++#include +#include + +/* Basic configuration for ACPI */ +#ifdef CONFIG_ACPI ++/* ACPI table mapping after acpi_gbl_permanent_mmap is set */ ++static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, ++ acpi_size size) ++{ ++ if (!page_is_ram(phys >> PAGE_SHIFT)) ++ return ioremap(phys, size); ++ ++ return ioremap_cache(phys, size); ++} ++#define acpi_os_ioremap acpi_os_ioremap ++ ++typedef u64 phys_cpuid_t; ++#define PHYS_CPUID_INVALID INVALID_HWID ++ +#define acpi_strict 1 /* No out-of-spec workarounds on ARM64 */ +extern int acpi_disabled; +extern int acpi_noirq; @@ -2908,33 +1652,12 @@ index 0000000..b4d1971 + acpi_noirq = 0; +} + -+/* MPIDR value provided in GICC structure is 64 bits, but the -+ * existing phys_id (CPU hardware ID) using in acpi processor -+ * driver is 32-bit, to conform to the same datatype we need -+ * to repack the GICC structure MPIDR. -+ * -+ * bits other than following 32 bits are defined as 0, so it -+ * will be no information lost after repacked. -+ * -+ * Bits [0:7] Aff0; -+ * Bits [8:15] Aff1; -+ * Bits [16:23] Aff2; -+ * Bits [32:39] Aff3; -+ */ -+static inline u32 pack_mpidr(u64 mpidr) -+{ -+ return (u32) ((mpidr & 0xff00000000) >> 8) | mpidr; -+} -+ +/* + * The ACPI processor driver for ACPI core code needs this macro + * to find out this cpu was already mapped (mapping from CPU hardware + * ID to CPU logical ID) or not. -+ * -+ * cpu_logical_map(cpu) is the mapping of MPIDR and the logical cpu, -+ * and MPIDR is the cpu hardware ID we needed to pack. + */ -+#define cpu_physical_id(cpu) pack_mpidr(cpu_logical_map(cpu)) ++#define cpu_physical_id(cpu) cpu_logical_map(cpu) + +/* + * It's used from ACPI core in kdump to boot UP system with SMP kernel, @@ -2954,27 +1677,12 @@ index 0000000..b4d1971 +extern int acpi_get_cpu_parked_address(int cpu, u64 *addr); + +#else -+static inline void disable_acpi(void) { } -+static inline void enable_acpi(void) { } +static inline bool acpi_psci_present(void) { return false; } +static inline bool acpi_psci_use_hvc(void) { return false; } +static inline void acpi_init_cpus(void) { } +static inline int acpi_get_cpu_parked_address(int cpu, u64 *addr) { return -EOPNOTSUPP; } +#endif /* CONFIG_ACPI */ + -+/* -+ * ACPI table mapping -+ */ -+static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, -+ acpi_size size) -+{ -+ if (!page_is_ram(phys >> PAGE_SHIFT)) -+ return ioremap(phys, size); -+ -+ return ioremap_cache(phys, size); -+} -+#define acpi_os_ioremap acpi_os_ioremap -+ +#endif /*_ASM_ACPI_H*/ diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index da301ee..5a31d67 100644 @@ -2987,6 +1695,113 @@ index da301ee..5a31d67 100644 +const struct cpu_operations *cpu_get_ops(const char *name); #endif /* ifndef __ASM_CPU_OPS_H */ +diff --git a/arch/arm64/include/asm/edac.h b/arch/arm64/include/asm/edac.h +new file mode 100644 +index 0000000..87469eb +--- /dev/null ++++ b/arch/arm64/include/asm/edac.h +@@ -0,0 +1,38 @@ ++/* ++ * Copyright 2013 Calxeda, Inc. ++ * Based on PPC version Copyright 2007 MontaVista Software, Inc. 
++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++ * version 2, as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ */ ++#ifndef ASM_EDAC_H ++#define ASM_EDAC_H ++/* ++ * ECC atomic, DMA, SMP and interrupt safe scrub function. ++ * Implements the per arch atomic_scrub() that EDAC use for software ++ * ECC scrubbing. It reads memory and then writes back the original ++ * value, allowing the hardware to detect and correct memory errors. ++ */ ++static inline void atomic_scrub(void *va, u32 size) ++{ ++ unsigned int *virt_addr = va; ++ unsigned int i; ++ ++ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) { ++ long result; ++ unsigned long tmp; ++ ++ asm volatile("/* atomic_scrub */\n" ++ "1: ldxr %w0, %2\n" ++ " stxr %w1, %w0, %2\n" ++ " cbnz %w1, 1b" ++ : "=&r" (result), "=&r" (tmp), "+Q" (*virt_addr) : : ); ++ } ++} ++#endif +diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h +index ef57220..7129125 100644 +--- a/arch/arm64/include/asm/efi.h ++++ b/arch/arm64/include/asm/efi.h +@@ -6,33 +6,29 @@ + + #ifdef CONFIG_EFI + extern void efi_init(void); ++extern void efi_idmap_init(void); + #else + #define efi_init() ++#define efi_idmap_init() + #endif + + #define efi_call_virt(f, ...) \ + ({ \ +- efi_##f##_t *__f; \ ++ efi_##f##_t *__f = efi.systab->runtime->f; \ + efi_status_t __s; \ + \ + kernel_neon_begin(); \ +- efi_virtmap_load(); \ +- __f = efi.systab->runtime->f; \ + __s = __f(__VA_ARGS__); \ +- efi_virtmap_unload(); \ + kernel_neon_end(); \ + __s; \ + }) + + #define __efi_call_virt(f, ...) \ + ({ \ +- efi_##f##_t *__f; \ ++ efi_##f##_t *__f = efi.systab->runtime->f; \ + \ + kernel_neon_begin(); \ +- efi_virtmap_load(); \ +- __f = efi.systab->runtime->f; \ + __f(__VA_ARGS__); \ +- efi_virtmap_unload(); \ + kernel_neon_end(); \ + }) + +@@ -50,20 +46,4 @@ extern void efi_init(void); + + #define EFI_ALLOC_ALIGN SZ_64K + +-/* +- * On ARM systems, virtually remapped UEFI runtime services are set up in two +- * distinct stages: +- * - The stub retrieves the final version of the memory map from UEFI, populates +- * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime +- * service to communicate the new mapping to the firmware (Note that the new +- * mapping is not live at this time) +- * - During an early initcall(), the EFI system table is permanently remapped +- * and the virtual remapping of the UEFI Runtime Services regions is loaded +- * into a private set of page tables. If this all succeeds, the Runtime +- * Services are enabled and the EFI_RUNTIME_SERVICES bit set. 
+- */ +- +-void efi_virtmap_load(void); +-void efi_virtmap_unload(void); +- + #endif /* _ASM_EFI_H */ diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 1f65be3..c0f89a0 100644 --- a/arch/arm64/include/asm/elf.h @@ -3015,24 +1830,65 @@ index defa0ff9..f196e40 100644 extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); #include +diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h +index 94c5367..bbb251b 100644 +--- a/arch/arm64/include/asm/irq.h ++++ b/arch/arm64/include/asm/irq.h +@@ -1,6 +1,8 @@ + #ifndef __ASM_IRQ_H + #define __ASM_IRQ_H + ++#include ++ + #include + + struct pt_regs; +@@ -8,4 +10,15 @@ struct pt_regs; + extern void migrate_irqs(void); + extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); + ++static inline void acpi_irq_init(void) ++{ ++ /* ++ * Hardcode ACPI IRQ chip initialization to GICv2 for now. ++ * Proper irqchip infrastructure will be implemented along with ++ * incoming GICv2m|GICv3|ITS bits. ++ */ ++ acpi_gic_init(); ++} ++#define acpi_irq_init acpi_irq_init ++ + #endif +diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h +index 3d31176..5fd40c4 100644 +--- a/arch/arm64/include/asm/mmu.h ++++ b/arch/arm64/include/asm/mmu.h +@@ -31,6 +31,8 @@ extern void paging_init(void); + extern void setup_mm_for_reboot(void); + extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); + extern void init_mem_pgprot(void); ++/* create an identity mapping for memory (or io if map_io is true) */ ++extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io); + extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot); diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h -index 872ba93..c47baa4 100644 +index 872ba93..bbcf88d 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h -@@ -24,6 +24,12 @@ - */ - #define PCI_DMA_BUS_IS_PHYS (0) +@@ -27,11 +27,77 @@ + extern int isa_dma_bridge_buggy; + #ifdef CONFIG_PCI +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + /* no legacy IRQ on arm64 */ + return -ENODEV; +} + - extern int isa_dma_bridge_buggy; - - #ifdef CONFIG_PCI -@@ -33,5 +39,65 @@ static inline int pci_proc_domain(struct pci_bus *bus) + static inline int pci_proc_domain(struct pci_bus *bus) + { + return 1; } #endif /* CONFIG_PCI */ @@ -3141,10 +1997,10 @@ index 780f82c..3411561 100644 */ asmlinkage void secondary_start_kernel(void); diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile -index bef04af..f484339 100644 +index 5ee07ee..b3ac38a 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile -@@ -23,7 +23,8 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ +@@ -24,7 +24,8 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ ../../arm/kernel/opcodes.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o @@ -3154,7 +2010,7 @@ index bef04af..f484339 100644 arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o -@@ -34,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB) += kgdb.o +@@ -35,6 +36,7 @@ arm64-obj-$(CONFIG_KGDB) += kgdb.o arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o arm64-obj-$(CONFIG_PCI) += pci.o arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o @@ -3164,10 
+2020,10 @@ index bef04af..f484339 100644 obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c new file mode 100644 -index 0000000..56127e9 +index 0000000..0bb0f1f --- /dev/null +++ b/arch/arm64/kernel/acpi.c -@@ -0,0 +1,397 @@ +@@ -0,0 +1,432 @@ +/* + * ARM64 Specific Low-Level ACPI Boot Support + * @@ -3192,6 +2048,7 @@ index 0000000..56127e9 +#include +#include +#include ++#include +#include +#include + @@ -3199,14 +2056,51 @@ index 0000000..56127e9 +#include +#include + -+int acpi_noirq; /* skip ACPI IRQ initialization */ -+int acpi_disabled; ++int acpi_noirq = 1; /* skip ACPI IRQ initialization */ ++int acpi_disabled = 1; +EXPORT_SYMBOL(acpi_disabled); + -+int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */ ++int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */ +EXPORT_SYMBOL(acpi_pci_disabled); + -+static int enabled_cpus; /* Processors (GICC) with enabled flag in MADT */ ++/* Processors with enabled flag and sane MPIDR */ ++static int enabled_cpus; ++ ++/* Boot CPU is valid or not in MADT */ ++static bool bootcpu_valid __initdata; ++ ++static bool param_acpi_off __initdata; ++static bool param_acpi_force __initdata; ++ ++static int __init parse_acpi(char *arg) ++{ ++ if (!arg) ++ return -EINVAL; ++ ++ /* "acpi=off" disables both ACPI table parsing and interpreter */ ++ if (strcmp(arg, "off") == 0) ++ param_acpi_off = true; ++ else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */ ++ param_acpi_force = true; ++ else ++ return -EINVAL; /* Core will print when we return error */ ++ ++ return 0; ++} ++early_param("acpi", parse_acpi); ++ ++static int __init dt_scan_depth1_nodes(unsigned long node, ++ const char *uname, int depth, ++ void *data) ++{ ++ /* ++ * Return 1 as soon as we encounter a node at depth 1 that is ++ * not the /chosen node. ++ */ ++ if (depth == 1 && (strcmp(uname, "chosen") != 0)) ++ return 1; ++ return 0; ++} + +static char *boot_method; +static u64 parked_address[NR_CPUS]; @@ -3223,7 +2117,7 @@ index 0000000..56127e9 + */ +char *__init __acpi_map_table(unsigned long phys, unsigned long size) +{ -+ if (!phys || !size) ++ if (!size) + return NULL; + + return early_memremap(phys, size); @@ -3247,7 +2141,7 @@ index 0000000..56127e9 + */ +static int __init acpi_map_gic_cpu_interface(u64 mpidr, u64 parked_addr, u8 enabled) +{ -+ int cpu; ++ int i; + + if (mpidr == INVALID_HWID) { + pr_info("Skip MADT cpu entry with invalid MPIDR\n"); @@ -3264,62 +2158,49 @@ index 0000000..56127e9 + return -EINVAL; + } + -+ /* No need to check duplicate MPIDRs for the first CPU */ -+ if (enabled_cpus) { -+ /* -+ * Duplicate MPIDRs are a recipe for disaster. Scan -+ * all initialized entries and check for -+ * duplicates. If any is found just ignore the CPU. 
-+ */ -+ for_each_possible_cpu(cpu) { -+ if (cpu_logical_map(cpu) == mpidr) { -+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n", -+ mpidr); -+ return -EINVAL; -+ } -+ } -+ -+ /* allocate a logical cpu id for the new comer */ -+ cpu = cpumask_next_zero(-1, cpu_possible_mask); -+ } else { -+ /* -+ * First GICC entry must be BSP as ACPI spec said -+ * in section 5.2.12.15 -+ */ -+ if (cpu_logical_map(0) != mpidr) { -+ pr_err("First GICC entry with MPIDR 0x%llx is not BSP\n", ++ /* Check if GICC structure of boot CPU is available in the MADT */ ++ if (cpu_logical_map(0) == mpidr) { ++ if (bootcpu_valid) { ++ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n", + mpidr); + return -EINVAL; + } + -+ /* -+ * boot_cpu_init() already hold bit 0 in cpu_possible_mask -+ * for BSP, no need to allocate again. -+ */ -+ cpu = 0; ++ bootcpu_valid = true; ++ } ++ ++ /* ++ * Duplicate MPIDRs are a recipe for disaster. Scan ++ * all initialized entries and check for ++ * duplicates. If any is found just ignore the CPU. ++ */ ++ for (i = 1; i < enabled_cpus; i++) { ++ if (cpu_logical_map(i) == mpidr) { ++ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n", ++ mpidr); ++ return -EINVAL; ++ } + } + + if (!boot_method) + return -EOPNOTSUPP; + -+ parked_address[cpu] = parked_addr; -+ cpu_ops[cpu] = cpu_get_ops(boot_method); ++ parked_address[enabled_cpus] = parked_addr; ++ cpu_ops[enabled_cpus] = cpu_get_ops(boot_method); + /* CPU 0 was already initialized */ -+ if (cpu) { -+ if (!cpu_ops[cpu]) ++ if (enabled_cpus) { ++ if (!cpu_ops[enabled_cpus]) + return -EINVAL; + -+ if (cpu_ops[cpu]->cpu_init(NULL, cpu)) ++ if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus)) + return -EOPNOTSUPP; + + /* map the logical cpu id to cpu MPIDR */ -+ cpu_logical_map(cpu) = mpidr; -+ -+ set_cpu_possible(cpu, true); ++ cpu_logical_map(enabled_cpus) = mpidr; + } + + enabled_cpus++; -+ return cpu; ++ return enabled_cpus; +} + +static int __init @@ -3344,7 +2225,7 @@ index 0000000..56127e9 +/* Parse GIC cpu interface entries in MADT for SMP init */ +void __init acpi_init_cpus(void) +{ -+ int count; ++ int count, i; + + /* + * do a partial walk of MADT to determine how many CPUs @@ -3362,6 +2243,14 @@ index 0000000..56127e9 + return; + } + ++ if (!bootcpu_valid) { ++ pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n"); ++ return; ++ } ++ ++ for (i = 0; i < enabled_cpus; i++) ++ set_cpu_possible(i, true); ++ + /* Make boot-up look pretty */ + pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus); +} @@ -3422,8 +2311,13 @@ index 0000000..56127e9 + + args.np = acpi_irq_domain->of_node; + args.args_count = 3; -+ args.args[0] = 0; -+ args.args[1] = gsi - 32; ++ if (gsi < 32) { ++ args.args[0] = 1; ++ args.args[1] = gsi - 16; ++ } else { ++ args.args[0] = 0; ++ args.args[1] = gsi - 32; ++ } + args.args[2] = irq_type; + + irq = __irq_domain_alloc_irqs(acpi_irq_domain, -1, 1, @@ -3458,6 +2352,11 @@ index 0000000..56127e9 + */ + if (table->revision > 5 || + (table->revision == 5 && fadt->minor_revision >= 1)) { ++ if (!acpi_gbl_reduced_hardware) { ++ pr_err("Not hardware reduced ACPI mode, will not be supported\n"); ++ goto disable_acpi; ++ } ++ + /* + * ACPI 5.1 only has two explicit methods to boot up SMP, + * PSCI and Parking protocol, but the Parking protocol is @@ -3477,8 +2376,9 @@ index 0000000..56127e9 + + pr_warn("Unsupported FADT revision %d.%d, should be 5.1+, will disable ACPI\n", + table->revision, fadt->minor_revision); -+ disable_acpi(); + ++disable_acpi: ++ 
disable_acpi(); + return -EINVAL; +} + @@ -3493,10 +2393,18 @@ index 0000000..56127e9 + */ +void __init acpi_boot_table_init(void) +{ -+ /* If acpi_disabled, bail out */ -+ if (acpi_disabled) ++ /* ++ * Enable ACPI instead of device tree unless ++ * - ACPI has been disabled explicitly (acpi=off), or ++ * - the device tree is not empty (it has more than just a /chosen node) ++ * and ACPI has not been force enabled (acpi=force) ++ */ ++ if (param_acpi_off || ++ (!param_acpi_force && of_scan_flat_dt(dt_scan_depth1_nodes, NULL))) + return; + ++ enable_acpi(); ++ + /* Initialize the ACPI boot-time table parser. */ + if (acpi_table_init()) { + disable_acpi(); @@ -3548,23 +2456,6 @@ index 0000000..56127e9 + + return 0; +} -+ -+static int __init parse_acpi(char *arg) -+{ -+ if (!arg) -+ return -EINVAL; -+ -+ /* "acpi=off" disables both ACPI table parsing and interpreter */ -+ if (strcmp(arg, "off") == 0) -+ disable_acpi(); -+ else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */ -+ enable_acpi(); -+ else -+ return -EINVAL; /* Core will print when we return error */ -+ -+ return 0; -+} -+early_param("acpi", parse_acpi); diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index cce9524..c50ca8f 100644 --- a/arch/arm64/kernel/cpu_ops.c @@ -3595,54 +2486,343 @@ index cce9524..c50ca8f 100644 const struct cpu_operations **ops = supported_cpu_ops; diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c -index 2b8d70164428..f5808cee51a2 100644 +index 2b8d701..0ea8829 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c -@@ -363,3 +363,42 @@ bool efi_poweroff_required(void) - { - return efi_enabled(EFI_RUNTIME_SERVICES); +@@ -11,45 +11,26 @@ + * + */ + +-#include + #include + #include + #include + #include +-#include + #include + #include + #include +-#include +-#include +-#include + #include + #include +-#include + + #include + #include + #include + #include +-#include +-#include + + struct efi_memory_map memmap; + +-static u64 efi_system_table; +- +-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss; ++static efi_runtime_services_t *runtime; + +-static struct mm_struct efi_mm = { +- .mm_rb = RB_ROOT, +- .pgd = efi_pgd, +- .mm_users = ATOMIC_INIT(2), +- .mm_count = ATOMIC_INIT(1), +- .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), +- .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), +- .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), +- INIT_MM_CONTEXT(efi_mm) +-}; ++static u64 efi_system_table; + + static int uefi_debug __initdata; + static int __init uefi_debug_setup(char *str) +@@ -67,33 +48,30 @@ static int __init is_normal_ram(efi_memory_desc_t *md) + return 0; } + +-/* +- * Translate a EFI virtual address into a physical address: this is necessary, +- * as some data members of the EFI system table are virtually remapped after +- * SetVirtualAddressMap() has been called. +- */ +-static phys_addr_t efi_to_phys(unsigned long addr) ++static void __init efi_setup_idmap(void) + { ++ struct memblock_region *r; + efi_memory_desc_t *md; ++ u64 paddr, npages, size; + ++ for_each_memblock(memory, r) ++ create_id_mapping(r->base, r->size, 0); + -+ -+/* -+ * If nothing else is handling pm_power_off, use EFI -+ * -+ * When Guenter Roeck's power-off handler call chain patches land, -+ * we just need to return true unconditionally. 
-+ */ -+bool efi_poweroff_required(void) -+{ -+ return pm_power_off == NULL; ++ /* map runtime io spaces */ + for_each_efi_memory_desc(&memmap, md) { +- if (!(md->attribute & EFI_MEMORY_RUNTIME)) ++ if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md)) + continue; +- if (md->virt_addr == 0) +- /* no virtual mapping has been installed by the stub */ +- break; +- if (md->virt_addr <= addr && +- (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) +- return md->phys_addr + addr - md->virt_addr; ++ paddr = md->phys_addr; ++ npages = md->num_pages; ++ memrange_efi_to_native(&paddr, &npages); ++ size = npages << PAGE_SHIFT; ++ create_id_mapping(paddr, size, 1); + } +- return addr; + } + + static int __init uefi_init(void) + { + efi_char16_t *c16; +- void *config_tables; +- u64 table_size; + char vendor[100] = "unknown"; + int i, retval; + +@@ -121,7 +99,7 @@ static int __init uefi_init(void) + efi.systab->hdr.revision & 0xffff); + + /* Show what we know for posterity */ +- c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), ++ c16 = early_memremap(efi.systab->fw_vendor, + sizeof(vendor)); + if (c16) { + for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) +@@ -134,14 +112,8 @@ static int __init uefi_init(void) + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff, vendor); + +- table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; +- config_tables = early_memremap(efi_to_phys(efi.systab->tables), +- table_size); ++ retval = efi_config_init(NULL); + +- retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, +- sizeof(efi_config_table_64_t), NULL); +- +- early_memunmap(config_tables, table_size); + out: + early_memunmap(efi.systab, sizeof(efi_system_table_t)); + return retval; +@@ -226,55 +198,63 @@ void __init efi_init(void) + return; + + reserve_regions(); +- early_memunmap(memmap.map, params.mmap_size); + } + +-static bool __init efi_virtmap_init(void) ++void __init efi_idmap_init(void) + { +- efi_memory_desc_t *md; ++ if (!efi_enabled(EFI_BOOT)) ++ return; + +- for_each_efi_memory_desc(&memmap, md) { +- u64 paddr, npages, size; +- pgprot_t prot; ++ /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ ++ efi_setup_idmap(); ++ early_memunmap(memmap.map, memmap.map_end - memmap.map); +} -+ -+static int arm64_efi_restart(struct notifier_block *this, -+ unsigned long mode, void *cmd) + +- if (!(md->attribute & EFI_MEMORY_RUNTIME)) +- continue; +- if (md->virt_addr == 0) +- return false; ++static int __init remap_region(efi_memory_desc_t *md, void **new) +{ -+ efi_reboot(reboot_mode, cmd); -+ return NOTIFY_DONE; -+} ++ u64 paddr, vaddr, npages, size; + +- paddr = md->phys_addr; +- npages = md->num_pages; +- memrange_efi_to_native(&paddr, &npages); +- size = npages << PAGE_SHIFT; ++ paddr = md->phys_addr; ++ npages = md->num_pages; ++ memrange_efi_to_native(&paddr, &npages); ++ size = npages << PAGE_SHIFT; + +- pr_info(" EFI remap 0x%016llx => %p\n", +- md->phys_addr, (void *)md->virt_addr); ++ if (is_normal_ram(md)) ++ vaddr = (__force u64)ioremap_cache(paddr, size); ++ else ++ vaddr = (__force u64)ioremap(paddr, size); + +- /* +- * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be +- * executable, everything else can be mapped with the XN bits +- * set. 
+- */ +- if (!is_normal_ram(md)) +- prot = __pgprot(PROT_DEVICE_nGnRE); +- else if (md->type == EFI_RUNTIME_SERVICES_CODE) +- prot = PAGE_KERNEL_EXEC; +- else +- prot = PAGE_KERNEL; +- +- create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); ++ if (!vaddr) { ++ pr_err("Unable to remap 0x%llx pages @ %p\n", ++ npages, (void *)paddr); ++ return 0; + } +- return true; + -+static struct notifier_block arm64_efi_restart_nb = { -+ .notifier_call = arm64_efi_restart, -+ .priority = INT_MAX, -+}; ++ /* adjust for any rounding when EFI and system pagesize differs */ ++ md->virt_addr = vaddr + (md->phys_addr - paddr); + -+static int __init arm64_register_efi_restart(void) -+{ -+ int ret = 0; ++ if (uefi_debug) ++ pr_info(" EFI remap 0x%012llx => %p\n", ++ md->phys_addr, (void *)md->virt_addr); + -+ if (efi_enabled(EFI_RUNTIME_SERVICES)) { -+ ret = register_restart_handler(&arm64_efi_restart_nb); -+ if (ret) -+ pr_err("%s: cannot register restart handler, %d\n", -+ __func__, ret); ++ memcpy(*new, md, memmap.desc_size); ++ *new += memmap.desc_size; ++ ++ return 1; + } + + /* +- * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., +- * non-early mapping of the UEFI system table and virtual mappings for all +- * EFI_MEMORY_RUNTIME regions. ++ * Switch UEFI from an identity map to a kernel virtual map + */ +-static int __init arm64_enable_runtime_services(void) ++static int __init arm64_enter_virtual_mode(void) + { ++ efi_memory_desc_t *md; ++ phys_addr_t virtmap_phys; ++ void *virtmap, *virt_md; ++ efi_status_t status; + u64 mapsize; ++ int count = 0; ++ unsigned long flags; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); +@@ -298,28 +278,79 @@ static int __init arm64_enable_runtime_services(void) + memmap.map_end = memmap.map + mapsize; + efi.memmap = &memmap; + +- efi.systab = (__force void *)ioremap_cache(efi_system_table, +- sizeof(efi_system_table_t)); +- if (!efi.systab) { +- pr_err("Failed to remap EFI System Table\n"); ++ /* Map the runtime regions */ ++ virtmap = kmalloc(mapsize, GFP_KERNEL); ++ if (!virtmap) { ++ pr_err("Failed to allocate EFI virtual memmap\n"); + return -1; + } ++ virtmap_phys = virt_to_phys(virtmap); ++ virt_md = virtmap; ++ ++ for_each_efi_memory_desc(&memmap, md) { ++ if (!(md->attribute & EFI_MEMORY_RUNTIME)) ++ continue; ++ if (!remap_region(md, &virt_md)) ++ goto err_unmap; ++ ++count; + } -+ return ret; -+} -+late_initcall(arm64_register_efi_restart); + ++ efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table); ++ if (!efi.systab) { ++ /* ++ * If we have no virtual mapping for the System Table at this ++ * point, the memory map doesn't cover the physical offset where ++ * it resides. This means the System Table will be inaccessible ++ * to Runtime Services themselves once the virtual mapping is ++ * installed. 
++ */ ++ pr_err("Failed to remap EFI System Table -- buggy firmware?\n"); ++ goto err_unmap; ++ } + set_bit(EFI_SYSTEM_TABLES, &efi.flags); + +- if (!efi_virtmap_init()) { +- pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); ++ local_irq_save(flags); ++ cpu_switch_mm(idmap_pg_dir, &init_mm); ++ ++ /* Call SetVirtualAddressMap with the physical address of the map */ ++ runtime = efi.systab->runtime; ++ efi.set_virtual_address_map = runtime->set_virtual_address_map; ++ ++ status = efi.set_virtual_address_map(count * memmap.desc_size, ++ memmap.desc_size, ++ memmap.desc_version, ++ (efi_memory_desc_t *)virtmap_phys); ++ cpu_set_reserved_ttbr0(); ++ flush_tlb_all(); ++ local_irq_restore(flags); ++ ++ kfree(virtmap); ++ ++ if (status != EFI_SUCCESS) { ++ pr_err("Failed to set EFI virtual address map! [%lx]\n", ++ status); + return -1; + } + + /* Set up runtime services function pointers */ ++ runtime = efi.systab->runtime; + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + efi.runtime_version = efi.systab->hdr.revision; + + return 0; ++ ++err_unmap: ++ /* unmap all mappings that succeeded: there are 'count' of those */ ++ for (virt_md = virtmap; count--; virt_md += memmap.desc_size) { ++ md = virt_md; ++ iounmap((__force void __iomem *)md->virt_addr); ++ } ++ kfree(virtmap); ++ return -1; + } +-early_initcall(arm64_enable_runtime_services); ++early_initcall(arm64_enter_virtual_mode); + + static int __init arm64_dmi_init(void) + { +@@ -335,26 +366,6 @@ static int __init arm64_dmi_init(void) + } + core_initcall(arm64_dmi_init); + +-static void efi_set_pgd(struct mm_struct *mm) +-{ +- cpu_switch_mm(mm->pgd, mm); +- flush_tlb_all(); +- if (icache_is_aivivt()) +- __flush_icache_all(); +-} +- +-void efi_virtmap_load(void) +-{ +- preempt_disable(); +- efi_set_pgd(&efi_mm); +-} +- +-void efi_virtmap_unload(void) +-{ +- efi_set_pgd(current->active_mm); +- preempt_enable(); +-} +- + /* + * UpdateCapsule() depends on the system being shutdown via + * ResetSystem(). 
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c -index 6f93c24..c870fa4 100644 +index 6f93c24..8456e72 100644 --- a/arch/arm64/kernel/pci.c +++ b/arch/arm64/kernel/pci.c @@ -10,14 +10,17 @@ @@ -3660,14 +2840,23 @@ index 6f93c24..c870fa4 100644 #include - +#include -+#include ++#include #include /* -@@ -37,12 +40,429 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, - return res->start; - } - +@@ -42,7 +45,424 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, + */ + int pcibios_add_device(struct pci_dev *dev) + { +- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); ++ if (acpi_disabled) ++ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_ACPI ++ +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + struct pci_sysdata *sd; @@ -3678,18 +2867,6 @@ index 6f93c24..c870fa4 100644 + } + return 0; +} -+ - /* - * Try to assign the IRQ number from DT when adding a new device - */ - int pcibios_add_device(struct pci_dev *dev) - { -- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); -+ if (acpi_disabled) -+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); -+ -+ return 0; -+} + +void pcibios_add_bus(struct pci_bus *bus) +{ @@ -3702,13 +2879,13 @@ index 6f93c24..c870fa4 100644 + if (!acpi_disabled) + acpi_pci_remove_bus(bus); +} - ++ +int pcibios_enable_irq(struct pci_dev *dev) +{ + if (!acpi_disabled && !pci_dev_msi_enabled(dev)) + acpi_pci_irq_enable(dev); - return 0; - } ++ return 0; ++} + +int pcibios_disable_irq(struct pci_dev *dev) +{ @@ -3733,7 +2910,7 @@ index 6f93c24..c870fa4 100644 +static int __init pcibios_assign_resources(void) +{ + struct pci_bus *root_bus; -+ + + if (acpi_disabled) + return 0; + @@ -3741,17 +2918,14 @@ index 6f93c24..c870fa4 100644 + pcibios_resource_survey_bus(root_bus); + pci_assign_unassigned_root_bus_resources(root_bus); + } -+ return 0; -+} -+ + return 0; + } +/* + * fs_initcall comes after subsys_initcall, so we know acpi scan + * has run. 
+ */ +fs_initcall(pcibios_assign_resources); + -+#ifdef CONFIG_ACPI -+ +static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *value) +{ @@ -3797,6 +2971,7 @@ index 6f93c24..c870fa4 100644 + if (ACPI_SUCCESS(status) && + (addr->resource_type == ACPI_MEMORY_RANGE || + addr->resource_type == ACPI_IO_RANGE) && ++ addr->producer_consumer == ACPI_PRODUCER && + addr->address.address_length > 0) { + return AE_OK; + } @@ -4017,7 +3192,7 @@ index 6f93c24..c870fa4 100644 +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +{ + struct acpi_device *device = root->device; -+ struct pci_mmcfg_region *mcfg; ++ struct pci_ecam_region *mcfg; + struct pci_root_info *info; + int domain = root->segment; + int busnum = root->secondary.start; @@ -4027,7 +3202,7 @@ index 6f93c24..c870fa4 100644 + int node; + + /* we need mmconfig */ -+ mcfg = pci_mmconfig_lookup(domain, busnum); ++ mcfg = pci_ecam_lookup(domain, busnum); + if (!mcfg) { + pr_err("pci_bus %04x:%02x has no MCFG table\n", + domain, busnum); @@ -4216,7 +3391,7 @@ index 25a5308..1e4fd17 100644 { return this_cpu_ptr(&cpu_hw_events); diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c -index 3425f31..bab2bea 100644 +index 9b8a70a..d3c52ce 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -15,6 +15,7 @@ @@ -4235,7 +3410,7 @@ index 3425f31..bab2bea 100644 #include #include #include -@@ -304,6 +306,33 @@ static void psci_sys_poweroff(void) +@@ -273,6 +275,33 @@ static void psci_sys_poweroff(void) invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); } @@ -4269,7 +3444,7 @@ index 3425f31..bab2bea 100644 /* * PSCI Function IDs for v0.2+ are well defined so use * standard values. -@@ -337,29 +366,7 @@ static int __init psci_0_2_init(struct device_node *np) +@@ -306,29 +335,7 @@ static int __init psci_0_2_init(struct device_node *np) } } @@ -4300,7 +3475,7 @@ index 3425f31..bab2bea 100644 out_put_node: of_node_put(np); -@@ -412,7 +419,7 @@ static const struct of_device_id psci_of_match[] __initconst = { +@@ -381,7 +388,7 @@ static const struct of_device_id psci_of_match[] __initconst = { {}, }; @@ -4309,7 +3484,7 @@ index 3425f31..bab2bea 100644 { struct device_node *np; const struct of_device_id *matched_np; -@@ -427,6 +434,29 @@ int __init psci_init(void) +@@ -396,6 +403,29 @@ int __init psci_init(void) return init_fn(np); } @@ -4340,7 +3515,7 @@ index 3425f31..bab2bea 100644 static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c -index e8420f6..0029b7a 100644 +index e8420f6..5c3e289 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -17,6 +17,7 @@ @@ -4351,69 +3526,15 @@ index e8420f6..0029b7a 100644 #include #include #include -@@ -62,6 +63,7 @@ - #include - #include - #include +@@ -46,6 +47,7 @@ + #include + #include + +#include - - unsigned int processor_id; - EXPORT_SYMBOL(processor_id); -@@ -351,6 +353,29 @@ static void __init request_standard_resources(void) - } - } - -+static int __init dt_scan_chosen(unsigned long node, const char *uname, -+ int depth, void *data) -+{ -+ const char *p; -+ -+ if (depth != 1 || !data || (strcmp(uname, "chosen") != 0)) -+ return 0; -+ -+ p = of_get_flat_dt_prop(node, "linux,uefi-stub-generated-dtb", NULL); -+ *(bool *)data = p ? 
true : false; -+ -+ return 1; -+} -+ -+static bool __init is_uefi_stub_generated_dtb(void) -+{ -+ bool flag = false; -+ -+ of_scan_flat_dt(dt_scan_chosen, &flag); -+ -+ return flag; -+} -+ - u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; - - void __init setup_arch(char **cmdline_p) -@@ -369,9 +394,23 @@ void __init setup_arch(char **cmdline_p) - early_fixmap_init(); - early_ioremap_init(); - -+ /* -+ * Disable ACPI before early parameters parsed and -+ * it will be enabled in parse_early_param() if -+ * "acpi=force" is passed -+ */ -+ disable_acpi(); -+ - parse_early_param(); - - /* -+ * If no dtb provided by firmware, enable ACPI and give system a -+ * chance to boot with ACPI configuration data -+ */ -+ if (is_uefi_stub_generated_dtb() && acpi_disabled) -+ enable_acpi(); -+ -+ /* - * Unmask asynchronous aborts after bringing up possible earlycon. - * (Report possible System Errors once we can report this occurred) - */ -@@ -380,18 +419,27 @@ void __init setup_arch(char **cmdline_p) + #include + #include + #include +@@ -380,18 +382,28 @@ void __init setup_arch(char **cmdline_p) efi_init(); arm64_memblock_init(); @@ -4423,6 +3544,7 @@ index e8420f6..0029b7a 100644 paging_init(); request_standard_resources(); ++ efi_idmap_init(); early_ioremap_reset(); - unflatten_device_tree(); @@ -4446,7 +3568,7 @@ index e8420f6..0029b7a 100644 smp_build_mpidr_hash(); #endif -@@ -547,3 +595,25 @@ const struct seq_operations cpuinfo_op = { +@@ -547,3 +559,25 @@ const struct seq_operations cpuinfo_op = { .stop = c_stop, .show = c_show }; @@ -4627,7 +3749,7 @@ index 1a7125c..42f9195 100644 if (!arch_timer_rate) panic("Unable to initialise architected timer.\n"); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c -index 0a24b9b..af90cdb 100644 +index 58e0c2b..360edc6 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -23,8 +23,14 @@ @@ -4645,68 +3767,208 @@ index 0a24b9b..af90cdb 100644 #include -diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h -index 4e370a5..f2b132b 100644 ---- a/arch/x86/include/asm/pci.h -+++ b/arch/x86/include/asm/pci.h -@@ -71,6 +71,48 @@ void pcibios_set_master(struct pci_dev *dev); - struct irq_routing_table *pcibios_get_irq_routing_table(void); - int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); +@@ -409,10 +415,102 @@ out: + return -ENOMEM; + } -+/* -+ * AMD Fam10h CPUs are buggy, and cannot access MMIO config space -+ * on their northbrige except through the * %eax register. As such, you MUST -+ * NOT use normal IOMEM accesses, you need to only use the magic mmio-config -+ * accessor functions. -+ * In fact just use pci_config_*, nothing else please. -+ */ -+static inline unsigned char mmio_config_readb(void __iomem *pos) ++#ifdef CONFIG_PCI ++static void arm64_of_set_dma_ops(void *_dev) +{ -+ u8 val; -+ asm volatile("movb (%1),%%al" : "=a" (val) : "r" (pos)); -+ return val; ++ struct device *dev = _dev; ++ ++ /* ++ * PCI devices won't have an ACPI handle but the bridge will. ++ * Search up the device chain until we find an of_node ++ * to check. 
++ */ ++ while (dev) { ++ if (dev->of_node) { ++ if (of_dma_is_coherent(dev->of_node)) ++ ((struct device *)_dev)->archdata.dma_coherent = true; ++ break; ++ } ++ dev = dev->parent; ++ } ++} ++#else ++static inline arm64_of_set_dma_ops(void *_dev) {} ++#endif ++ ++ ++#ifdef CONFIG_ACPI ++static void arm64_acpi_set_dma_ops(void *_dev) ++{ ++ struct device *dev = _dev; ++ ++ /* ++ * Kernel defaults to noncoherent ops but ACPI 5.1 spec says arm64 ++ * defaults to coherent. Set coherent ops if _CCA not found or _CCA ++ * found and non-zero. ++ * ++ * PCI devices won't have an of_node but the bridge will. ++ * Search up the device chain until we find an ACPI handle ++ * to check. ++ */ ++ while (dev) { ++ if (ACPI_HANDLE(dev)) { ++ acpi_status status; ++ int coherent; ++ status = acpi_check_coherency(ACPI_HANDLE(dev), ++ &coherent); ++ if (ACPI_FAILURE(status) || coherent) ++ ((struct device *)_dev)->archdata.dma_coherent = true; ++ break; ++ } ++ dev = dev->parent; ++ } ++} ++#else ++static inline arm64_acpi_set_dma_ops(void *_dev) {} ++#endif ++ ++static int dma_bus_notifier(struct notifier_block *nb, ++ unsigned long event, void *_dev) ++{ ++ if (event != BUS_NOTIFY_ADD_DEVICE) ++ return NOTIFY_DONE; ++ ++ if (acpi_disabled) ++ arm64_of_set_dma_ops(_dev); ++ else ++ arm64_acpi_set_dma_ops(_dev); ++ ++ return NOTIFY_OK; +} + -+static inline unsigned short mmio_config_readw(void __iomem *pos) -+{ -+ u16 val; -+ asm volatile("movw (%1),%%ax" : "=a" (val) : "r" (pos)); -+ return val; -+} ++#ifdef CONFIG_ACPI ++static struct notifier_block platform_bus_nb = { ++ .notifier_call = dma_bus_notifier, ++}; + -+static inline unsigned int mmio_config_readl(void __iomem *pos) -+{ -+ u32 val; -+ asm volatile("movl (%1),%%eax" : "=a" (val) : "r" (pos)); -+ return val; -+} ++static struct notifier_block amba_bus_nb = { ++ .notifier_call = dma_bus_notifier, ++}; ++#endif + -+static inline void mmio_config_writeb(void __iomem *pos, u8 val) -+{ -+ asm volatile("movb %%al,(%1)" : : "a" (val), "r" (pos) : "memory"); -+} ++#ifdef CONFIG_PCI ++static struct notifier_block pci_bus_nb = { ++ .notifier_call = dma_bus_notifier, ++}; ++#endif + -+static inline void mmio_config_writew(void __iomem *pos, u16 val) -+{ -+ asm volatile("movw %%ax,(%1)" : : "a" (val), "r" (pos) : "memory"); -+} -+ -+static inline void mmio_config_writel(void __iomem *pos, u32 val) -+{ -+ asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); -+} + static int __init arm64_dma_init(void) + { + int ret; - #define HAVE_PCI_MMAP - extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ++ if (IS_ENABLED(CONFIG_ACPI)) { ++ bus_register_notifier(&platform_bus_type, &platform_bus_nb); ++ bus_register_notifier(&amba_bustype, &amba_bus_nb); ++ } ++ if (IS_ENABLED(CONFIG_PCI)) ++ bus_register_notifier(&pci_bus_type, &pci_bus_nb); ++ + dma_ops = &swiotlb_dma_ops; + + ret = atomic_pool_init(); +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index c6daaf6..c3c3134 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -276,12 +276,24 @@ static void __ref create_mapping(phys_addr_t phys, unsigned long virt, + size, prot, early_alloc); + } + ++void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io) ++{ ++ if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) { ++ pr_warn("BUG: not creating id mapping for %pa\n", &addr); ++ return; ++ } ++ __create_mapping(&init_mm, &idmap_pg_dir[pgd_index(addr)], ++ addr, addr, size, ++ map_io ? 
__pgprot(PROT_DEVICE_nGnRE) ++ : PAGE_KERNEL_EXEC, early_alloc); ++} ++ + void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot) + { + __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot, +- late_alloc); ++ early_alloc); + } + + static void create_mapping_late(phys_addr_t phys, unsigned long virt, +diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig +index 074e52b..e8728d7 100644 +--- a/arch/ia64/Kconfig ++++ b/arch/ia64/Kconfig +@@ -10,6 +10,7 @@ config IA64 + select ARCH_MIGHT_HAVE_PC_SERIO + select PCI if (!IA64_HP_SIM) + select ACPI if (!IA64_HP_SIM) ++ select ACPI_GENERIC_SLEEP if ACPI + select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI + select HAVE_UNSTABLE_SCHED_CLOCK + select HAVE_IDE +diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c +index 2c44989..067ef44 100644 +--- a/arch/ia64/kernel/acpi.c ++++ b/arch/ia64/kernel/acpi.c +@@ -887,7 +887,7 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) + } + + /* wrapper to silence section mismatch warning */ +-int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) ++int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu) + { + return _acpi_map_lsapic(handle, physid, pcpu); + } +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index b7d31ca..b77071a 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -22,6 +22,7 @@ config X86_64 + ### Arch settings + config X86 + def_bool y ++ select ACPI_GENERIC_SLEEP if ACPI + select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_HAS_FAST_MULTIPLIER +@@ -141,6 +142,7 @@ config X86 + select ACPI_LEGACY_TABLES_LOOKUP if ACPI + select X86_FEATURE_NAMES if PROC_FS + select SRCU ++ select PCI_ECAM_GENERIC if X86_64 + + config INSTRUCTION_DECODER + def_bool y +@@ -2277,6 +2279,7 @@ config PCI_DIRECT + + config PCI_MMCONFIG + def_bool y ++ select PCI_ECAM + depends on X86_32 && PCI && (ACPI || SFI) && (PCI_GOMMCONFIG || PCI_GOANY) + + config PCI_OLPC +@@ -2294,6 +2297,7 @@ config PCI_DOMAINS + + config PCI_MMCONFIG + bool "Support mmconfig PCI config space access" ++ select PCI_ECAM + depends on X86_64 && PCI && ACPI + + config PCI_CNB20LE_QUIRK diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h -index fa1195d..42e7332 100644 +index fa1195d..e8a237f 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h -@@ -121,78 +121,6 @@ extern int __init pcibios_init(void); - extern int pci_legacy_init(void); +@@ -122,40 +122,18 @@ extern int pci_legacy_init(void); extern void pcibios_fixup_irqs(void); --/* pci-mmconfig.c */ + /* pci-mmconfig.c */ - -/* "PCI MMCONFIG %04x [bus %02x-%02x]" */ -#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2) @@ -4726,61 +3988,43 @@ index fa1195d..42e7332 100644 -extern void __init pci_mmcfg_arch_free(void); -extern int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg); -extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg); --extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, -- phys_addr_t addr); + extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, + phys_addr_t addr); -extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end); -extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus); - -extern struct list_head pci_mmcfg_list; - -#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20) -- --/* -- * AMD Fam10h CPUs are buggy, and cannot access MMIO config space + + /* + * 
AMD Fam10h CPUs are buggy, and cannot access MMIO config space - * on their northbrige except through the * %eax register. As such, you MUST - * NOT use normal IOMEM accesses, you need to only use the magic mmio-config -- * accessor functions. ++ * on their northbridge except through the * %eax register. As such, you MUST ++ * NOT use normal IOMEM accesses, you need to only use the magic mmio_config_* + * accessor functions. - * In fact just use pci_config_*, nothing else please. -- */ --static inline unsigned char mmio_config_readb(void __iomem *pos) --{ -- u8 val; -- asm volatile("movb (%1),%%al" : "=a" (val) : "r" (pos)); -- return val; --} -- --static inline unsigned short mmio_config_readw(void __iomem *pos) --{ -- u16 val; -- asm volatile("movw (%1),%%ax" : "=a" (val) : "r" (pos)); -- return val; --} -- --static inline unsigned int mmio_config_readl(void __iomem *pos) --{ -- u32 val; -- asm volatile("movl (%1),%%eax" : "=a" (val) : "r" (pos)); -- return val; --} -- --static inline void mmio_config_writeb(void __iomem *pos, u8 val) --{ -- asm volatile("movb %%al,(%1)" : : "a" (val), "r" (pos) : "memory"); --} -- --static inline void mmio_config_writew(void __iomem *pos, u16 val) --{ -- asm volatile("movw %%ax,(%1)" : : "a" (val), "r" (pos) : "memory"); --} -- --static inline void mmio_config_writel(void __iomem *pos, u32 val) --{ -- asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); --} -- - #ifdef CONFIG_PCI - # ifdef CONFIG_ACPI - # define x86_default_pci_init pci_acpi_init ++ * ++ * Please refer to the following doc: ++ * "BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h Processors", ++ * rev. 3.48, sec 2.11.1, "MMIO Configuration Coding Requirements". + */ + static inline unsigned char mmio_config_readb(void __iomem *pos) + { +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index 3d525c6..e4f8582 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -757,7 +757,7 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) + } + + /* wrapper to silence section mismatch warning */ +-int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) ++int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu) + { + return _acpi_map_lsapic(handle, physid, pcpu); + } diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile index 5c6fc35..35c765b 100644 --- a/arch/x86/pci/Makefile @@ -4798,41 +4042,39 @@ index 5c6fc35..35c765b 100644 obj-$(CONFIG_PCI_OLPC) += olpc.o obj-$(CONFIG_PCI_XEN) += xen.o diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c -index 6ac2738..eae3846 100644 +index e469598..fc9eb43 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -4,6 +4,7 @@ #include #include #include -+#include ++#include #include #include -diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c -index adb62aa..b4a55df 100644 ---- a/arch/x86/pci/init.c -+++ b/arch/x86/pci/init.c -@@ -1,5 +1,6 @@ - #include - #include -+#include - #include - #include - +@@ -198,7 +199,7 @@ static int setup_mcfg_map(struct pci_root_info *info, u16 seg, u8 start, + static void teardown_mcfg_map(struct pci_root_info *info) + { + if (info->mcfg_added) { +- pci_mmconfig_delete(info->segment, info->start_bus, ++ pci_ecam_delete(info->segment, info->start_bus, + info->end_bus); + info->mcfg_added = false; + } diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c -index dd30b7e..ce3d93c 100644 +index dd30b7e..8f78671 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ 
b/arch/x86/pci/mmconfig-shared.c @@ -18,6 +18,7 @@ #include #include #include -+#include ++#include #include #include #include -@@ -27,103 +28,11 @@ +@@ -27,103 +28,52 @@ /* Indicate if the mmcfg resources have been placed into the resource table. */ static bool pci_mmcfg_running_state; static bool pci_mmcfg_arch_init_failed; @@ -4847,16 +4089,23 @@ index dd30b7e..ce3d93c 100644 - list_del(&cfg->list); - kfree(cfg); -} -- ++const struct pci_raw_ops pci_mmcfg = { ++ .read = pci_ecam_read, ++ .write = pci_ecam_write, ++}; + -static void __init free_all_mmcfg(void) --{ ++static u32 ++pci_mmconfig_amd_read(int len, void __iomem *addr) + { - struct pci_mmcfg_region *cfg, *tmp; - - pci_mmcfg_arch_free(); - list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list) - pci_mmconfig_remove(cfg); -} -- ++ u32 data = 0; + -static void list_add_sorted(struct pci_mmcfg_region *new) -{ - struct pci_mmcfg_region *cfg; @@ -4869,7 +4118,17 @@ index dd30b7e..ce3d93c 100644 - list_add_tail_rcu(&new->list, &cfg->list); - return; - } -- } ++ switch (len) { ++ case 1: ++ data = mmio_config_readb(addr); ++ break; ++ case 2: ++ data = mmio_config_readw(addr); ++ break; ++ case 4: ++ data = mmio_config_readl(addr); ++ break; + } - list_add_tail_rcu(&new->list, &pci_mmcfg_list); -} - @@ -4881,7 +4140,7 @@ index dd30b7e..ce3d93c 100644 - - if (addr == 0) - return NULL; -- + - new = kzalloc(sizeof(*new), GFP_KERNEL); - if (!new) - return NULL; @@ -4900,11 +4159,14 @@ index dd30b7e..ce3d93c 100644 - res->name = new->name; - - return new; --} -- ++ return data; + } + -static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start, - int end, u64 addr) --{ ++static void ++pci_mmconfig_amd_write(int len, void __iomem *addr, u32 value) + { - struct pci_mmcfg_region *new; - - new = pci_mmconfig_alloc(segment, start, end, addr); @@ -4917,11 +4179,21 @@ index dd30b7e..ce3d93c 100644 - "MMCONFIG for domain %04x [bus %02x-%02x] at %pR " - "(base %#lx)\n", - segment, start, end, &new->res, (unsigned long)addr); -- } ++ switch (len) { ++ case 1: ++ mmio_config_writeb(addr, value); ++ break; ++ case 2: ++ mmio_config_writew(addr, value); ++ break; ++ case 4: ++ mmio_config_writel(addr, value); ++ break; + } - - return new; --} -- + } + -struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus) -{ - struct pci_mmcfg_region *cfg; @@ -4933,36 +4205,310 @@ index dd30b7e..ce3d93c 100644 - - return NULL; -} -+const struct pci_raw_ops pci_mmcfg = { -+ .read = pci_mmcfg_read, -+ .write = pci_mmcfg_write, ++static struct pci_ecam_mmio_ops pci_mmcfg_mmio_amd_fam10h = { ++ .read = pci_mmconfig_amd_read, ++ .write = pci_mmconfig_amd_write, +}; static const char *__init pci_mmcfg_e7520(void) { -@@ -543,7 +452,7 @@ static void __init pci_mmcfg_reject_broken(int early) +@@ -134,7 +84,7 @@ static const char *__init pci_mmcfg_e7520(void) + if (win == 0x0000 || win == 0xf000) + return NULL; + +- if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL) ++ if (pci_ecam_add(0, 0, 255, win << 16) == NULL) + return NULL; + + return "Intel Corporation E7520 Memory Controller Hub"; +@@ -178,7 +128,7 @@ static const char *__init pci_mmcfg_intel_945(void) + if ((pciexbar & mask) >= 0xf0000000U) + return NULL; + +- if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL) ++ if (pci_ecam_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL) + return NULL; + + return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; +@@ -225,12 +175,14 @@ static const char *__init pci_mmcfg_amd_fam10h(void) + + end_bus = (1 << 
busnbits) - 1; + for (i = 0; i < (1 << segnbits); i++) +- if (pci_mmconfig_add(i, 0, end_bus, ++ if (pci_ecam_add(i, 0, end_bus, + base + (1<<28) * i) == NULL) { +- free_all_mmcfg(); ++ pci_ecam_free_all(); + return NULL; + } + ++ pci_ecam_register_mmio(&pci_mmcfg_mmio_amd_fam10h); ++ + return "AMD Family 10h NB"; + } + +@@ -258,7 +210,7 @@ static const char *__init pci_mmcfg_nvidia_mcp55(void) + /* + * do check if amd fam10h already took over + */ +- if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked) ++ if (!acpi_disabled || !list_empty(&pci_ecam_list) || mcp55_checked) + return NULL; + + mcp55_checked = true; +@@ -287,7 +239,7 @@ static const char *__init pci_mmcfg_nvidia_mcp55(void) + base <<= extcfg_base_lshift; + start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; + end = start + extcfg_sizebus[size_index] - 1; +- if (pci_mmconfig_add(0, start, end, base) == NULL) ++ if (pci_ecam_add(0, start, end, base) == NULL) + continue; + mcp55_mmconf_found++; + } +@@ -321,15 +273,15 @@ static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = + + static void __init pci_mmcfg_check_end_bus_number(void) + { +- struct pci_mmcfg_region *cfg, *cfgx; ++ struct pci_ecam_region *cfg, *cfgx; + + /* Fixup overlaps */ +- list_for_each_entry(cfg, &pci_mmcfg_list, list) { ++ list_for_each_entry(cfg, &pci_ecam_list, list) { + if (cfg->end_bus < cfg->start_bus) + cfg->end_bus = 255; + + /* Don't access the list head ! */ +- if (cfg->list.next == &pci_mmcfg_list) ++ if (cfg->list.next == &pci_ecam_list) + break; + + cfgx = list_entry(cfg->list.next, typeof(*cfg), list); +@@ -349,7 +301,7 @@ static int __init pci_mmcfg_check_hostbridge(void) + if (!raw_pci_ops) + return 0; + +- free_all_mmcfg(); ++ pci_ecam_free_all(); + + for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) { + bus = pci_mmcfg_probes[i].bus; +@@ -370,7 +322,7 @@ static int __init pci_mmcfg_check_hostbridge(void) + /* some end_bus_number is crazy, fix it */ + pci_mmcfg_check_end_bus_number(); + +- return !list_empty(&pci_mmcfg_list); ++ return !list_empty(&pci_ecam_list); + } + + static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data) +@@ -443,7 +395,7 @@ static int is_acpi_reserved(u64 start, u64 end, unsigned not_used) + typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); + + static int __ref is_mmconf_reserved(check_reserved_t is_reserved, +- struct pci_mmcfg_region *cfg, ++ struct pci_ecam_region *cfg, + struct device *dev, int with_e820) + { + u64 addr = cfg->res.start; +@@ -473,8 +425,8 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved, + cfg->end_bus = cfg->start_bus + ((size>>20) - 1); + num_buses = cfg->end_bus - cfg->start_bus + 1; + cfg->res.end = cfg->res.start + +- PCI_MMCFG_BUS_OFFSET(num_buses) - 1; +- snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN, ++ PCI_ECAM_BUS_OFFSET(num_buses) - 1; ++ snprintf(cfg->name, PCI_ECAM_RESOURCE_NAME_LEN, + "PCI MMCONFIG %04x [bus %02x-%02x]", + cfg->segment, cfg->start_bus, cfg->end_bus); + +@@ -495,7 +447,7 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved, + } + + static int __ref pci_mmcfg_check_reserved(struct device *dev, +- struct pci_mmcfg_region *cfg, int early) ++ struct pci_ecam_region *cfg, int early) + { + if (!early && !acpi_disabled) { + if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0)) +@@ -532,84 +484,17 @@ static int __ref pci_mmcfg_check_reserved(struct device *dev, + + static void __init pci_mmcfg_reject_broken(int early) + { +- struct pci_mmcfg_region 
*cfg; ++ struct pci_ecam_region *cfg; + +- list_for_each_entry(cfg, &pci_mmcfg_list, list) { ++ list_for_each_entry(cfg, &pci_ecam_list, list) { + if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) { + pr_info(PREFIX "not using MMCONFIG\n"); +- free_all_mmcfg(); ++ pci_ecam_free_all(); + return; + } } } -static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, -+int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, - struct acpi_mcfg_allocation *cfg) +- struct acpi_mcfg_allocation *cfg) +-{ +- int year; +- +- if (cfg->address < 0xFFFFFFFF) +- return 0; +- +- if (!strncmp(mcfg->header.oem_id, "SGI", 3)) +- return 0; +- +- if (mcfg->header.revision >= 1) { +- if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && +- year >= 2010) +- return 0; +- } +- +- pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx " +- "is above 4GB, ignored\n", cfg->pci_segment, +- cfg->start_bus_number, cfg->end_bus_number, cfg->address); +- return -EINVAL; +-} +- +-static int __init pci_parse_mcfg(struct acpi_table_header *header) +-{ +- struct acpi_table_mcfg *mcfg; +- struct acpi_mcfg_allocation *cfg_table, *cfg; +- unsigned long i; +- int entries; +- +- if (!header) +- return -EINVAL; +- +- mcfg = (struct acpi_table_mcfg *)header; +- +- /* how many config structures do we have */ +- free_all_mmcfg(); +- entries = 0; +- i = header->length - sizeof(struct acpi_table_mcfg); +- while (i >= sizeof(struct acpi_mcfg_allocation)) { +- entries++; +- i -= sizeof(struct acpi_mcfg_allocation); +- } +- if (entries == 0) { +- pr_err(PREFIX "MMCONFIG has no entries\n"); +- return -ENODEV; +- } +- +- cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1]; +- for (i = 0; i < entries; i++) { +- cfg = &cfg_table[i]; +- if (acpi_mcfg_check_entry(mcfg, cfg)) { +- free_all_mmcfg(); +- return -ENODEV; +- } +- +- if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number, +- cfg->end_bus_number, cfg->address) == NULL) { +- pr_warn(PREFIX "no memory for MCFG entries\n"); +- free_all_mmcfg(); +- return -ENOMEM; +- } +- } +- +- return 0; +-} +- + #ifdef CONFIG_ACPI_APEI + extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size, + void *data), void *data); +@@ -617,13 +502,13 @@ extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size, + static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size, + void *data), void *data) { - int year; -@@ -652,9 +561,10 @@ static void __init __pci_mmcfg_init(int early) +- struct pci_mmcfg_region *cfg; ++ struct pci_ecam_region *cfg; + int rc; + +- if (list_empty(&pci_mmcfg_list)) ++ if (list_empty(&pci_ecam_list)) + return 0; + +- list_for_each_entry(cfg, &pci_mmcfg_list, list) { ++ list_for_each_entry(cfg, &pci_ecam_list, list) { + rc = func(cfg->res.start, resource_size(&cfg->res), data); + if (rc) + return rc; +@@ -639,23 +524,24 @@ static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size, + static void __init __pci_mmcfg_init(int early) + { + pci_mmcfg_reject_broken(early); +- if (list_empty(&pci_mmcfg_list)) ++ if (list_empty(&pci_ecam_list)) + return; + + if (pcibios_last_bus < 0) { +- const struct pci_mmcfg_region *cfg; ++ const struct pci_ecam_region *cfg; + +- list_for_each_entry(cfg, &pci_mmcfg_list, list) { ++ list_for_each_entry(cfg, &pci_ecam_list, list) { + if (cfg->segment) + break; + pcibios_last_bus = cfg->end_bus; } } - if (pci_mmcfg_arch_init()) -+ if (pci_mmcfg_arch_init()) { ++ if (pci_ecam_arch_init()) { + raw_pci_ext_ops = &pci_mmcfg; pci_probe = (pci_probe & ~PCI_PROBE_MASK) | 
PCI_PROBE_MMCONF; - else { +- free_all_mmcfg(); + } else { - free_all_mmcfg(); ++ pci_ecam_free_all(); pci_mmcfg_arch_init_failed = true; } -@@ -731,88 +641,40 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, + } +@@ -668,7 +554,7 @@ void __init pci_mmcfg_early_init(void) + if (pci_mmcfg_check_hostbridge()) + known_bridge = 1; + else +- acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); ++ acpi_sfi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); + __pci_mmcfg_init(1); + + set_apei_filter(); +@@ -686,14 +572,14 @@ void __init pci_mmcfg_late_init(void) + + /* MMCONFIG hasn't been enabled yet, try again */ + if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) { +- acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); ++ acpi_sfi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); + __pci_mmcfg_init(0); + } + } + + static int __init pci_mmcfg_late_insert_resources(void) + { +- struct pci_mmcfg_region *cfg; ++ struct pci_ecam_region *cfg; + + pci_mmcfg_running_state = true; + +@@ -706,7 +592,7 @@ static int __init pci_mmcfg_late_insert_resources(void) + * marked so it won't cause request errors when __request_region is + * called. + */ +- list_for_each_entry(cfg, &pci_mmcfg_list, list) ++ list_for_each_entry(cfg, &pci_ecam_list, list) + if (!cfg->res.parent) + insert_resource(&iomem_resource, &cfg->res); + +@@ -726,93 +612,45 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, + { + int rc; + struct resource *tmp = NULL; +- struct pci_mmcfg_region *cfg; ++ struct pci_ecam_region *cfg; + if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) return -ENODEV; @@ -4989,7 +4535,8 @@ index dd30b7e..ce3d93c 100644 - } - rc = -EBUSY; - cfg = pci_mmconfig_alloc(seg, start, end, addr); +- cfg = pci_mmconfig_alloc(seg, start, end, addr); ++ cfg = pci_ecam_alloc(seg, start, end, addr); if (cfg == NULL) { dev_warn(dev, "fail to add MMCONFIG (out of memory)\n"); - rc = -ENOMEM; @@ -5018,22 +4565,19 @@ index dd30b7e..ce3d93c 100644 - cfg = NULL; - rc = 0; - } -- } -- ++ goto error; + } + - if (cfg) { - if (cfg->res.parent) - release_resource(&cfg->res); - kfree(cfg); -+ goto error; - } - -- mutex_unlock(&pci_mmcfg_lock); +- } + /* Insert resource if it's not in boot stage */ + if (pci_mmcfg_running_state) + tmp = insert_resource_conflict(&iomem_resource, &cfg->res); -- return rc; --} +- mutex_unlock(&pci_mmcfg_lock); + if (tmp) { + dev_warn(dev, + "MMCONFIG %pR conflicts with %s %pR\n", @@ -5041,14 +4585,17 @@ index dd30b7e..ce3d93c 100644 + goto error; + } +- return rc; +-} ++ rc = pci_ecam_inject(cfg); ++ if (rc) ++ goto error; + -/* Delete MMCFG information for host bridges */ -int pci_mmconfig_delete(u16 seg, u8 start, u8 end) -{ - struct pci_mmcfg_region *cfg; -+ rc = pci_mmconfig_inject(cfg); -+ if (rc) -+ goto error; - +- - mutex_lock(&pci_mmcfg_lock); - list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) - if (cfg->segment == seg && cfg->start_bus == start && @@ -5073,36 +4620,90 @@ index dd30b7e..ce3d93c 100644 + return rc; } diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c -index 43984bc..c0106a6 100644 +index 43984bc..27e707d 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c @@ -12,6 +12,7 @@ #include #include #include -+#include ++#include #include #include +@@ -27,7 +28,7 @@ static int mmcfg_last_accessed_cpu; + */ + static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) + { +- struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); ++ struct pci_ecam_region *cfg = pci_ecam_lookup(seg, 
bus); + + if (cfg) + return cfg->address; +@@ -39,7 +40,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) + */ + static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn) + { +- u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12); ++ u32 dev_base = base | PCI_ECAM_BUS_OFFSET(bus) | (devfn << 12); + int cpu = smp_processor_id(); + if (dev_base != mmcfg_last_accessed_device || + cpu != mmcfg_last_accessed_cpu) { @@ -49,7 +50,7 @@ static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn) } } -static int pci_mmcfg_read(unsigned int seg, unsigned int bus, -+int pci_mmcfg_read(unsigned int seg, unsigned int bus, ++int pci_ecam_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { unsigned long flags; -@@ -88,7 +89,7 @@ err: *value = -1; +@@ -71,24 +72,14 @@ err: *value = -1; + + pci_exp_set_dev_base(base, bus, devfn); + +- switch (len) { +- case 1: +- *value = mmio_config_readb(mmcfg_virt_addr + reg); +- break; +- case 2: +- *value = mmio_config_readw(mmcfg_virt_addr + reg); +- break; +- case 4: +- *value = mmio_config_readl(mmcfg_virt_addr + reg); +- break; +- } ++ *value = pci_mmio_read(len, mmcfg_virt_addr + reg); + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + rcu_read_unlock(); + return 0; } -static int pci_mmcfg_write(unsigned int seg, unsigned int bus, -+int pci_mmcfg_write(unsigned int seg, unsigned int bus, ++int pci_ecam_write(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 value) { unsigned long flags; -@@ -125,15 +126,9 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus, +@@ -108,45 +99,29 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus, + + pci_exp_set_dev_base(base, bus, devfn); + +- switch (len) { +- case 1: +- mmio_config_writeb(mmcfg_virt_addr + reg, value); +- break; +- case 2: +- mmio_config_writew(mmcfg_virt_addr + reg, value); +- break; +- case 4: +- mmio_config_writel(mmcfg_virt_addr + reg, value); +- break; +- } ++ pci_mmio_write(len, mmcfg_virt_addr + reg, value); + raw_spin_unlock_irqrestore(&pci_config_lock, flags); + rcu_read_unlock(); + return 0; } @@ -5111,13 +4712,30 @@ index 43984bc..c0106a6 100644 - .write = pci_mmcfg_write, -}; - - int __init pci_mmcfg_arch_init(void) +-int __init pci_mmcfg_arch_init(void) ++int __init pci_ecam_arch_init(void) { printk(KERN_INFO "PCI: Using MMCONFIG for extended config space\n"); - raw_pci_ext_ops = &pci_mmcfg; return 1; } +-void __init pci_mmcfg_arch_free(void) ++void __init pci_ecam_arch_free(void) + { + } + +-int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg) ++int pci_ecam_arch_map(struct pci_ecam_region *cfg) + { + return 0; + } + +-void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg) ++void pci_ecam_arch_unmap(struct pci_ecam_region *cfg) + { + unsigned long flags; + diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c deleted file mode 100644 index bea5249..0000000 @@ -5277,8 +4895,67 @@ index bea5249..0000000 - cfg->virt = NULL; - } -} +diff --git a/arch/x86/pci/numachip.c b/arch/x86/pci/numachip.c +index 2e565e6..f60d403 100644 +--- a/arch/x86/pci/numachip.c ++++ b/arch/x86/pci/numachip.c +@@ -13,6 +13,7 @@ + * + */ + ++#include + #include + #include + +@@ -20,7 +21,7 @@ static u8 limit __read_mostly; + + static inline char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) + { +- struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); ++ struct pci_ecam_region *cfg = pci_ecam_lookup(seg, bus); + + 
if (cfg && cfg->virt) + return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12)); +@@ -51,17 +52,7 @@ err: *value = -1; + goto err; + } + +- switch (len) { +- case 1: +- *value = mmio_config_readb(addr + reg); +- break; +- case 2: +- *value = mmio_config_readw(addr + reg); +- break; +- case 4: +- *value = mmio_config_readl(addr + reg); +- break; +- } ++ *value = pci_mmio_read(len, addr + reg); + rcu_read_unlock(); + + return 0; +@@ -87,17 +78,7 @@ static int pci_mmcfg_write_numachip(unsigned int seg, unsigned int bus, + return -EINVAL; + } + +- switch (len) { +- case 1: +- mmio_config_writeb(addr + reg, value); +- break; +- case 2: +- mmio_config_writew(addr + reg, value); +- break; +- case 4: +- mmio_config_writel(addr + reg, value); +- break; +- } ++ pci_mmio_write(len, addr + reg, value); + rcu_read_unlock(); + + return 0; diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig -index e6c3ddd..aad0a08 100644 +index e6c3ddd..25226c9 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -5,7 +5,7 @@ @@ -5290,7 +4967,21 @@ index e6c3ddd..aad0a08 100644 depends on PCI select PNP default y -@@ -163,6 +163,7 @@ config ACPI_PROCESSOR +@@ -48,9 +48,13 @@ config ACPI_LEGACY_TABLES_LOOKUP + config ARCH_MIGHT_HAVE_ACPI_PDC + bool + ++config ACPI_GENERIC_SLEEP ++ bool ++ + config ACPI_SLEEP + bool + depends on SUSPEND || HIBERNATION ++ depends on ACPI_GENERIC_SLEEP + default y + + config ACPI_PROCFS_POWER +@@ -163,6 +167,7 @@ config ACPI_PROCESSOR tristate "Processor" select THERMAL select CPU_IDLE @@ -5299,42 +4990,220 @@ index e6c3ddd..aad0a08 100644 help This driver installs ACPI as the idle handler for Linux and uses diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile -index b18cd21..0e6abf9 100644 +index 623b117..9595d13 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile -@@ -23,7 +23,11 @@ acpi-y += nvs.o +@@ -23,7 +23,7 @@ acpi-y += nvs.o # Power management related files acpi-y += wakeup.o -+ifeq ($(ARCH), arm64) -+acpi-y += sleep_arm.o -+else # X86, IA64 - acpi-y += sleep.o -+endif +-acpi-y += sleep.o ++acpi-$(CONFIG_ACPI_GENERIC_SLEEP) += sleep.o acpi-y += device_pm.o acpi-$(CONFIG_ACPI_SLEEP) += proc.o -@@ -66,6 +70,7 @@ obj-$(CONFIG_ACPI_BUTTON) += button.o +@@ -67,6 +67,7 @@ obj-$(CONFIG_ACPI_BUTTON) += button.o obj-$(CONFIG_ACPI_FAN) += fan.o obj-$(CONFIG_ACPI_VIDEO) += video.o obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o -+obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o ++obj-$(CONFIG_PCI_MMCONFIG) += mcfg.o obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-y += container.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c +index 1020b1b..58f335c 100644 +--- a/drivers/acpi/acpi_processor.c ++++ b/drivers/acpi/acpi_processor.c +@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) + acpi_status status; + int ret; + +- if (pr->phys_id == -1) ++ if (pr->phys_id == PHYS_CPUID_INVALID) + return -ENODEV; + + status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); +@@ -215,7 +215,8 @@ static int acpi_processor_get_info(struct acpi_device *device) + union acpi_object object = { 0 }; + struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; + struct acpi_processor *pr = acpi_driver_data(device); +- int phys_id, cpu_index, device_declaration = 0; ++ phys_cpuid_t phys_id; ++ int cpu_index, device_declaration = 0; + acpi_status status = AE_OK; + static int cpu0_initialized; + unsigned long long value; +@@ -263,7 +264,7 @@ static int 
acpi_processor_get_info(struct acpi_device *device) + } + + phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); +- if (phys_id < 0) ++ if (phys_id == PHYS_CPUID_INVALID) + acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); + pr->phys_id = phys_id; + +diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h +index c2f03e8..2aef850 100644 +--- a/drivers/acpi/acpica/acutils.h ++++ b/drivers/acpi/acpica/acutils.h +@@ -430,6 +430,9 @@ acpi_status + acpi_ut_execute_CID(struct acpi_namespace_node *device_node, + struct acpi_pnp_device_id_list ** return_cid_list); + ++acpi_status ++acpi_ut_execute_CLS(struct acpi_namespace_node *device_node, ++ struct acpi_pnp_device_id **return_id); + /* + * utlock - reader/writer locks + */ +diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c +index d66c326..590ef06 100644 +--- a/drivers/acpi/acpica/nsxfname.c ++++ b/drivers/acpi/acpica/nsxfname.c +@@ -276,11 +276,12 @@ acpi_get_object_info(acpi_handle handle, + struct acpi_pnp_device_id *hid = NULL; + struct acpi_pnp_device_id *uid = NULL; + struct acpi_pnp_device_id *sub = NULL; ++ struct acpi_pnp_device_id *cls = NULL; + char *next_id_string; + acpi_object_type type; + acpi_name name; + u8 param_count = 0; +- u8 valid = 0; ++ u16 valid = 0; + u32 info_size; + u32 i; + acpi_status status; +@@ -320,7 +321,7 @@ acpi_get_object_info(acpi_handle handle, + if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { + /* + * Get extra info for ACPI Device/Processor objects only: +- * Run the Device _HID, _UID, _SUB, and _CID methods. ++ * Run the Device _HID, _UID, _SUB, _CID and _CLS methods. + * + * Note: none of these methods are required, so they may or may + * not be present for this device. The Info->Valid bitfield is used +@@ -351,6 +352,14 @@ acpi_get_object_info(acpi_handle handle, + valid |= ACPI_VALID_SUB; + } + ++ /* Execute the Device._CLS method */ ++ ++ status = acpi_ut_execute_CLS(node, &cls); ++ if (ACPI_SUCCESS(status)) { ++ info_size += cls->length; ++ valid |= ACPI_VALID_CLS; ++ } ++ + /* Execute the Device._CID method */ + + status = acpi_ut_execute_CID(node, &cid_list); +@@ -468,6 +477,11 @@ acpi_get_object_info(acpi_handle handle, + sub, next_id_string); + } + ++ if (cls) { ++ next_id_string = acpi_ns_copy_device_id(&info->cls, ++ cls, next_id_string); ++ } ++ + if (cid_list) { + info->compatible_id_list.count = cid_list->count; + info->compatible_id_list.list_size = cid_list->list_size; +@@ -507,6 +521,9 @@ cleanup: + if (sub) { + ACPI_FREE(sub); + } ++ if (cls) { ++ ACPI_FREE(cls); ++ } + if (cid_list) { + ACPI_FREE(cid_list); + } +diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c +index 27431cf..a64b5d1 100644 +--- a/drivers/acpi/acpica/utids.c ++++ b/drivers/acpi/acpica/utids.c +@@ -416,3 +416,74 @@ cleanup: + acpi_ut_remove_reference(obj_desc); + return_ACPI_STATUS(status); + } ++ ++/******************************************************************************* ++ * ++ * FUNCTION: acpi_ut_execute_CLS ++ * ++ * PARAMETERS: device_node - Node for the device ++ * return_id - Where the string UID is returned ++ * ++ * RETURN: Status ++ * ++ * DESCRIPTION: Executes the _CLS control method that returns PCI-defined ++ * class code of the device. The ACPI spec define _CLS as a ++ * package with three integers. 
The returned string has format: ++ * ++ * "bbsspp" ++ * where: ++ * bb = Base-class code ++ * ss = Sub-class code ++ * pp = Programming Interface code ++ * ++ ******************************************************************************/ ++ ++acpi_status ++acpi_ut_execute_CLS(struct acpi_namespace_node *device_node, ++ struct acpi_pnp_device_id **return_id) ++{ ++ struct acpi_pnp_device_id *cls; ++ union acpi_operand_object *obj_desc; ++ union acpi_operand_object **cls_objects; ++ acpi_status status; ++ ++ ACPI_FUNCTION_TRACE(ut_execute_CLS); ++ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CLS, ++ ACPI_BTYPE_PACKAGE, &obj_desc); ++ if (ACPI_FAILURE(status)) ++ return_ACPI_STATUS(status); ++ ++ cls_objects = obj_desc->package.elements; ++ ++ if (obj_desc->common.type == ACPI_TYPE_PACKAGE && ++ obj_desc->package.count == 3 && ++ cls_objects[0]->common.type == ACPI_TYPE_INTEGER && ++ cls_objects[1]->common.type == ACPI_TYPE_INTEGER && ++ cls_objects[2]->common.type == ACPI_TYPE_INTEGER) { ++ ++ /* Allocate a buffer for the CLS */ ++ cls = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + ++ (acpi_size) 7); ++ if (!cls) { ++ status = AE_NO_MEMORY; ++ goto cleanup; ++ } ++ ++ cls->string = ++ ACPI_ADD_PTR(char, cls, sizeof(struct acpi_pnp_device_id)); ++ ++ sprintf(cls->string, "%02x%02x%02x", ++ (u8)ACPI_TO_INTEGER(cls_objects[0]->integer.value), ++ (u8)ACPI_TO_INTEGER(cls_objects[1]->integer.value), ++ (u8)ACPI_TO_INTEGER(cls_objects[2]->integer.value)); ++ cls->length = 7; ++ *return_id = cls; ++ } ++ ++cleanup: ++ ++ /* On exit, we must delete the return object */ ++ ++ acpi_ut_remove_reference(obj_desc); ++ return_ACPI_STATUS(status); ++} diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c -index 8b67bd0..6d5412ab 100644 +index 8b67bd0..c412fdb 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c -@@ -41,6 +41,7 @@ - #include - #include - #include -+#include - - #include "internal.h" - -@@ -448,6 +449,9 @@ static int __init acpi_bus_init_irq(void) +@@ -448,6 +448,9 @@ static int __init acpi_bus_init_irq(void) case ACPI_IRQ_MODEL_IOSAPIC: message = "IOSAPIC"; break; @@ -5344,17 +5213,30 @@ index 8b67bd0..6d5412ab 100644 case ACPI_IRQ_MODEL_PLATFORM: message = "platform specific model"; break; -diff --git a/drivers/acpi/mmconfig.c b/drivers/acpi/mmconfig.c +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 56b321a..b5eef4c 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -161,7 +161,11 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); + /*-------------------------------------------------------------------------- + Suspend/Resume + -------------------------------------------------------------------------- */ ++#ifdef CONFIG_ACPI_GENERIC_SLEEP + extern int acpi_sleep_init(void); ++#else ++static inline int acpi_sleep_init(void) { return -ENOSYS; } ++#endif + + #ifdef CONFIG_ACPI_SLEEP + int acpi_sleep_proc_init(void); +diff --git a/drivers/acpi/mcfg.c b/drivers/acpi/mcfg.c new file mode 100644 -index 0000000..b13a9e4 +index 0000000..ed4b85b --- /dev/null -+++ b/drivers/acpi/mmconfig.c -@@ -0,0 +1,414 @@ ++++ b/drivers/acpi/mcfg.c +@@ -0,0 +1,140 @@ +/* -+ * Arch agnostic low-level direct PCI config space access via MMCONFIG -+ * -+ * Per-architecture code takes care of the mappings, region validation and -+ * accesses themselves. ++ * MCFG ACPI table parser. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as @@ -5362,18 +5244,11 @@ index 0000000..b13a9e4 + * + */ + -+#include -+#include -+#include -+#include ++#include ++#include ++#include + -+#include -+ -+#define PREFIX "PCI: " -+ -+static DEFINE_MUTEX(pci_mmcfg_lock); -+ -+LIST_HEAD(pci_mmcfg_list); ++#define PREFIX "MCFG: " + +extern struct acpi_mcfg_fixup __start_acpi_mcfg_fixups[]; +extern struct acpi_mcfg_fixup __end_acpi_mcfg_fixups[]; @@ -5381,295 +5256,55 @@ index 0000000..b13a9e4 +/* + * raw_pci_read/write - ACPI PCI config space accessors. + * -+ * ACPI spec defines MMCFG as the way we can access PCI config space, -+ * so let MMCFG be default (__weak). ++ * ACPI spec defines MCFG table as the way we can describe access to PCI config ++ * space, so let MCFG be default (__weak). + * + * If platform needs more fancy stuff, should provides its own implementation. + */ +int __weak raw_pci_read(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 *val) +{ -+ return pci_mmcfg_read(domain, bus, devfn, reg, len, val); ++ return pci_ecam_read(domain, bus, devfn, reg, len, val); +} + +int __weak raw_pci_write(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 val) +{ -+ return pci_mmcfg_write(domain, bus, devfn, reg, len, val); ++ return pci_ecam_write(domain, bus, devfn, reg, len, val); +} + -+int __weak pci_mmcfg_read(unsigned int seg, unsigned int bus, -+ unsigned int devfn, int reg, int len, u32 *value) -+{ -+ struct pci_mmcfg_region *cfg; -+ char __iomem *addr; -+ -+ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */ -+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) { -+err: *value = -1; -+ return -EINVAL; -+ } -+ -+ rcu_read_lock(); -+ cfg = pci_mmconfig_lookup(seg, bus); -+ if (!cfg || !cfg->virt) { -+ rcu_read_unlock(); -+ goto err; -+ } -+ if (cfg->read) -+ (*cfg->read)(cfg, bus, devfn, reg, len, value); -+ else { -+ addr = cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12)); -+ -+ switch (len) { -+ case 1: -+ *value = mmio_config_readb(addr + reg); -+ break; -+ case 2: -+ *value = mmio_config_readw(addr + reg); -+ break; -+ case 4: -+ *value = mmio_config_readl(addr + reg); -+ break; -+ } -+ } -+ rcu_read_unlock(); -+ -+ return 0; -+} -+ -+int __weak pci_mmcfg_write(unsigned int seg, unsigned int bus, -+ unsigned int devfn, int reg, int len, u32 value) -+{ -+ struct pci_mmcfg_region *cfg; -+ char __iomem *addr; -+ -+ /* Why do we have this when nobody checks it. How about a BUG()!? 
-AK */ -+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) -+ return -EINVAL; -+ -+ rcu_read_lock(); -+ cfg = pci_mmconfig_lookup(seg, bus); -+ if (!cfg || !cfg->virt) { -+ rcu_read_unlock(); -+ return -EINVAL; -+ } -+ if (cfg->write) -+ (*cfg->write)(cfg, bus, devfn, reg, len, value); -+ else { -+ addr = cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12)); -+ -+ switch (len) { -+ case 1: -+ mmio_config_writeb(addr + reg, value); -+ break; -+ case 2: -+ mmio_config_writew(addr + reg, value); -+ break; -+ case 4: -+ mmio_config_writel(addr + reg, value); -+ break; -+ } -+ } -+ rcu_read_unlock(); -+ -+ return 0; -+} -+ -+static void __iomem *mcfg_ioremap(struct pci_mmcfg_region *cfg) -+{ -+ void __iomem *addr; -+ u64 start, size; -+ int num_buses; -+ -+ start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus); -+ num_buses = cfg->end_bus - cfg->start_bus + 1; -+ size = PCI_MMCFG_BUS_OFFSET(num_buses); -+ addr = ioremap_nocache(start, size); -+ if (addr) -+ addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus); -+ return addr; -+} -+ -+int __init __weak pci_mmcfg_arch_init(void) -+{ -+ struct pci_mmcfg_region *cfg; -+ -+ list_for_each_entry(cfg, &pci_mmcfg_list, list) -+ if (pci_mmcfg_arch_map(cfg)) { -+ pci_mmcfg_arch_free(); -+ return 0; -+ } -+ -+ return 1; -+} -+ -+void __init __weak pci_mmcfg_arch_free(void) -+{ -+ struct pci_mmcfg_region *cfg; -+ -+ list_for_each_entry(cfg, &pci_mmcfg_list, list) -+ pci_mmcfg_arch_unmap(cfg); -+} -+ -+int __weak pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg) -+{ -+ cfg->virt = mcfg_ioremap(cfg); -+ if (!cfg->virt) { -+ pr_err(PREFIX "can't map MMCONFIG at %pR\n", &cfg->res); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+void __weak pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg) -+{ -+ if (cfg && cfg->virt) { -+ iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus)); -+ cfg->virt = NULL; -+ } -+} -+ -+static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg) -+{ -+ if (cfg->res.parent) -+ release_resource(&cfg->res); -+ list_del(&cfg->list); -+ kfree(cfg); -+} -+ -+void __init free_all_mmcfg(void) -+{ -+ struct pci_mmcfg_region *cfg, *tmp; -+ -+ pci_mmcfg_arch_free(); -+ list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list) -+ pci_mmconfig_remove(cfg); -+} -+ -+void list_add_sorted(struct pci_mmcfg_region *new) -+{ -+ struct pci_mmcfg_region *cfg; -+ -+ /* keep list sorted by segment and starting bus number */ -+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) { -+ if (cfg->segment > new->segment || -+ (cfg->segment == new->segment && -+ cfg->start_bus >= new->start_bus)) { -+ list_add_tail_rcu(&new->list, &cfg->list); -+ return; -+ } -+ } -+ list_add_tail_rcu(&new->list, &pci_mmcfg_list); -+} -+ -+struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start, -+ int end, u64 addr) -+{ -+ struct pci_mmcfg_region *new; -+ struct resource *res; -+ -+ if (addr == 0) -+ return NULL; -+ -+ new = kzalloc(sizeof(*new), GFP_KERNEL); -+ if (!new) -+ return NULL; -+ -+ new->address = addr; -+ new->segment = segment; -+ new->start_bus = start; -+ new->end_bus = end; -+ -+ res = &new->res; -+ res->start = addr + PCI_MMCFG_BUS_OFFSET(start); -+ res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1; -+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; -+ snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, -+ "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); -+ res->name = new->name; -+ -+ return new; -+} -+ -+struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, -+ int end, u64 addr) -+{ -+ struct 
pci_mmcfg_region *new; -+ -+ new = pci_mmconfig_alloc(segment, start, end, addr); -+ if (new) { -+ mutex_lock(&pci_mmcfg_lock); -+ list_add_sorted(new); -+ mutex_unlock(&pci_mmcfg_lock); -+ -+ pr_info(PREFIX -+ "MMCONFIG for domain %04x [bus %02x-%02x] at %pR " -+ "(base %#lx)\n", -+ segment, start, end, &new->res, (unsigned long)addr); -+ } -+ -+ return new; -+} -+ -+int __init pci_mmconfig_inject(struct pci_mmcfg_region *cfg) -+{ -+ struct pci_mmcfg_region *cfg_conflict; -+ int err = 0; -+ -+ mutex_lock(&pci_mmcfg_lock); -+ cfg_conflict = pci_mmconfig_lookup(cfg->segment, cfg->start_bus); -+ if (cfg_conflict) { -+ if (cfg_conflict->end_bus < cfg->end_bus) -+ pr_info(FW_INFO "MMCONFIG for " -+ "domain %04x [bus %02x-%02x] " -+ "only partially covers this bridge\n", -+ cfg_conflict->segment, cfg_conflict->start_bus, -+ cfg_conflict->end_bus); -+ err = -EEXIST; -+ goto out; -+ } -+ -+ if (pci_mmcfg_arch_map(cfg)) { -+ pr_warn("fail to map MMCONFIG %pR.\n", &cfg->res); -+ err = -ENOMEM; -+ goto out; -+ } else { -+ list_add_sorted(cfg); -+ pr_info("MMCONFIG at %pR (base %#lx)\n", -+ &cfg->res, (unsigned long)cfg->address); -+ -+ } -+out: -+ mutex_unlock(&pci_mmcfg_lock); -+ return err; -+} -+ -+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus) -+{ -+ struct pci_mmcfg_region *cfg; -+ -+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) -+ if (cfg->segment == segment && -+ cfg->start_bus <= bus && bus <= cfg->end_bus) -+ return cfg; -+ -+ return NULL; -+} -+ -+int __init __weak acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, ++static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, + struct acpi_mcfg_allocation *cfg) +{ -+ return 0; ++ int year; ++ ++ if (IS_ENABLED(CONFIG_ARM64)) ++ return 0; ++ ++ if (cfg->address < 0xFFFFFFFF) ++ return 0; ++ ++ if (!strncmp(mcfg->header.oem_id, "SGI", 3)) ++ return 0; ++ ++ if (mcfg->header.revision >= 1) { ++ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && ++ year >= 2010) ++ return 0; ++ } ++ ++ pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx " ++ "is above 4GB, ignored\n", cfg->pci_segment, ++ cfg->start_bus_number, cfg->end_bus_number, cfg->address); ++ return -EINVAL; +} + -+int __init pci_parse_mcfg(struct acpi_table_header *header) ++int __init acpi_parse_mcfg(struct acpi_table_header *header) +{ + struct acpi_table_mcfg *mcfg; + struct acpi_mcfg_allocation *cfg_table, *cfg; + struct acpi_mcfg_fixup *fixup; -+ struct pci_mmcfg_region *new; ++ struct pci_ecam_region *new; + unsigned long i; + int entries; + @@ -5679,7 +5314,7 @@ index 0000000..b13a9e4 + mcfg = (struct acpi_table_mcfg *)header; + + /* how many config structures do we have */ -+ free_all_mmcfg(); ++ pci_ecam_free_all(); + entries = 0; + i = header->length - sizeof(struct acpi_table_mcfg); + while (i >= sizeof(struct acpi_mcfg_allocation)) { @@ -5687,7 +5322,7 @@ index 0000000..b13a9e4 + i -= sizeof(struct acpi_mcfg_allocation); + } + if (entries == 0) { -+ pr_err(PREFIX "MMCONFIG has no entries\n"); ++ pr_err(PREFIX "MCFG table has no entries\n"); + return -ENODEV; + } + @@ -5703,15 +5338,15 @@ index 0000000..b13a9e4 + for (i = 0; i < entries; i++) { + cfg = &cfg_table[i]; + if (acpi_mcfg_check_entry(mcfg, cfg)) { -+ free_all_mmcfg(); ++ pci_ecam_free_all(); + return -ENODEV; + } + -+ new = pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number, -+ cfg->end_bus_number, cfg->address); ++ new = pci_ecam_add(cfg->pci_segment, cfg->start_bus_number, ++ cfg->end_bus_number, cfg->address); + if (!new) { + pr_warn(PREFIX "no memory for 
MCFG entries\n"); -+ free_all_mmcfg(); ++ pci_ecam_free_all(); + return -ENOMEM; + } + if (fixup < __end_acpi_mcfg_fixups) @@ -5721,29 +5356,6 @@ index 0000000..b13a9e4 + return 0; +} + -+/* Delete MMCFG information for host bridges */ -+int pci_mmconfig_delete(u16 seg, u8 start, u8 end) -+{ -+ struct pci_mmcfg_region *cfg; -+ -+ mutex_lock(&pci_mmcfg_lock); -+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) -+ if (cfg->segment == seg && cfg->start_bus == start && -+ cfg->end_bus == end) { -+ list_del_rcu(&cfg->list); -+ synchronize_rcu(); -+ pci_mmcfg_arch_unmap(cfg); -+ if (cfg->res.parent) -+ release_resource(&cfg->res); -+ mutex_unlock(&pci_mmcfg_lock); -+ kfree(cfg); -+ return 0; -+ } -+ mutex_unlock(&pci_mmcfg_lock); -+ -+ return -ENOENT; -+} -+ +void __init __weak pci_mmcfg_early_init(void) +{ + @@ -5751,17 +5363,16 @@ index 0000000..b13a9e4 + +void __init __weak pci_mmcfg_late_init(void) +{ -+ struct pci_mmcfg_region *cfg; ++ struct pci_ecam_region *cfg; + -+ acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); ++ acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); + -+ if (list_empty(&pci_mmcfg_list)) ++ if (list_empty(&pci_ecam_list)) + return; ++ if (!pci_ecam_arch_init()) ++ pci_ecam_free_all(); + -+ if (!pci_mmcfg_arch_init()) -+ free_all_mmcfg(); -+ -+ list_for_each_entry(cfg, &pci_mmcfg_list, list) ++ list_for_each_entry(cfg, &pci_ecam_list, list) + insert_resource(&iomem_resource, &cfg->res); +} diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c @@ -5784,19 +5395,46 @@ index f9eeae8..39748bb 100644 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c -index 7962651..b289cb4 100644 +index 7962651..b1ec78b 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c -@@ -83,6 +83,38 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, +@@ -32,7 +32,7 @@ static struct acpi_table_madt *get_madt_table(void) + } + + static int map_lapic_id(struct acpi_subtable_header *entry, +- u32 acpi_id, int *apic_id) ++ u32 acpi_id, phys_cpuid_t *apic_id) + { + struct acpi_madt_local_apic *lapic = + container_of(entry, struct acpi_madt_local_apic, header); +@@ -48,7 +48,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry, + } + + static int map_x2apic_id(struct acpi_subtable_header *entry, +- int device_declaration, u32 acpi_id, int *apic_id) ++ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) + { + struct acpi_madt_local_x2apic *apic = + container_of(entry, struct acpi_madt_local_x2apic, header); +@@ -65,7 +65,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry, + } + + static int map_lsapic_id(struct acpi_subtable_header *entry, +- int device_declaration, u32 acpi_id, int *apic_id) ++ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) + { + struct acpi_madt_local_sapic *lsapic = + container_of(entry, struct acpi_madt_local_sapic, header); +@@ -83,10 +83,35 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, return 0; } +-static int map_madt_entry(int type, u32 acpi_id) +/* -+ * On ARM platform, MPIDR value is the hardware ID as apic ID -+ * on Intel platforms ++ * Retrieve the ARM CPU physical identifier (MPIDR) + */ +static int map_gicc_mpidr(struct acpi_subtable_header *entry, -+ int device_declaration, u32 acpi_id, int *mpidr) ++ int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr) +{ + struct acpi_madt_generic_interrupt *gicc = + container_of(entry, struct acpi_madt_generic_interrupt, header); @@ 
-5804,29 +5442,28 @@ index 7962651..b289cb4 100644 + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return -ENODEV; + -+ /* In the GIC interrupt model, logical processors are -+ * required to have a Processor Device object in the DSDT, -+ * so we should check device_declaration here ++ /* device_declaration means Device object in DSDT, in the ++ * GIC interrupt model, logical processors are required to ++ * have a Processor Device object in the DSDT, so we should ++ * check device_declaration here + */ + if (device_declaration && (gicc->uid == acpi_id)) { -+ /* -+ * bits other than [0:7] Aff0, [8:15] Aff1, [16:23] Aff2 and -+ * [32:39] Aff3 must be 0 which is defined in ACPI 5.1, so pack -+ * the Affx fields into a single 32 bit identifier to accommodate -+ * the acpi processor drivers. -+ */ -+ *mpidr = ((gicc->arm_mpidr & 0xff00000000) >> 8) -+ | gicc->arm_mpidr; ++ *mpidr = gicc->arm_mpidr; + return 0; + } + + return -EINVAL; +} + - static int map_madt_entry(int type, u32 acpi_id) ++static phys_cpuid_t map_madt_entry(int type, u32 acpi_id) { unsigned long madt_end, entry; -@@ -111,6 +143,9 @@ static int map_madt_entry(int type, u32 acpi_id) +- int phys_id = -1; /* CPU hardware ID */ ++ phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */ + struct acpi_table_madt *madt; + + madt = get_madt_table(); +@@ -111,18 +136,21 @@ static int map_madt_entry(int type, u32 acpi_id) } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { if (!map_lsapic_id(header, type, acpi_id, &phys_id)) break; @@ -5836,7 +5473,21 @@ index 7962651..b289cb4 100644 } entry += header->length; } -@@ -143,6 +178,8 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) + return phys_id; + } + +-static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) ++static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id) + { + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + struct acpi_subtable_header *header; +- int phys_id = -1; ++ phys_cpuid_t phys_id = PHYS_CPUID_INVALID; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) + goto exit; +@@ -143,33 +171,35 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) map_lsapic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) map_x2apic_id(header, type, acpi_id, &phys_id); @@ -5845,135 +5496,170 @@ index 7962651..b289cb4 100644 exit: kfree(buffer.pointer); + return phys_id; + } + +-int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) ++phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) + { +- int phys_id; ++ phys_cpuid_t phys_id; + + phys_id = map_mat_entry(handle, type, acpi_id); +- if (phys_id == -1) ++ if (phys_id == PHYS_CPUID_INVALID) + phys_id = map_madt_entry(type, acpi_id); + + return phys_id; + } + +-int acpi_map_cpuid(int phys_id, u32 acpi_id) ++int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) + { + #ifdef CONFIG_SMP + int i; + #endif + +- if (phys_id == -1) { ++ if (phys_id == PHYS_CPUID_INVALID) { + /* + * On UP processor, there is no _MAT or MADT table. +- * So above phys_id is always set to -1. ++ * So above phys_id is always set to PHYS_CPUID_INVALID. + * + * BIOS may define multiple CPU handles even for UP processor. 
+ * For example, +@@ -190,7 +220,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id) + if (nr_cpu_ids <= 1 && acpi_id == 0) + return acpi_id; + else +- return phys_id; ++ return -1; + } + + #ifdef CONFIG_SMP +@@ -208,7 +238,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id) + + int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) + { +- int phys_id; ++ phys_cpuid_t phys_id; + + phys_id = acpi_get_phys_id(handle, type, acpi_id); + diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c -index bbca783..1b9cca3 100644 +index bbca783..f6ecbd1 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c -@@ -987,13 +987,82 @@ static bool acpi_of_driver_match_device(struct device *dev, - bool acpi_driver_match_device(struct device *dev, - const struct device_driver *drv) - { -- if (!drv->acpi_match_table) -- return acpi_of_driver_match_device(dev, drv); -+ bool ret = false; +@@ -907,10 +907,19 @@ static const struct acpi_device_id *__acpi_match_device( + if (!device->status.present) + return NULL; -- return !!acpi_match_device(drv->acpi_match_table, dev); -+ if (drv->acpi_match_table) -+ ret = !!acpi_match_device(drv->acpi_match_table, dev); +- for (id = ids; id->id[0]; id++) +- list_for_each_entry(hwid, &device->pnp.ids, list) +- if (!strcmp((char *) id->id, hwid->id)) ++ for (id = ids; id->id[0] || id->cls; id++) { ++ list_for_each_entry(hwid, &device->pnp.ids, list) { ++ if (id->id[0] && !strcmp((char *) id->id, hwid->id)) { + return id; ++ } else if (id->cls) { ++ char buf[7]; + -+ /* Next, try to match with special "PRP0001" _HID */ -+ if (!ret && drv->of_match_table) -+ ret = acpi_of_driver_match_device(dev, drv); -+ -+ /* Next, try to match with PCI-defined class-code */ -+ if (!ret && drv->acpi_match_cls) -+ ret = acpi_match_device_cls(drv->acpi_match_cls, dev); -+ -+ return ret; - } - EXPORT_SYMBOL_GPL(acpi_driver_match_device); - -+/** -+ * acpi_match_device_cls - Match a struct device against a ACPI _CLS method -+ * @dev_cls: A pointer to struct acpi_device_cls object to match against. -+ * @dev: The ACPI device structure to match. -+ * -+ * Check if @dev has a valid ACPI and _CLS handle. If there is a -+ * struct acpi_device_cls object for that handle, use that object to match -+ * against the given struct acpi_device_cls object. -+ * -+ * Return true on success or false on failure. -+ */ -+bool acpi_match_device_cls(const struct acpi_device_cls *dev_cls, -+ const struct device *dev) -+{ -+ acpi_status status; -+ union acpi_object *pkg; -+ struct acpi_device_cls cls; -+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; -+ struct acpi_buffer format = { sizeof("NNN"), "NNN" }; -+ struct acpi_buffer state = { 0, NULL }; -+ struct acpi_device *adev = ACPI_COMPANION(dev); -+ -+ if (!adev || !adev->status.present || !dev_cls) -+ return false; -+ -+ status = acpi_evaluate_object(adev->handle, METHOD_NAME__CLS, -+ NULL, &buffer); -+ if (ACPI_FAILURE(status)) -+ return false; -+ -+ /** -+ * Note: -+ * ACPIv5.1 defines the package to contain 3 integers for -+ * Base-Class code, Sub-Class code, and Programming Interface code. 
-+ */ -+ pkg = buffer.pointer; -+ if (!pkg || -+ (pkg->type != ACPI_TYPE_PACKAGE) || -+ (pkg->package.count != 3)) { -+ dev_dbg(&adev->dev, "Invalid _CLS data\n"); -+ goto out; ++ sprintf(buf, "%06x", id->cls); ++ if (!strcmp(buf, hwid->id)) ++ return id; ++ } ++ } + } -+ -+ state.length = sizeof(struct acpi_device_cls); -+ state.pointer = &cls; -+ -+ status = acpi_extract_package(pkg, &format, &state); -+ if (ACPI_FAILURE(status)) -+ goto out; -+ -+ return (dev_cls->base_class == cls.base_class && -+ dev_cls->sub_class == cls.sub_class && -+ dev_cls->prog_interface == cls.prog_interface); -+out: -+ kfree(pkg); -+ return false; -+} -+EXPORT_SYMBOL_GPL(acpi_match_device_cls); -+ - static void acpi_free_power_resources_lists(struct acpi_device *device) - { - int i; -diff --git a/drivers/acpi/sleep_arm.c b/drivers/acpi/sleep_arm.c -new file mode 100644 -index 0000000..54578ef ---- /dev/null -+++ b/drivers/acpi/sleep_arm.c -@@ -0,0 +1,28 @@ -+/* -+ * ARM64 Specific Sleep Functionality -+ * -+ * Copyright (C) 2013-2014, Linaro Ltd. -+ * Author: Graeme Gregory -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ */ -+ -+#include -+ -+/* -+ * Currently the ACPI 5.1 standard does not define S states in a -+ * manner which is usable for ARM64. These two stubs are sufficient -+ * that system initialises and device PM works. -+ */ -+u32 acpi_target_system_state(void) -+{ -+ return ACPI_STATE_S0; -+} -+EXPORT_SYMBOL_GPL(acpi_target_system_state); -+ -+int __init acpi_sleep_init(void) -+{ -+ return -ENOSYS; -+} + + return NULL; + } +@@ -1974,6 +1983,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, + if (info->valid & ACPI_VALID_UID) + pnp->unique_id = kstrdup(info->unique_id.string, + GFP_KERNEL); ++ if (info->valid & ACPI_VALID_CLS) ++ acpi_add_id(pnp, info->cls.string); + + kfree(info); + diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c -index 93b8152..42d314f 100644 +index 93b8152..2e19189 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c -@@ -183,6 +183,49 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) +@@ -23,6 +23,8 @@ + * + */ + ++/* Uncomment next line to get verbose printout */ ++/* #define DEBUG */ + #define pr_fmt(fmt) "ACPI: " fmt + + #include +@@ -61,9 +63,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) + { + struct acpi_madt_local_apic *p = + (struct acpi_madt_local_apic *)header; +- pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", +- p->processor_id, p->id, +- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); ++ pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", ++ p->processor_id, p->id, ++ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + } + break; + +@@ -71,9 +73,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) + { + struct acpi_madt_local_x2apic *p = + (struct acpi_madt_local_x2apic *)header; +- pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n", +- p->local_apic_id, p->uid, +- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); ++ pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n", ++ p->local_apic_id, p->uid, ++ (p->lapic_flags & ACPI_MADT_ENABLED) ? 
"enabled" : "disabled"); + } + break; + +@@ -81,8 +83,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) + { + struct acpi_madt_io_apic *p = + (struct acpi_madt_io_apic *)header; +- pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", +- p->id, p->address, p->global_irq_base); ++ pr_debug("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", ++ p->id, p->address, p->global_irq_base); + } + break; + +@@ -155,9 +157,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) + { + struct acpi_madt_io_sapic *p = + (struct acpi_madt_io_sapic *)header; +- pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n", +- p->id, (void *)(unsigned long)p->address, +- p->global_irq_base); ++ pr_debug("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n", ++ p->id, (void *)(unsigned long)p->address, ++ p->global_irq_base); + } + break; + +@@ -165,9 +167,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) + { + struct acpi_madt_local_sapic *p = + (struct acpi_madt_local_sapic *)header; +- pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", +- p->processor_id, p->id, p->eid, +- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); ++ pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", ++ p->processor_id, p->id, p->eid, ++ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + } + break; + +@@ -183,6 +185,28 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) } break; @@ -5981,10 +5667,10 @@ index 93b8152..42d314f 100644 + { + struct acpi_madt_generic_interrupt *p = + (struct acpi_madt_generic_interrupt *)header; -+ pr_info("GICC (acpi_id[0x%04x] address[%p] MPIDR[0x%llx] %s)\n", -+ p->uid, (void *)(unsigned long)p->base_address, -+ p->arm_mpidr, -+ (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); ++ pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n", ++ p->uid, p->base_address, ++ p->arm_mpidr, ++ (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + + } + break; @@ -5993,30 +5679,9 @@ index 93b8152..42d314f 100644 + { + struct acpi_madt_generic_distributor *p = + (struct acpi_madt_generic_distributor *)header; -+ pr_info("GIC Distributor (gic_id[0x%04x] address[%p] gsi_base[%d])\n", -+ p->gic_id, -+ (void *)(unsigned long)p->base_address, -+ p->global_irq_base); -+ } -+ break; -+ -+ case ACPI_MADT_TYPE_GENERIC_MSI_FRAME: -+ { -+ struct acpi_madt_generic_msi_frame *p = -+ (struct acpi_madt_generic_msi_frame *)header; -+ pr_info("GIC MSI Frame (msi_fame_id[%d] address[%p])\n", -+ p->msi_frame_id, -+ (void *)(unsigned long)p->base_address); -+ } -+ break; -+ -+ case ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR: -+ { -+ struct acpi_madt_generic_redistributor *p = -+ (struct acpi_madt_generic_redistributor *)header; -+ pr_info("GIC Redistributor (address[%p] region_size[0x%x])\n", -+ (void *)(unsigned long)p->base_address, -+ p->length); ++ pr_debug("GIC Distributor (gic_id[0x%04x] address[%llx] gsi_base[%d])\n", ++ p->gic_id, p->base_address, ++ p->global_irq_base); + } + break; + @@ -6071,22 +5736,27 @@ index 5f60155..50305e3 100644 help This option adds support for ATA-related ACPI objects. 
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c -index 78d6ae0..d110c95 100644 +index 78d6ae0..842cd13 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c -@@ -78,12 +78,24 @@ static const struct of_device_id ahci_of_match[] = { +@@ -20,6 +20,8 @@ + #include + #include + #include ++#include ++#include + #include "ahci.h" + + #define DRV_NAME "ahci" +@@ -78,12 +80,19 @@ static const struct of_device_id ahci_of_match[] = { }; MODULE_DEVICE_TABLE(of, ahci_of_match); -+#ifdef CONFIG_ATA_ACPI +static const struct acpi_device_id ahci_acpi_match[] = { -+ { "AMDI0600", 0 }, /* AMD Seattle AHCI */ -+ { }, ++ { "", 0, PCI_CLASS_STORAGE_SATA_AHCI }, ++ {}, +}; +MODULE_DEVICE_TABLE(acpi, ahci_acpi_match); -+#endif -+ -+static const struct acpi_device_cls ahci_cls = {0x01, 0x06, 0x01}; + static struct platform_driver ahci_driver = { .probe = ahci_probe, @@ -6094,8 +5764,7 @@ index 78d6ae0..d110c95 100644 .driver = { .name = DRV_NAME, .of_match_table = ahci_of_match, -+ .acpi_match_cls = &ahci_cls, -+ .acpi_match_table = ACPI_PTR(ahci_acpi_match), ++ .acpi_match_table = ahci_acpi_match, .pm = &ahci_pm_ops, }, }; @@ -6354,6 +6023,2179 @@ index a3025e7..3b2e2d0 100644 + acpi_table_parse(ACPI_SIG_GTDT, arch_timer_acpi_init); +} +#endif +diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig +index cb59619..6289dd9 100644 +--- a/drivers/edac/Kconfig ++++ b/drivers/edac/Kconfig +@@ -10,7 +10,7 @@ config EDAC_SUPPORT + menuconfig EDAC + bool "EDAC (Error Detection And Correction) reporting" + depends on HAS_IOMEM +- depends on X86 || PPC || TILE || ARM || EDAC_SUPPORT ++ depends on X86 || PPC || TILE || ARM || ARM64 || EDAC_SUPPORT + help + EDAC is designed to report errors in the core system. + These are low-level errors that are reported in the CPU or +@@ -392,4 +392,11 @@ config EDAC_SYNOPSYS + Support for error detection and correction on the Synopsys DDR + memory controller. + ++config EDAC_XGENE ++ tristate "APM X-Gene SoC" ++ depends on EDAC_MM_EDAC && ARM64 ++ help ++ Support for error detection and correction on the ++ APM X-Gene family of SOCs. ++ + endif # EDAC +diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile +index b255f36..4c2b0bd 100644 +--- a/drivers/edac/Makefile ++++ b/drivers/edac/Makefile +@@ -68,3 +68,5 @@ obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o + + obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o + obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o ++ ++obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o +diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c +new file mode 100644 +index 0000000..027e5d9 +--- /dev/null ++++ b/drivers/edac/xgene_edac.c +@@ -0,0 +1,2132 @@ ++/* ++ * APM X-Gene SoC EDAC (error detection and correction) Module ++ * ++ * Copyright (c) 2014, Applied Micro Circuits Corporation ++ * Author: Feng Kan ++ * Loc Ho ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include "edac_core.h" ++ ++#define EDAC_MOD_STR "xgene_edac" ++ ++static int edac_mc_idx; ++static int edac_mc_active_mask; ++static int edac_mc_registered_mask; ++static DEFINE_MUTEX(xgene_edac_lock); ++ ++/* Global error configuration status registers (CSR) */ ++#define PCPHPERRINTSTS 0x0000 ++#define PCPHPERRINTMSK 0x0004 ++#define MCU_CTL_ERR_MASK BIT(12) ++#define IOB_PA_ERR_MASK BIT(11) ++#define IOB_BA_ERR_MASK BIT(10) ++#define IOB_XGIC_ERR_MASK BIT(9) ++#define IOB_RB_ERR_MASK BIT(8) ++#define L3C_UNCORR_ERR_MASK BIT(5) ++#define MCU_UNCORR_ERR_MASK BIT(4) ++#define PMD3_MERR_MASK BIT(3) ++#define PMD2_MERR_MASK BIT(2) ++#define PMD1_MERR_MASK BIT(1) ++#define PMD0_MERR_MASK BIT(0) ++#define PCPLPERRINTSTS 0x0008 ++#define PCPLPERRINTMSK 0x000C ++#define CSW_SWITCH_TRACE_ERR_MASK BIT(2) ++#define L3C_CORR_ERR_MASK BIT(1) ++#define MCU_CORR_ERR_MASK BIT(0) ++#define MEMERRINTSTS 0x0010 ++#define MEMERRINTMSK 0x0014 ++ ++/* Memory controller error CSR */ ++#define MCU_MAX_RANK 8 ++#define MCU_RANK_STRIDE 0x40 ++ ++#define MCUGECR 0x0110 ++#define MCU_GECR_DEMANDUCINTREN_MASK BIT(0) ++#define MCU_GECR_BACKUCINTREN_MASK BIT(1) ++#define MCU_GECR_CINTREN_MASK BIT(2) ++#define MUC_GECR_MCUADDRERREN_MASK BIT(9) ++#define MCUGESR 0x0114 ++#define MCU_GESR_ADDRNOMATCH_ERR_MASK BIT(7) ++#define MCU_GESR_ADDRMULTIMATCH_ERR_MASK BIT(6) ++#define MCU_GESR_PHYP_ERR_MASK BIT(3) ++#define MCUESRR0 0x0314 ++#define MCU_ESRR_MULTUCERR_MASK BIT(3) ++#define MCU_ESRR_BACKUCERR_MASK BIT(2) ++#define MCU_ESRR_DEMANDUCERR_MASK BIT(1) ++#define MCU_ESRR_CERR_MASK BIT(0) ++#define MCUESRRA0 0x0318 ++#define MCUEBLRR0 0x031c ++#define MCU_EBLRR_ERRBANK_RD(src) (((src) & 0x00000007) >> 0) ++#define MCUERCRR0 0x0320 ++#define MCU_ERCRR_ERRROW_RD(src) (((src) & 0xFFFF0000) >> 16) ++#define MCU_ERCRR_ERRCOL_RD(src) ((src) & 0x00000FFF) ++#define MCUSBECNT0 0x0324 ++#define MCU_SBECNT_COUNT(src) ((src) & 0xFFFF) ++ ++#define CSW_CSWCR 0x0000 ++#define CSW_CSWCR_DUALMCB_MASK BIT(0) ++ ++#define MCBADDRMR 0x0000 ++#define MCBADDRMR_MCU_INTLV_MODE_MASK BIT(3) ++#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2) ++#define MCBADDRMR_MCB_INTLV_MODE_MASK BIT(1) ++#define MCBADDRMR_ADDRESS_MODE_MASK BIT(0) ++ ++struct xgene_edac_mc_ctx { ++ char *name; ++ void __iomem *pcp_csr; ++ void __iomem *csw_csr; ++ void __iomem *mcba_csr; ++ void __iomem *mcbb_csr; ++ void __iomem *mcu_csr; ++ int mcu_id; ++}; ++ ++#define to_mci(k) container_of(k, struct mem_ctl_info, dev) ++ ++#ifdef CONFIG_EDAC_DEBUG ++static ssize_t xgene_edac_mc_err_inject_write(struct file *file, ++ const char __user *data, ++ size_t count, loff_t *ppos) ++{ ++ struct mem_ctl_info *mci = file->private_data; ++ struct xgene_edac_mc_ctx *ctx = mci->pvt_info; ++ int i; ++ ++ for (i = 0; i < MCU_MAX_RANK; i++) { ++ writel(MCU_ESRR_MULTUCERR_MASK | MCU_ESRR_BACKUCERR_MASK | ++ MCU_ESRR_DEMANDUCERR_MASK | MCU_ESRR_CERR_MASK, ++ ctx->mcu_csr + MCUESRRA0 + i * MCU_RANK_STRIDE); ++ } ++ return count; ++} ++ ++static const struct file_operations xgene_edac_mc_debug_inject_fops = { ++ .open = simple_open, ++ .write = xgene_edac_mc_err_inject_write, ++ .llseek = generic_file_llseek, ++}; ++ ++static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci) ++{ ++ if (!mci->debugfs) ++ return; ++ ++ debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, ++ &xgene_edac_mc_debug_inject_fops); ++} ++#else ++static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci) ++{ ++} ++#endif ++ 
++static void xgene_edac_mc_check(struct mem_ctl_info *mci) ++{ ++ struct xgene_edac_mc_ctx *ctx = mci->pvt_info; ++ u32 pcp_hp_stat; ++ u32 pcp_lp_stat; ++ u32 reg; ++ u32 rank; ++ u32 bank; ++ u32 count; ++ u32 col_row; ++ ++ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS); ++ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS); ++ if (!((MCU_UNCORR_ERR_MASK & pcp_hp_stat) || ++ (MCU_CTL_ERR_MASK & pcp_hp_stat) || ++ (MCU_CORR_ERR_MASK & pcp_lp_stat))) ++ return; ++ ++ for (rank = 0; rank < MCU_MAX_RANK; rank++) { ++ reg = readl(ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE); ++ ++ /* Detect uncorrectable memory error */ ++ if (reg & (MCU_ESRR_DEMANDUCERR_MASK | ++ MCU_ESRR_BACKUCERR_MASK)) { ++ /* Detected uncorrectable memory error */ ++ edac_mc_chipset_printk(mci, KERN_ERR, "X-Gene", ++ "MCU uncorrectable error at rank %d\n", rank); ++ ++ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, ++ 1, 0, 0, 0, 0, 0, -1, mci->ctl_name, ""); ++ } ++ ++ /* Detect correctable memory error */ ++ if (reg & MCU_ESRR_CERR_MASK) { ++ bank = readl(ctx->mcu_csr + MCUEBLRR0 + ++ rank * MCU_RANK_STRIDE); ++ col_row = readl(ctx->mcu_csr + MCUERCRR0 + ++ rank * MCU_RANK_STRIDE); ++ count = readl(ctx->mcu_csr + MCUSBECNT0 + ++ rank * MCU_RANK_STRIDE); ++ edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene", ++ "MCU correctable error at rank %d bank %d column %d row %d count %d\n", ++ rank, MCU_EBLRR_ERRBANK_RD(bank), ++ MCU_ERCRR_ERRCOL_RD(col_row), ++ MCU_ERCRR_ERRROW_RD(col_row), ++ MCU_SBECNT_COUNT(count)); ++ ++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, ++ 1, 0, 0, 0, 0, 0, -1, mci->ctl_name, ""); ++ } ++ ++ /* Clear all error registers */ ++ writel(0x0, ctx->mcu_csr + MCUEBLRR0 + rank * MCU_RANK_STRIDE); ++ writel(0x0, ctx->mcu_csr + MCUERCRR0 + rank * MCU_RANK_STRIDE); ++ writel(0x0, ctx->mcu_csr + MCUSBECNT0 + ++ rank * MCU_RANK_STRIDE); ++ writel(reg, ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE); ++ } ++ ++ /* Detect memory controller error */ ++ reg = readl(ctx->mcu_csr + MCUGESR); ++ if (reg) { ++ if (reg & MCU_GESR_ADDRNOMATCH_ERR_MASK) ++ edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene", ++ "MCU address miss-match error\n"); ++ if (reg & MCU_GESR_ADDRMULTIMATCH_ERR_MASK) ++ edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene", ++ "MCU address multi-match error\n"); ++ ++ writel(reg, ctx->mcu_csr + MCUGESR); ++ } ++} ++ ++static irqreturn_t xgene_edac_mc_isr(int irq, void *dev_id) ++{ ++ struct mem_ctl_info *mci = dev_id; ++ struct xgene_edac_mc_ctx *ctx = mci->pvt_info; ++ u32 pcp_hp_stat; ++ u32 pcp_lp_stat; ++ ++ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS); ++ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS); ++ if (!((MCU_UNCORR_ERR_MASK & pcp_hp_stat) || ++ (MCU_CTL_ERR_MASK & pcp_hp_stat) || ++ (MCU_CORR_ERR_MASK & pcp_lp_stat))) ++ return IRQ_NONE; ++ ++ xgene_edac_mc_check(mci); ++ ++ return IRQ_HANDLED; ++} ++ ++static void xgene_edac_mc_irq_ctl(struct mem_ctl_info *mci, bool enable) ++{ ++ struct xgene_edac_mc_ctx *ctx = mci->pvt_info; ++ u32 val; ++ ++ if (edac_op_state != EDAC_OPSTATE_INT) ++ return; ++ ++ mutex_lock(&xgene_edac_lock); ++ ++ /* ++ * As there is only single bit for enable error and interrupt mask, ++ * we must only enable top level interrupt after all MCUs are ++ * registered. Otherwise, if there is an error and the corresponding ++ * MCU has not registered, the interrupt will never get cleared. To ++ * determine all MCU have registered, we will keep track of active ++ * MCUs and registered MCUs. 
++ */ ++ if (enable) { ++ /* Set registered MCU bit */ ++ edac_mc_registered_mask |= 1 << ctx->mcu_id; ++ ++ /* Enable interrupt after all active MCU registered */ ++ if (edac_mc_registered_mask == edac_mc_active_mask) { ++ /* Enable memory controller top level interrupt */ ++ val = readl(ctx->pcp_csr + PCPHPERRINTMSK); ++ val &= ~(MCU_UNCORR_ERR_MASK | MCU_CTL_ERR_MASK); ++ writel(val, ctx->pcp_csr + PCPHPERRINTMSK); ++ val = readl(ctx->pcp_csr + PCPLPERRINTMSK); ++ val &= ~MCU_CORR_ERR_MASK; ++ writel(val, ctx->pcp_csr + PCPLPERRINTMSK); ++ } ++ ++ /* Enable MCU interrupt and error reporting */ ++ val = readl(ctx->mcu_csr + MCUGECR); ++ val |= MCU_GECR_DEMANDUCINTREN_MASK | ++ MCU_GECR_BACKUCINTREN_MASK | ++ MCU_GECR_CINTREN_MASK | ++ MUC_GECR_MCUADDRERREN_MASK; ++ writel(val, ctx->mcu_csr + MCUGECR); ++ } else { ++ /* Disable MCU interrupt */ ++ val = readl(ctx->mcu_csr + MCUGECR); ++ val &= ~(MCU_GECR_DEMANDUCINTREN_MASK | ++ MCU_GECR_BACKUCINTREN_MASK | ++ MCU_GECR_CINTREN_MASK | ++ MUC_GECR_MCUADDRERREN_MASK); ++ writel(val, ctx->mcu_csr + MCUGECR); ++ ++ /* Disable memory controller top level interrupt */ ++ val = readl(ctx->pcp_csr + PCPHPERRINTMSK); ++ val |= MCU_UNCORR_ERR_MASK | MCU_CTL_ERR_MASK; ++ writel(val, ctx->pcp_csr + PCPHPERRINTMSK); ++ val = readl(ctx->pcp_csr + PCPLPERRINTMSK); ++ val |= MCU_CORR_ERR_MASK; ++ writel(val, ctx->pcp_csr + PCPLPERRINTMSK); ++ ++ /* Clear registered MCU bit */ ++ edac_mc_registered_mask &= ~(1 << ctx->mcu_id); ++ } ++ ++ mutex_unlock(&xgene_edac_lock); ++} ++ ++static int xgene_edac_mc_is_active(struct xgene_edac_mc_ctx *ctx, int mc_idx) ++{ ++ u32 reg; ++ u32 mcu_mask; ++ ++ reg = readl(ctx->csw_csr + CSW_CSWCR); ++ if (reg & CSW_CSWCR_DUALMCB_MASK) { ++ /* ++ * Dual MCB active - Determine if all 4 active or just MCU0 ++ * and MCU2 active ++ */ ++ reg = readl(ctx->mcbb_csr + MCBADDRMR); ++ mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5; ++ } else { ++ /* ++ * Single MCB active - Determine if MCU0/MCU1 or just MCU0 ++ * active ++ */ ++ reg = readl(ctx->mcba_csr + MCBADDRMR); ++ mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1; ++ } ++ ++ /* Save active MC mask if hasn't set already */ ++ if (!edac_mc_active_mask) ++ edac_mc_active_mask = mcu_mask; ++ ++ return (mcu_mask & (1 << mc_idx)) ? 
1 : 0; ++} ++ ++static int xgene_edac_mc_probe(struct platform_device *pdev) ++{ ++ struct mem_ctl_info *mci; ++ struct edac_mc_layer layers[2]; ++ struct xgene_edac_mc_ctx tmp_ctx; ++ struct xgene_edac_mc_ctx *ctx; ++ struct resource *res; ++ int rc = 0; ++ ++ if (!devres_open_group(&pdev->dev, xgene_edac_mc_probe, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ /* Retrieve resources */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no PCP resource address\n"); ++ rc = -EINVAL; ++ goto err_group; ++ } ++ tmp_ctx.pcp_csr = devm_ioremap(&pdev->dev, res->start, ++ resource_size(res)); ++ if (IS_ERR(tmp_ctx.pcp_csr)) { ++ dev_err(&pdev->dev, "no PCP resource address\n"); ++ rc = PTR_ERR(tmp_ctx.pcp_csr); ++ goto err_group; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ if (!res) { ++ dev_err(&pdev->dev, "no CSW resource address\n"); ++ rc = -EINVAL; ++ goto err_group; ++ } ++ tmp_ctx.csw_csr = devm_ioremap(&pdev->dev, res->start, ++ resource_size(res)); ++ if (IS_ERR(tmp_ctx.csw_csr)) { ++ dev_err(&pdev->dev, "no CSW resource address\n"); ++ rc = PTR_ERR(tmp_ctx.csw_csr); ++ goto err_group; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ if (!res) { ++ dev_err(&pdev->dev, "no MCBA resource address\n"); ++ rc = -EINVAL; ++ goto err_group; ++ } ++ tmp_ctx.mcba_csr = devm_ioremap(&pdev->dev, res->start, ++ resource_size(res)); ++ if (IS_ERR(tmp_ctx.mcba_csr)) { ++ dev_err(&pdev->dev, "no MCBA resource address\n"); ++ rc = PTR_ERR(tmp_ctx.mcba_csr); ++ goto err_group; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 3); ++ if (!res) { ++ dev_err(&pdev->dev, "no MCBB resource address\n"); ++ rc = -EINVAL; ++ goto err_group; ++ } ++ tmp_ctx.mcbb_csr = devm_ioremap(&pdev->dev, res->start, ++ resource_size(res)); ++ if (IS_ERR(tmp_ctx.mcbb_csr)) { ++ dev_err(&pdev->dev, "no MCBB resource address\n"); ++ rc = PTR_ERR(tmp_ctx.mcbb_csr); ++ goto err_group; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 4); ++ tmp_ctx.mcu_csr = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(tmp_ctx.mcu_csr)) { ++ dev_err(&pdev->dev, "no MCU resource address\n"); ++ rc = PTR_ERR(tmp_ctx.mcu_csr); ++ goto err_group; ++ } ++ /* Ignore non-active MCU */ ++ tmp_ctx.mcu_id = ((res->start >> 16) & 0xF) / 4; ++ if (!xgene_edac_mc_is_active(&tmp_ctx, tmp_ctx.mcu_id)) { ++ rc = -ENODEV; ++ goto err_group; ++ } ++ ++ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; ++ layers[0].size = 4; ++ layers[0].is_virt_csrow = true; ++ layers[1].type = EDAC_MC_LAYER_CHANNEL; ++ layers[1].size = 2; ++ layers[1].is_virt_csrow = false; ++ mci = edac_mc_alloc(edac_mc_idx++, ARRAY_SIZE(layers), layers, ++ sizeof(*ctx)); ++ if (!mci) { ++ rc = -ENOMEM; ++ goto err_group; ++ } ++ ++ ctx = mci->pvt_info; ++ *ctx = tmp_ctx; /* Copy over resource value */ ++ ctx->name = "xgene_edac_mc_err"; ++ mci->pdev = &pdev->dev; ++ dev_set_drvdata(mci->pdev, mci); ++ mci->ctl_name = ctx->name; ++ mci->dev_name = ctx->name; ++ ++ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | MEM_FLAG_RDDR3 | ++ MEM_FLAG_DDR | MEM_FLAG_DDR2 | MEM_FLAG_DDR3; ++ mci->edac_ctl_cap = EDAC_FLAG_SECDED; ++ mci->edac_cap = EDAC_FLAG_SECDED; ++ mci->mod_name = EDAC_MOD_STR; ++ mci->mod_ver = "0.1"; ++ mci->ctl_page_to_phys = NULL; ++ mci->scrub_cap = SCRUB_FLAG_HW_SRC; ++ mci->scrub_mode = SCRUB_HW_SRC; ++ ++ if (edac_op_state == EDAC_OPSTATE_POLL) ++ mci->edac_check = xgene_edac_mc_check; ++ ++ if (edac_mc_add_mc(mci)) { ++ dev_err(&pdev->dev, "edac_mc_add_mc failed\n"); ++ rc = 
-EINVAL; ++ goto err_free; ++ } ++ ++ xgene_edac_mc_create_debugfs_node(mci); ++ ++ if (edac_op_state == EDAC_OPSTATE_INT) { ++ int irq; ++ int i; ++ ++ for (i = 0; i < 2; i++) { ++ irq = platform_get_irq(pdev, i); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "No IRQ resource\n"); ++ rc = -EINVAL; ++ goto err_del; ++ } ++ rc = devm_request_irq(&pdev->dev, irq, ++ xgene_edac_mc_isr, IRQF_SHARED, ++ dev_name(&pdev->dev), mci); ++ if (rc) { ++ dev_err(&pdev->dev, ++ "Could not request IRQ %d\n", irq); ++ goto err_del; ++ } ++ } ++ } ++ ++ xgene_edac_mc_irq_ctl(mci, true); ++ ++ devres_remove_group(&pdev->dev, xgene_edac_mc_probe); ++ ++ dev_info(&pdev->dev, "X-Gene EDAC MC registered\n"); ++ return 0; ++ ++err_del: ++ edac_mc_del_mc(&pdev->dev); ++err_free: ++ edac_mc_free(mci); ++err_group: ++ devres_release_group(&pdev->dev, xgene_edac_mc_probe); ++ return rc; ++} ++ ++static int xgene_edac_mc_remove(struct platform_device *pdev) ++{ ++ struct mem_ctl_info *mci = dev_get_drvdata(&pdev->dev); ++ ++ xgene_edac_mc_irq_ctl(mci, false); ++ edac_mc_del_mc(&pdev->dev); ++ edac_mc_free(mci); ++ return 0; ++} ++ ++#ifdef CONFIG_OF ++static struct of_device_id xgene_edac_mc_of_match[] = { ++ { .compatible = "apm,xgene-edac-mc" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, xgene_edac_mc_of_match); ++#endif ++ ++static struct platform_driver xgene_edac_mc_driver = { ++ .probe = xgene_edac_mc_probe, ++ .remove = xgene_edac_mc_remove, ++ .driver = { ++ .name = "xgene-edac-mc", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(xgene_edac_mc_of_match), ++ }, ++}; ++ ++/* CPU L1/L2 error device */ ++#define MAX_CPU_PER_PMD 2 ++#define CPU_CSR_STRIDE 0x00100000 ++#define CPU_L2C_PAGE 0x000D0000 ++#define CPU_MEMERR_L2C_PAGE 0x000E0000 ++#define CPU_MEMERR_CPU_PAGE 0x000F0000 ++ ++#define MEMERR_CPU_ICFECR_PAGE_OFFSET 0x0000 ++#define MEMERR_CPU_ICFESR_PAGE_OFFSET 0x0004 ++#define MEMERR_CPU_ICFESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24) ++#define MEMERR_CPU_ICFESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16) ++#define MEMERR_CPU_ICFESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8) ++#define MEMERR_CPU_ICFESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4) ++#define MEMERR_CPU_ICFESR_MULTCERR_MASK BIT(2) ++#define MEMERR_CPU_ICFESR_CERR_MASK BIT(0) ++#define MEMERR_CPU_LSUESR_PAGE_OFFSET 0x000c ++#define MEMERR_CPU_LSUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24) ++#define MEMERR_CPU_LSUESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16) ++#define MEMERR_CPU_LSUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8) ++#define MEMERR_CPU_LSUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4) ++#define MEMERR_CPU_LSUESR_MULTCERR_MASK BIT(2) ++#define MEMERR_CPU_LSUESR_CERR_MASK BIT(0) ++#define MEMERR_CPU_LSUECR_PAGE_OFFSET 0x0008 ++#define MEMERR_CPU_MMUECR_PAGE_OFFSET 0x0010 ++#define MEMERR_CPU_MMUESR_PAGE_OFFSET 0x0014 ++#define MEMERR_CPU_MMUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24) ++#define MEMERR_CPU_MMUESR_ERRINDEX_RD(src) (((src) & 0x007F0000) >> 16) ++#define MEMERR_CPU_MMUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8) ++#define MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK BIT(7) ++#define MEMERR_CPU_MMUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4) ++#define MEMERR_CPU_MMUESR_MULTCERR_MASK BIT(2) ++#define MEMERR_CPU_MMUESR_CERR_MASK BIT(0) ++#define MEMERR_CPU_ICFESRA_PAGE_OFFSET 0x0804 ++#define MEMERR_CPU_LSUESRA_PAGE_OFFSET 0x080c ++#define MEMERR_CPU_MMUESRA_PAGE_OFFSET 0x0814 ++ ++#define MEMERR_L2C_L2ECR_PAGE_OFFSET 0x0000 ++#define MEMERR_L2C_L2ESR_PAGE_OFFSET 0x0004 ++#define 
MEMERR_L2C_L2ESR_ERRSYN_RD(src) (((src) & 0xFF000000) >> 24) ++#define MEMERR_L2C_L2ESR_ERRWAY_RD(src) (((src) & 0x00FC0000) >> 18) ++#define MEMERR_L2C_L2ESR_ERRCPU_RD(src) (((src) & 0x00020000) >> 17) ++#define MEMERR_L2C_L2ESR_ERRGROUP_RD(src) (((src) & 0x0000E000) >> 13) ++#define MEMERR_L2C_L2ESR_ERRACTION_RD(src) (((src) & 0x00001C00) >> 10) ++#define MEMERR_L2C_L2ESR_ERRTYPE_RD(src) (((src) & 0x00000300) >> 8) ++#define MEMERR_L2C_L2ESR_MULTUCERR_MASK BIT(3) ++#define MEMERR_L2C_L2ESR_MULTICERR_MASK BIT(2) ++#define MEMERR_L2C_L2ESR_UCERR_MASK BIT(1) ++#define MEMERR_L2C_L2ESR_ERR_MASK BIT(0) ++#define MEMERR_L2C_L2EALR_PAGE_OFFSET 0x0008 ++#define CPUX_L2C_L2RTOCR_PAGE_OFFSET 0x0010 ++#define MEMERR_L2C_L2EAHR_PAGE_OFFSET 0x000c ++#define CPUX_L2C_L2RTOSR_PAGE_OFFSET 0x0014 ++#define CPUX_L2C_L2RTOALR_PAGE_OFFSET 0x0018 ++#define CPUX_L2C_L2RTOAHR_PAGE_OFFSET 0x001c ++#define MEMERR_L2C_L2ESRA_PAGE_OFFSET 0x0804 ++ ++/* ++ * Processor Module Domain (PMD) context - Context for a pair of processsors. ++ * Each PMD consists of 2 CPUs and a shared L2 cache. Each CPU consists of ++ * its own L1 cache. ++ */ ++struct xgene_edac_pmd_ctx { ++ char *name; ++ void __iomem *pcp_csr; /* PCP CSR for reading error interrupt reg */ ++ void __iomem *pmd_csr; /* PMD CSR for reading L1/L2 error reg */ ++ int pmd; /* Identify the register in pcp_csr */ ++}; ++ ++static void xgene_edac_pmd_l1_check(struct edac_device_ctl_info *edac_dev, ++ int cpu_idx) ++{ ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ void __iomem *pg_f; ++ u32 val; ++ ++ pg_f = ctx->pmd_csr + cpu_idx * CPU_CSR_STRIDE + CPU_MEMERR_CPU_PAGE; ++ ++ val = readl(pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET); ++ if (val) { ++ dev_err(edac_dev->dev, ++ "CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n", ++ ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, ++ MEMERR_CPU_ICFESR_ERRWAY_RD(val), ++ MEMERR_CPU_ICFESR_ERRINDEX_RD(val), ++ MEMERR_CPU_ICFESR_ERRINFO_RD(val)); ++ if (val & MEMERR_CPU_ICFESR_CERR_MASK) ++ dev_err(edac_dev->dev, ++ "One or more correctable error\n"); ++ if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK) ++ dev_err(edac_dev->dev, "Multiple correctable error\n"); ++ switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) { ++ case 1: ++ dev_err(edac_dev->dev, "L1 TLB multiple hit\n"); ++ break; ++ case 2: ++ dev_err(edac_dev->dev, "Way select multiple hit\n"); ++ break; ++ case 3: ++ dev_err(edac_dev->dev, "Physical tag parity error\n"); ++ break; ++ case 4: ++ case 5: ++ dev_err(edac_dev->dev, "L1 data parity error\n"); ++ break; ++ case 6: ++ dev_err(edac_dev->dev, "L1 pre-decode parity error\n"); ++ break; ++ } ++ ++ /* Clear SW generated and HW errors */ ++ writel(0x0, pg_f + MEMERR_CPU_ICFESRA_PAGE_OFFSET); ++ writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET); ++ ++ if (val & (MEMERR_CPU_ICFESR_CERR_MASK | ++ MEMERR_CPU_ICFESR_MULTCERR_MASK)) ++ edac_device_handle_ce(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ } ++ ++ val = readl(pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET); ++ if (val) { ++ dev_err(edac_dev->dev, ++ "CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n", ++ ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, ++ MEMERR_CPU_LSUESR_ERRWAY_RD(val), ++ MEMERR_CPU_LSUESR_ERRINDEX_RD(val), ++ MEMERR_CPU_LSUESR_ERRINFO_RD(val)); ++ if (val & MEMERR_CPU_LSUESR_CERR_MASK) ++ dev_err(edac_dev->dev, ++ "One or more correctable error\n"); ++ if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK) ++ dev_err(edac_dev->dev, "Multiple correctable error\n"); ++ switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) { ++ case 0: ++ 
dev_err(edac_dev->dev, "Load tag error\n"); ++ break; ++ case 1: ++ dev_err(edac_dev->dev, "Load data error\n"); ++ break; ++ case 2: ++ dev_err(edac_dev->dev, "WSL multihit error\n"); ++ break; ++ case 3: ++ dev_err(edac_dev->dev, "Store tag error\n"); ++ break; ++ case 4: ++ dev_err(edac_dev->dev, ++ "DTB multihit from load pipeline error\n"); ++ break; ++ case 5: ++ dev_err(edac_dev->dev, ++ "DTB multihit from store pipeline error\n"); ++ break; ++ } ++ ++ /* Clear SW generated and HW errors */ ++ writel(0x0, pg_f + MEMERR_CPU_LSUESRA_PAGE_OFFSET); ++ writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET); ++ ++ if (val & (MEMERR_CPU_LSUESR_CERR_MASK | ++ MEMERR_CPU_LSUESR_MULTCERR_MASK)) ++ edac_device_handle_ce(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ else ++ edac_device_handle_ue(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ } ++ ++ val = readl(pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET); ++ if (val) { ++ dev_err(edac_dev->dev, ++ "CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n", ++ ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, ++ MEMERR_CPU_MMUESR_ERRWAY_RD(val), ++ MEMERR_CPU_MMUESR_ERRINDEX_RD(val), ++ MEMERR_CPU_MMUESR_ERRINFO_RD(val), ++ val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" : ++ "ICF"); ++ if (val & MEMERR_CPU_MMUESR_CERR_MASK) ++ dev_err(edac_dev->dev, ++ "One or more correctable error\n"); ++ if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK) ++ dev_err(edac_dev->dev, "Multiple correctable error\n"); ++ switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) { ++ case 0: ++ dev_err(edac_dev->dev, "Stage 1 UTB hit error\n"); ++ break; ++ case 1: ++ dev_err(edac_dev->dev, "Stage 1 UTB miss error\n"); ++ break; ++ case 2: ++ dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n"); ++ break; ++ case 3: ++ dev_err(edac_dev->dev, ++ "TMO operation single bank error\n"); ++ break; ++ case 4: ++ dev_err(edac_dev->dev, "Stage 2 UTB error\n"); ++ break; ++ case 5: ++ dev_err(edac_dev->dev, "Stage 2 UTB miss error\n"); ++ break; ++ case 6: ++ dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n"); ++ break; ++ case 7: ++ dev_err(edac_dev->dev, ++ "TMO operation multiple bank error\n"); ++ break; ++ } ++ ++ /* Clear SW generated and HW errors */ ++ writel(0x0, pg_f + MEMERR_CPU_MMUESRA_PAGE_OFFSET); ++ writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET); ++ ++ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); ++ } ++} ++ ++static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ void __iomem *pg_d; ++ void __iomem *pg_e; ++ u32 val_hi; ++ u32 val_lo; ++ u32 val; ++ ++ /* Check L2 */ ++ pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE; ++ val = readl(pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET); ++ if (val) { ++ val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET); ++ val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET); ++ dev_err(edac_dev->dev, ++ "PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n", ++ ctx->pmd, val, val_hi, val_lo); ++ dev_err(edac_dev->dev, ++ "ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n", ++ MEMERR_L2C_L2ESR_ERRSYN_RD(val), ++ MEMERR_L2C_L2ESR_ERRWAY_RD(val), ++ MEMERR_L2C_L2ESR_ERRCPU_RD(val), ++ MEMERR_L2C_L2ESR_ERRGROUP_RD(val), ++ MEMERR_L2C_L2ESR_ERRACTION_RD(val)); ++ ++ if (val & MEMERR_L2C_L2ESR_ERR_MASK) ++ dev_err(edac_dev->dev, ++ "One or more correctable error\n"); ++ if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK) ++ dev_err(edac_dev->dev, "Multiple correctable error\n"); ++ if (val & MEMERR_L2C_L2ESR_UCERR_MASK) ++ dev_err(edac_dev->dev, ++ "One or 
more uncorrectable error\n"); ++ if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple uncorrectable error\n"); ++ ++ switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) { ++ case 0: ++ dev_err(edac_dev->dev, "Outbound SDB parity error\n"); ++ break; ++ case 1: ++ dev_err(edac_dev->dev, "Inbound SDB parity error\n"); ++ break; ++ case 2: ++ dev_err(edac_dev->dev, "Tag ECC error\n"); ++ break; ++ case 3: ++ dev_err(edac_dev->dev, "Data ECC error\n"); ++ break; ++ } ++ ++ writel(0x0, pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET); ++ writel(0x0, pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET); ++ ++ /* Clear SW generated and HW errors */ ++ writel(0x0, pg_e + MEMERR_L2C_L2ESRA_PAGE_OFFSET); ++ writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET); ++ ++ if (val & (MEMERR_L2C_L2ESR_ERR_MASK | ++ MEMERR_L2C_L2ESR_MULTICERR_MASK)) ++ edac_device_handle_ce(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ if (val & (MEMERR_L2C_L2ESR_UCERR_MASK | ++ MEMERR_L2C_L2ESR_MULTUCERR_MASK)) ++ edac_device_handle_ue(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ } ++ ++ /* Check if any memory request timed out on L2 cache */ ++ pg_d = ctx->pmd_csr + CPU_L2C_PAGE; ++ val = readl(pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET); ++ if (val) { ++ val_lo = readl(pg_d + CPUX_L2C_L2RTOALR_PAGE_OFFSET); ++ val_hi = readl(pg_d + CPUX_L2C_L2RTOAHR_PAGE_OFFSET); ++ dev_err(edac_dev->dev, ++ "PMD%d L2C error L2C RTOSR 0x%08X @ 0x%08X.%08X\n", ++ ctx->pmd, val, val_hi, val_lo); ++ writel(0x0, pg_d + CPUX_L2C_L2RTOALR_PAGE_OFFSET); ++ writel(0x0, pg_d + CPUX_L2C_L2RTOAHR_PAGE_OFFSET); ++ writel(0x0, pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET); ++ } ++} ++ ++static void xgene_edac_pmd_check(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ u32 pcp_hp_stat; ++ int i; ++ ++ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS); ++ if (!((PMD0_MERR_MASK << ctx->pmd) & pcp_hp_stat)) ++ return; ++ ++ /* Check CPU L1 error */ ++ for (i = 0; i < MAX_CPU_PER_PMD; i++) ++ xgene_edac_pmd_l1_check(edac_dev, i); ++ ++ /* Check CPU L2 error */ ++ xgene_edac_pmd_l2_check(edac_dev); ++} ++ ++static irqreturn_t xgene_edac_pmd_isr(int irq, void *dev_id) ++{ ++ struct edac_device_ctl_info *edac_dev = dev_id; ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ u32 pcp_hp_stat; ++ ++ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS); ++ if (!(pcp_hp_stat & (PMD0_MERR_MASK << ctx->pmd))) ++ return IRQ_NONE; ++ ++ xgene_edac_pmd_check(edac_dev); ++ ++ return IRQ_HANDLED; ++} ++ ++static void xgene_edac_pmd_cpu_hw_cfg(struct edac_device_ctl_info *edac_dev, ++ int cpu) ++{ ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ void __iomem *pg_f = ctx->pmd_csr + cpu * CPU_CSR_STRIDE + ++ CPU_MEMERR_CPU_PAGE; ++ ++ /* ++ * Enable CPU memory error: ++ * MEMERR_CPU_ICFESRA, MEMERR_CPU_LSUESRA, and MEMERR_CPU_MMUESRA ++ */ ++ writel(0x00000301, pg_f + MEMERR_CPU_ICFECR_PAGE_OFFSET); ++ writel(0x00000301, pg_f + MEMERR_CPU_LSUECR_PAGE_OFFSET); ++ writel(0x00000101, pg_f + MEMERR_CPU_MMUECR_PAGE_OFFSET); ++} ++ ++static void xgene_edac_pmd_hw_cfg(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ void __iomem *pg_d = ctx->pmd_csr + CPU_L2C_PAGE; ++ void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE; ++ ++ /* Enable PMD memory error - MEMERR_L2C_L2ECR and L2C_L2RTOCR */ ++ writel(0x00000703, pg_e + MEMERR_L2C_L2ECR_PAGE_OFFSET); ++ writel(0x00000119, pg_d + CPUX_L2C_L2RTOCR_PAGE_OFFSET); ++} ++ ++static void xgene_edac_pmd_hw_ctl(struct edac_device_ctl_info *edac_dev, 
++ bool enable) ++{ ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ u32 val; ++ int i; ++ ++ /* Enable PMD error interrupt */ ++ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) { ++ mutex_lock(&xgene_edac_lock); ++ ++ val = readl(ctx->pcp_csr + PCPHPERRINTMSK); ++ if (enable) ++ val &= ~(PMD0_MERR_MASK << ctx->pmd); ++ else ++ val |= PMD0_MERR_MASK << ctx->pmd; ++ writel(val, ctx->pcp_csr + PCPHPERRINTMSK); ++ ++ mutex_unlock(&xgene_edac_lock); ++ } ++ ++ if (enable) { ++ xgene_edac_pmd_hw_cfg(edac_dev); ++ ++ /* Two CPUs per a PMD */ ++ for (i = 0; i < MAX_CPU_PER_PMD; i++) ++ xgene_edac_pmd_cpu_hw_cfg(edac_dev, i); ++ } ++} ++ ++#ifdef CONFIG_EDAC_DEBUG ++static ssize_t xgene_edac_pmd_l1_inject_ctrl_write(struct file *file, ++ const char __user *data, ++ size_t count, loff_t *ppos) ++{ ++ struct edac_device_ctl_info *edac_dev = file->private_data; ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ void __iomem *cpux_pg_f; ++ int i; ++ ++ for (i = 0; i < MAX_CPU_PER_PMD; i++) { ++ cpux_pg_f = ctx->pmd_csr + i * CPU_CSR_STRIDE + ++ CPU_MEMERR_CPU_PAGE; ++ ++ writel(MEMERR_CPU_ICFESR_MULTCERR_MASK | ++ MEMERR_CPU_ICFESR_CERR_MASK, ++ cpux_pg_f + MEMERR_CPU_ICFESRA_PAGE_OFFSET); ++ writel(MEMERR_CPU_LSUESR_MULTCERR_MASK | ++ MEMERR_CPU_LSUESR_CERR_MASK, ++ cpux_pg_f + MEMERR_CPU_LSUESRA_PAGE_OFFSET); ++ writel(MEMERR_CPU_MMUESR_MULTCERR_MASK | ++ MEMERR_CPU_MMUESR_CERR_MASK, ++ cpux_pg_f + MEMERR_CPU_MMUESRA_PAGE_OFFSET); ++ } ++ return count; ++} ++ ++static ssize_t xgene_edac_pmd_l2_inject_ctrl_write(struct file *file, ++ const char __user *data, ++ size_t count, loff_t *ppos) ++{ ++ struct edac_device_ctl_info *edac_dev = file->private_data; ++ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; ++ void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE; ++ ++ writel(MEMERR_L2C_L2ESR_MULTUCERR_MASK | ++ MEMERR_L2C_L2ESR_MULTICERR_MASK | ++ MEMERR_L2C_L2ESR_UCERR_MASK | ++ MEMERR_L2C_L2ESR_ERR_MASK, ++ pg_e + MEMERR_L2C_L2ESRA_PAGE_OFFSET); ++ return count; ++} ++ ++static const struct file_operations xgene_edac_pmd_debug_inject_fops[] = { ++ { ++ .open = simple_open, ++ .write = xgene_edac_pmd_l1_inject_ctrl_write, ++ .llseek = generic_file_llseek, }, ++ { ++ .open = simple_open, ++ .write = xgene_edac_pmd_l2_inject_ctrl_write, ++ .llseek = generic_file_llseek, }, ++ { } ++}; ++ ++static void xgene_edac_pmd_create_debugfs_nodes( ++ struct edac_device_ctl_info *edac_dev) ++{ ++ struct dentry *edac_debugfs; ++ ++ /* ++ * Todo: Switch to common EDAC debug file system for edac device ++ * when available. ++ */ ++ edac_debugfs = debugfs_create_dir(edac_dev->dev->kobj.name, NULL); ++ if (!edac_debugfs) ++ return; ++ ++ debugfs_create_file("l1_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev, ++ &xgene_edac_pmd_debug_inject_fops[0]); ++ debugfs_create_file("l2_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev, ++ &xgene_edac_pmd_debug_inject_fops[1]); ++} ++#else ++static void xgene_edac_pmd_create_debugfs_nodes( ++ struct edac_device_ctl_info *edac_dev) ++{ ++} ++#endif ++ ++static int xgene_edac_pmd_available(u32 efuse, int pmd) ++{ ++ return (efuse & (1 << pmd)) ? 
0 : 1; ++} ++ ++static int xgene_edac_pmd_probe(struct platform_device *pdev) ++{ ++ struct edac_device_ctl_info *edac_dev; ++ struct xgene_edac_pmd_ctx *ctx; ++ char edac_name[10]; ++ struct resource *res; ++ void __iomem *pmd_efuse; ++ int pmd; ++ int rc = 0; ++ ++ if (!devres_open_group(&pdev->dev, xgene_edac_pmd_probe, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ /* Find the PMD number from its address */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ if (!res || resource_size(res) <= 0) { ++ rc = -ENODEV; ++ goto err_group; ++ } ++ pmd = ((res->start >> 20) & 0x1E) >> 1; ++ ++ /* Determine if this PMD is disabled */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ if (!res || resource_size(res) <= 0) { ++ rc = -ENODEV; ++ goto err_group; ++ } ++ pmd_efuse = devm_ioremap(&pdev->dev, res->start, resource_size(res)); ++ if (IS_ERR(pmd_efuse)) { ++ dev_err(&pdev->dev, "no PMD efuse resource address\n"); ++ rc = PTR_ERR(pmd_efuse); ++ goto err_group; ++ } ++ if (!xgene_edac_pmd_available(readl(pmd_efuse), pmd)) { ++ rc = -ENODEV; ++ goto err_group; ++ } ++ devm_iounmap(&pdev->dev, pmd_efuse); ++ ++ sprintf(edac_name, "l2c%d", pmd); ++ edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx), ++ edac_name, 1, "l2c", 1, 2, NULL, ++ 0, edac_device_alloc_index()); ++ if (!edac_dev) { ++ rc = -ENOMEM; ++ goto err_group; ++ } ++ ++ ctx = edac_dev->pvt_info; ++ ctx->name = "xgene_pmd_err"; ++ ctx->pmd = pmd; ++ edac_dev->dev = &pdev->dev; ++ dev_set_drvdata(edac_dev->dev, edac_dev); ++ edac_dev->ctl_name = ctx->name; ++ edac_dev->dev_name = ctx->name; ++ edac_dev->mod_name = EDAC_MOD_STR; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no PCP resource address\n"); ++ rc = -EINVAL; ++ goto err_free; ++ } ++ ctx->pcp_csr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); ++ if (IS_ERR(ctx->pcp_csr)) { ++ dev_err(&pdev->dev, "no PCP resource address\n"); ++ rc = PTR_ERR(ctx->pcp_csr); ++ goto err_free; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ if (!res) { ++ dev_err(&pdev->dev, "no PMD resource address\n"); ++ rc = -EINVAL; ++ goto err_free; ++ } ++ ctx->pmd_csr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); ++ if (IS_ERR(ctx->pmd_csr)) { ++ dev_err(&pdev->dev, ++ "devm_ioremap failed for PMD resource address\n"); ++ rc = PTR_ERR(ctx->pmd_csr); ++ goto err_free; ++ } ++ ++ if (edac_op_state == EDAC_OPSTATE_POLL) ++ edac_dev->edac_check = xgene_edac_pmd_check; ++ ++ xgene_edac_pmd_create_debugfs_nodes(edac_dev); ++ ++ rc = edac_device_add_device(edac_dev); ++ if (rc > 0) { ++ dev_err(&pdev->dev, "edac_device_add_device failed\n"); ++ rc = -ENOMEM; ++ goto err_free; ++ } ++ ++ if (edac_op_state == EDAC_OPSTATE_INT) { ++ int irq = platform_get_irq(pdev, 0); ++ ++ if (irq < 0) { ++ dev_err(&pdev->dev, "No IRQ resource\n"); ++ rc = -EINVAL; ++ goto err_del; ++ } ++ rc = devm_request_irq(&pdev->dev, irq, ++ xgene_edac_pmd_isr, IRQF_SHARED, ++ dev_name(&pdev->dev), edac_dev); ++ if (rc) { ++ dev_err(&pdev->dev, "Could not request IRQ %d\n", irq); ++ goto err_del; ++ } ++ edac_dev->op_state = OP_RUNNING_INTERRUPT; ++ } ++ ++ xgene_edac_pmd_hw_ctl(edac_dev, 1); ++ ++ devres_remove_group(&pdev->dev, xgene_edac_pmd_probe); ++ ++ dev_info(&pdev->dev, "X-Gene EDAC PMD registered\n"); ++ return 0; ++ ++err_del: ++ edac_device_del_device(&pdev->dev); ++err_free: ++ edac_device_free_ctl_info(edac_dev); ++err_group: ++ devres_release_group(&pdev->dev, xgene_edac_pmd_probe); ++ return rc; ++} ++ ++static int 
xgene_edac_pmd_remove(struct platform_device *pdev) ++{ ++ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&pdev->dev); ++ ++ xgene_edac_pmd_hw_ctl(edac_dev, 0); ++ edac_device_del_device(&pdev->dev); ++ edac_device_free_ctl_info(edac_dev); ++ return 0; ++} ++ ++#ifdef CONFIG_OF ++static struct of_device_id xgene_edac_pmd_of_match[] = { ++ { .compatible = "apm,xgene-edac-pmd" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, xgene_edac_pmd_of_match); ++#endif ++ ++static struct platform_driver xgene_edac_pmd_driver = { ++ .probe = xgene_edac_pmd_probe, ++ .remove = xgene_edac_pmd_remove, ++ .driver = { ++ .name = "xgene-edac-pmd", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(xgene_edac_pmd_of_match), ++ }, ++}; ++ ++/* L3 Error device */ ++#define L3C_ESR (0x0A * 4) ++#define L3C_ESR_DATATAG_MASK BIT(9) ++#define L3C_ESR_MULTIHIT_MASK BIT(8) ++#define L3C_ESR_UCEVICT_MASK BIT(6) ++#define L3C_ESR_MULTIUCERR_MASK BIT(5) ++#define L3C_ESR_MULTICERR_MASK BIT(4) ++#define L3C_ESR_UCERR_MASK BIT(3) ++#define L3C_ESR_CERR_MASK BIT(2) ++#define L3C_ESR_UCERRINTR_MASK BIT(1) ++#define L3C_ESR_CERRINTR_MASK BIT(0) ++#define L3C_ECR (0x0B * 4) ++#define L3C_ECR_UCINTREN BIT(3) ++#define L3C_ECR_CINTREN BIT(2) ++#define L3C_UCERREN BIT(1) ++#define L3C_CERREN BIT(0) ++#define L3C_ELR (0x0C * 4) ++#define L3C_ELR_ERRSYN(src) ((src & 0xFF800000) >> 23) ++#define L3C_ELR_ERRWAY(src) ((src & 0x007E0000) >> 17) ++#define L3C_ELR_AGENTID(src) ((src & 0x0001E000) >> 13) ++#define L3C_ELR_ERRGRP(src) ((src & 0x00000F00) >> 8) ++#define L3C_ELR_OPTYPE(src) ((src & 0x000000F0) >> 4) ++#define L3C_ELR_PADDRHIGH(src) (src & 0x0000000F) ++#define L3C_AELR (0x0D * 4) ++#define L3C_BELR (0x0E * 4) ++#define L3C_BELR_BANK(src) (src & 0x0000000F) ++ ++struct xgene_edac_dev_ctx { ++ char *name; ++ int edac_idx; ++ void __iomem *pcp_csr; ++ void __iomem *dev_csr; ++ void __iomem *bus_csr; ++}; ++ ++static void xgene_edac_l3_check(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 l3cesr; ++ u32 l3celr; ++ u32 l3caelr; ++ u32 l3cbelr; ++ ++ l3cesr = readl(ctx->dev_csr + L3C_ESR); ++ if (!(l3cesr & (L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK))) ++ return; ++ ++ if (l3cesr & L3C_ESR_UCERR_MASK) ++ dev_err(edac_dev->dev, "L3C uncorrectable error\n"); ++ if (l3cesr & L3C_ESR_CERR_MASK) ++ dev_warn(edac_dev->dev, "L3C correctable error\n"); ++ ++ l3celr = readl(ctx->dev_csr + L3C_ELR); ++ l3caelr = readl(ctx->dev_csr + L3C_AELR); ++ l3cbelr = readl(ctx->dev_csr + L3C_BELR); ++ if (l3cesr & L3C_ESR_MULTIHIT_MASK) ++ dev_err(edac_dev->dev, "L3C multiple hit error\n"); ++ if (l3cesr & L3C_ESR_UCEVICT_MASK) ++ dev_err(edac_dev->dev, ++ "L3C dropped eviction of line with error\n"); ++ if (l3cesr & L3C_ESR_MULTIUCERR_MASK) ++ dev_err(edac_dev->dev, "L3C multiple uncorrectable error\n"); ++ if (l3cesr & L3C_ESR_DATATAG_MASK) ++ dev_err(edac_dev->dev, ++ "L3C data error syndrome 0x%X group 0x%X\n", ++ L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRGRP(l3celr)); ++ else ++ dev_err(edac_dev->dev, ++ "L3C tag error syndrome 0x%X Way of Tag 0x%X Agent ID 0x%X Operation type 0x%X\n", ++ L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRWAY(l3celr), ++ L3C_ELR_AGENTID(l3celr), L3C_ELR_OPTYPE(l3celr)); ++ /* ++ * NOTE: Address [41:38] in L3C_ELR_PADDRHIGH(l3celr). ++ * Address [37:6] in l3caelr. Lower 6 bits are zero. 
++ */ ++ dev_err(edac_dev->dev, "L3C error address 0x%08X.%08X bank %d\n", ++ L3C_ELR_PADDRHIGH(l3celr) << 6 | (l3caelr >> 26), ++ (l3caelr & 0x3FFFFFFF) << 6, L3C_BELR_BANK(l3cbelr)); ++ dev_err(edac_dev->dev, ++ "L3C error status register value 0x%X\n", l3cesr); ++ ++ /* Clear L3C error interrupt */ ++ writel(0, ctx->dev_csr + L3C_ESR); ++ ++ if (l3cesr & L3C_ESR_CERR_MASK) ++ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); ++ if (l3cesr & L3C_ESR_UCERR_MASK) ++ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); ++} ++ ++static irqreturn_t xgene_edac_l3_isr(int irq, void *dev_id) ++{ ++ struct edac_device_ctl_info *edac_dev = dev_id; ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 l3cesr; ++ ++ l3cesr = readl(ctx->dev_csr + L3C_ESR); ++ if (!(l3cesr & (L3C_ESR_UCERRINTR_MASK | L3C_ESR_CERRINTR_MASK))) ++ return IRQ_NONE; ++ ++ xgene_edac_l3_check(edac_dev); ++ ++ return IRQ_HANDLED; ++} ++ ++static void xgene_edac_l3_hw_ctl(struct edac_device_ctl_info *edac_dev, ++ bool enable) ++{ ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 val; ++ ++ val = readl(ctx->dev_csr + L3C_ECR); ++ val |= L3C_UCERREN | L3C_CERREN; ++ /* On disable, we just disable interrupt but keep error enabled */ ++ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) { ++ if (enable) ++ val |= L3C_ECR_UCINTREN | L3C_ECR_CINTREN; ++ else ++ val &= ~(L3C_ECR_UCINTREN | L3C_ECR_CINTREN); ++ } ++ writel(val, ctx->dev_csr + L3C_ECR); ++ ++ mutex_lock(&xgene_edac_lock); ++ ++ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) { ++ /* Enable L3C error top level interrupt */ ++ val = readl(ctx->pcp_csr + PCPHPERRINTMSK); ++ if (enable) ++ val &= ~L3C_UNCORR_ERR_MASK; ++ else ++ val |= L3C_UNCORR_ERR_MASK; ++ writel(val, ctx->pcp_csr + PCPHPERRINTMSK); ++ val = readl(ctx->pcp_csr + PCPLPERRINTMSK); ++ if (enable) ++ val &= ~L3C_CORR_ERR_MASK; ++ else ++ val |= L3C_CORR_ERR_MASK; ++ writel(val, ctx->pcp_csr + PCPLPERRINTMSK); ++ } ++ ++ mutex_unlock(&xgene_edac_lock); ++} ++ ++#ifdef CONFIG_EDAC_DEBUG ++static ssize_t xgene_edac_l3_inject_ctrl_write(struct file *file, ++ const char __user *data, ++ size_t count, loff_t *ppos) ++{ ++ struct edac_device_ctl_info *edac_dev = file->private_data; ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ ++ writel(L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK | ++ L3C_ESR_MULTIUCERR_MASK | L3C_ESR_MULTICERR_MASK, ++ ctx->dev_csr + L3C_ESR); ++ return count; ++} ++ ++static const struct file_operations xgene_edac_l3_debug_inject_fops = { ++ .open = simple_open, ++ .write = xgene_edac_l3_inject_ctrl_write, ++ .llseek = generic_file_llseek, ++}; ++ ++static void xgene_edac_l3_create_debugfs_node( ++ struct edac_device_ctl_info *edac_dev) ++{ ++ struct dentry *edac_debugfs; ++ ++ /* ++ * Todo: Switch to common EDAC debug file system for edac device ++ * when available. 
++ */ ++ edac_debugfs = debugfs_create_dir(edac_dev->dev->kobj.name, NULL); ++ if (!edac_debugfs) ++ return; ++ ++ debugfs_create_file("inject_ctrl", S_IWUSR, edac_debugfs, edac_dev, ++ &xgene_edac_l3_debug_inject_fops); ++} ++#else ++static void xgene_edac_l3_create_debugfs_node( ++ struct edac_device_ctl_info *edac_dev) ++{ ++} ++#endif ++ ++static int xgene_edac_l3_probe(struct platform_device *pdev) ++{ ++ struct edac_device_ctl_info *edac_dev; ++ struct xgene_edac_dev_ctx *ctx; ++ struct resource *res; ++ int rc = 0; ++ ++ if (!devres_open_group(&pdev->dev, xgene_edac_l3_probe, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx), ++ "l3c", 1, "l3c", 1, 0, NULL, 0, ++ edac_device_alloc_index()); ++ if (!edac_dev) { ++ rc = -ENOMEM; ++ goto err; ++ } ++ ++ ctx = edac_dev->pvt_info; ++ ctx->name = "xgene_l3_err"; ++ edac_dev->dev = &pdev->dev; ++ dev_set_drvdata(edac_dev->dev, edac_dev); ++ edac_dev->ctl_name = ctx->name; ++ edac_dev->dev_name = ctx->name; ++ edac_dev->mod_name = EDAC_MOD_STR; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no PCP resource address\n"); ++ rc = -EINVAL; ++ goto err1; ++ } ++ ctx->pcp_csr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); ++ if (IS_ERR(ctx->pcp_csr)) { ++ dev_err(&pdev->dev, ++ "devm_ioremap failed for PCP resource address\n"); ++ rc = PTR_ERR(ctx->pcp_csr); ++ goto err1; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ ctx->dev_csr = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(ctx->dev_csr)) { ++ dev_err(&pdev->dev, "no L3 resource address\n"); ++ rc = PTR_ERR(ctx->dev_csr); ++ goto err1; ++ } ++ ++ if (edac_op_state == EDAC_OPSTATE_POLL) ++ edac_dev->edac_check = xgene_edac_l3_check; ++ ++ xgene_edac_l3_create_debugfs_node(edac_dev); ++ ++ rc = edac_device_add_device(edac_dev); ++ if (rc > 0) { ++ dev_err(&pdev->dev, "edac_device_add_device failed\n"); ++ rc = -ENOMEM; ++ goto err1; ++ } ++ ++ if (edac_op_state == EDAC_OPSTATE_INT) { ++ int irq; ++ int i; ++ ++ for (i = 0; i < 2; i++) { ++ irq = platform_get_irq(pdev, i); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "No IRQ resource\n"); ++ rc = -EINVAL; ++ goto err2; ++ } ++ rc = devm_request_irq(&pdev->dev, irq, ++ xgene_edac_l3_isr, IRQF_SHARED, ++ dev_name(&pdev->dev), edac_dev); ++ if (rc) { ++ dev_err(&pdev->dev, ++ "Could not request IRQ %d\n", irq); ++ goto err2; ++ } ++ } ++ edac_dev->op_state = OP_RUNNING_INTERRUPT; ++ } ++ ++ xgene_edac_l3_hw_ctl(edac_dev, true); ++ ++ devres_remove_group(&pdev->dev, xgene_edac_l3_probe); ++ ++ dev_info(&pdev->dev, "X-Gene EDAC L3 registered\n"); ++ return 0; ++ ++err2: ++ edac_device_del_device(&pdev->dev); ++err1: ++ edac_device_free_ctl_info(edac_dev); ++err: ++ devres_release_group(&pdev->dev, xgene_edac_l3_probe); ++ return rc; ++} ++ ++static int xgene_edac_l3_remove(struct platform_device *pdev) ++{ ++ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&pdev->dev); ++ ++ xgene_edac_l3_hw_ctl(edac_dev, false); ++ edac_device_del_device(&pdev->dev); ++ edac_device_free_ctl_info(edac_dev); ++ return 0; ++} ++ ++#ifdef CONFIG_OF ++static struct of_device_id xgene_edac_l3_of_match[] = { ++ { .compatible = "apm,xgene-edac-l3" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, xgene_edac_l3_of_match); ++#endif ++ ++static struct platform_driver xgene_edac_l3_driver = { ++ .probe = xgene_edac_l3_probe, ++ .remove = xgene_edac_l3_remove, ++ .driver = { ++ .name = "xgene-edac-l3", ++ .owner = THIS_MODULE, ++ .of_match_table = 
of_match_ptr(xgene_edac_l3_of_match), ++ }, ++}; ++ ++/* SoC Error device */ ++#define IOBAXIS0TRANSERRINTSTS 0x0000 ++#define IOBAXIS0_M_ILLEGAL_ACCESS_MASK BIT(1) ++#define IOBAXIS0_ILLEGAL_ACCESS_MASK BIT(0) ++#define IOBAXIS0TRANSERRINTMSK 0x0004 ++#define IOBAXIS0TRANSERRREQINFOL 0x0008 ++#define IOBAXIS0TRANSERRREQINFOH 0x000c ++#define REQTYPE_RD(src) (((src) & BIT(0))) ++#define ERRADDRH_RD(src) (((src) & 0xffc00000) >> 22) ++#define IOBAXIS1TRANSERRINTSTS 0x0010 ++#define IOBAXIS1TRANSERRINTMSK 0x0014 ++#define IOBAXIS1TRANSERRREQINFOL 0x0018 ++#define IOBAXIS1TRANSERRREQINFOH 0x001c ++#define IOBPATRANSERRINTSTS 0x0020 ++#define IOBPA_M_REQIDRAM_CORRUPT_MASK BIT(7) ++#define IOBPA_REQIDRAM_CORRUPT_MASK BIT(6) ++#define IOBPA_M_TRANS_CORRUPT_MASK BIT(5) ++#define IOBPA_TRANS_CORRUPT_MASK BIT(4) ++#define IOBPA_M_WDATA_CORRUPT_MASK BIT(3) ++#define IOBPA_WDATA_CORRUPT_MASK BIT(2) ++#define IOBPA_M_RDATA_CORRUPT_MASK BIT(1) ++#define IOBPA_RDATA_CORRUPT_MASK BIT(0) ++#define IOBBATRANSERRINTSTS 0x0030 ++#define M_ILLEGAL_ACCESS_MASK 0x00008000 ++#define ILLEGAL_ACCESS_MASK 0x00004000 ++#define M_WIDRAM_CORRUPT_MASK 0x00002000 ++#define WIDRAM_CORRUPT_MASK BIT(12) ++#define M_RIDRAM_CORRUPT_MASK BIT(11) ++#define RIDRAM_CORRUPT_MASK BIT(10) ++#define M_TRANS_CORRUPT_MASK BIT(9) ++#define TRANS_CORRUPT_MASK BIT(8) ++#define M_WDATA_CORRUPT_MASK BIT(7) ++#define WDATA_CORRUPT_MASK BIT(6) ++#define M_RBM_POISONED_REQ_MASK BIT(5) ++#define RBM_POISONED_REQ_MASK BIT(4) ++#define M_XGIC_POISONED_REQ_MASK BIT(3) ++#define XGIC_POISONED_REQ_MASK BIT(2) ++#define M_WRERR_RESP_MASK BIT(1) ++#define WRERR_RESP_MASK BIT(0) ++#define IOBBATRANSERRREQINFOL 0x0038 ++#define IOBBATRANSERRREQINFOH 0x003c ++#define REQTYPE_F2_RD(src) (((src) & BIT(0))) ++#define ERRADDRH_F2_RD(src) (((src) & 0xffc00000) >> 22) ++#define IOBBATRANSERRCSWREQID 0x0040 ++#define XGICTRANSERRINTSTS 0x0050 ++#define M_WR_ACCESS_ERR_MASK BIT(3) ++#define WR_ACCESS_ERR_MASK BIT(2) ++#define M_RD_ACCESS_ERR_MASK BIT(1) ++#define RD_ACCESS_ERR_MASK BIT(0) ++#define XGICTRANSERRINTMSK 0x0054 ++#define XGICTRANSERRREQINFO 0x0058 ++#define REQTYPE_MASK 0x04000000 ++#define ERRADDR_RD(src) ((src) & 0x03ffffff) ++#define GLBL_ERR_STS 0x0800 ++#define MDED_ERR_MASK BIT(3) ++#define DED_ERR_MASK BIT(2) ++#define MSEC_ERR_MASK BIT(1) ++#define SEC_ERR_MASK BIT(0) ++#define GLBL_SEC_ERRL 0x0810 ++#define GLBL_SEC_ERRH 0x0818 ++#define GLBL_MSEC_ERRL 0x0820 ++#define GLBL_MSEC_ERRH 0x0828 ++#define GLBL_DED_ERRL 0x0830 ++#define GLBL_DED_ERRLMASK 0x0834 ++#define GLBL_DED_ERRH 0x0838 ++#define GLBL_DED_ERRHMASK 0x083c ++#define GLBL_MDED_ERRL 0x0840 ++#define GLBL_MDED_ERRLMASK 0x0844 ++#define GLBL_MDED_ERRH 0x0848 ++#define GLBL_MDED_ERRHMASK 0x084c ++ ++/* IO Bus Registers */ ++#define RBCSR 0x0000 ++#define STICKYERR_MASK BIT(0) ++#define RBEIR 0x0008 ++#define AGENT_OFFLINE_ERR_MASK BIT(30) ++#define UNIMPL_RBPAGE_ERR_MASK BIT(29) ++#define WORD_ALIGNED_ERR_MASK BIT(28) ++#define PAGE_ACCESS_ERR_MASK BIT(27) ++#define WRITE_ACCESS_MASK BIT(26) ++#define RBERRADDR_RD(src) ((src) & 0x03FFFFFF) ++ ++static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 err_addr_lo; ++ u32 err_addr_hi; ++ u32 reg; ++ u32 info; ++ ++ /* GIC transaction error interrupt */ ++ reg = readl(ctx->dev_csr + XGICTRANSERRINTSTS); ++ if (reg) { ++ dev_err(edac_dev->dev, "XGIC transaction error\n"); ++ if (reg & RD_ACCESS_ERR_MASK) ++ dev_err(edac_dev->dev, "XGIC read size 
error\n"); ++ if (reg & M_RD_ACCESS_ERR_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple XGIC read size error\n"); ++ if (reg & WR_ACCESS_ERR_MASK) ++ dev_err(edac_dev->dev, "XGIC write size error\n"); ++ if (reg & M_WR_ACCESS_ERR_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple XGIC write size error\n"); ++ info = readl(ctx->dev_csr + XGICTRANSERRREQINFO); ++ dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n", ++ info & REQTYPE_MASK ? "read" : "write", ++ ERRADDR_RD(info), info); ++ writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS); ++ } ++ ++ /* IOB memory error */ ++ reg = readl(ctx->dev_csr + GLBL_ERR_STS); ++ if (reg) { ++ if (reg & SEC_ERR_MASK) { ++ err_addr_lo = readl(ctx->dev_csr + GLBL_SEC_ERRL); ++ err_addr_hi = readl(ctx->dev_csr + GLBL_SEC_ERRH); ++ dev_err(edac_dev->dev, ++ "IOB single-bit correctable memory at 0x%08X.%08X error\n", ++ err_addr_lo, err_addr_hi); ++ writel(err_addr_lo, ctx->dev_csr + GLBL_SEC_ERRL); ++ writel(err_addr_hi, ctx->dev_csr + GLBL_SEC_ERRH); ++ } ++ if (reg & MSEC_ERR_MASK) { ++ err_addr_lo = readl(ctx->dev_csr + GLBL_MSEC_ERRL); ++ err_addr_hi = readl(ctx->dev_csr + GLBL_MSEC_ERRH); ++ dev_err(edac_dev->dev, ++ "IOB multiple single-bit correctable memory at 0x%08X.%08X error\n", ++ err_addr_lo, err_addr_hi); ++ writel(err_addr_lo, ctx->dev_csr + GLBL_MSEC_ERRL); ++ writel(err_addr_hi, ctx->dev_csr + GLBL_MSEC_ERRH); ++ } ++ if (reg & (SEC_ERR_MASK | MSEC_ERR_MASK)) ++ edac_device_handle_ce(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ ++ if (reg & DED_ERR_MASK) { ++ err_addr_lo = readl(ctx->dev_csr + GLBL_DED_ERRL); ++ err_addr_hi = readl(ctx->dev_csr + GLBL_DED_ERRH); ++ dev_err(edac_dev->dev, ++ "IOB double-bit uncorrectable memory at 0x%08X.%08X error\n", ++ err_addr_lo, err_addr_hi); ++ writel(err_addr_lo, ctx->dev_csr + GLBL_DED_ERRL); ++ writel(err_addr_hi, ctx->dev_csr + GLBL_DED_ERRH); ++ } ++ if (reg & MDED_ERR_MASK) { ++ err_addr_lo = readl(ctx->dev_csr + GLBL_MDED_ERRL); ++ err_addr_hi = readl(ctx->dev_csr + GLBL_MDED_ERRH); ++ dev_err(edac_dev->dev, ++ "Multiple IOB double-bit uncorrectable memory at 0x%08X.%08X error\n", ++ err_addr_lo, err_addr_hi); ++ writel(err_addr_lo, ctx->dev_csr + GLBL_MDED_ERRL); ++ writel(err_addr_hi, ctx->dev_csr + GLBL_MDED_ERRH); ++ } ++ if (reg & (DED_ERR_MASK | MDED_ERR_MASK)) ++ edac_device_handle_ue(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ } ++} ++ ++static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 err_addr_lo; ++ u32 err_addr_hi; ++ u32 reg; ++ ++ /* ++ * Check RB access errors ++ * 1. Out of range ++ * 2. Un-implemented page ++ * 3. Un-aligned access ++ * 4. Offline slave IP ++ */ ++ reg = readl(ctx->bus_csr + RBCSR); ++ if (reg & STICKYERR_MASK) { ++ bool write; ++ u32 address; ++ ++ dev_err(edac_dev->dev, "IOB bus access error(s)\n"); ++ reg = readl(ctx->bus_csr + RBEIR); ++ write = reg & WRITE_ACCESS_MASK ? 1 : 0; ++ address = RBERRADDR_RD(reg); ++ if (reg & AGENT_OFFLINE_ERR_MASK) ++ dev_err(edac_dev->dev, ++ "IOB bus %s access to offline agent error\n", ++ write ? "write" : "read"); ++ if (reg & UNIMPL_RBPAGE_ERR_MASK) ++ dev_err(edac_dev->dev, ++ "IOB bus %s access to unimplemented page error\n", ++ write ? "write" : "read"); ++ if (reg & WORD_ALIGNED_ERR_MASK) ++ dev_err(edac_dev->dev, ++ "IOB bus %s word aligned access error\n", ++ write ? "write" : "read"); ++ if (reg & PAGE_ACCESS_ERR_MASK) ++ dev_err(edac_dev->dev, ++ "IOB bus %s to page out of range access error\n", ++ write ? 
"write" : "read"); ++ writel(0x0, ctx->bus_csr + RBEIR); ++ writel(0x0, ctx->bus_csr + RBCSR); ++ } ++ ++ /* IOB Bridge agent transaction error interrupt */ ++ reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS); ++ if (!reg) ++ return; ++ ++ dev_err(edac_dev->dev, "IOB bridge agent (BA) transaction error\n"); ++ if (reg & WRERR_RESP_MASK) ++ dev_err(edac_dev->dev, "IOB BA write response error\n"); ++ if (reg & M_WRERR_RESP_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple IOB BA write response error\n"); ++ if (reg & XGIC_POISONED_REQ_MASK) ++ dev_err(edac_dev->dev, "IOB BA XGIC poisoned write error\n"); ++ if (reg & M_XGIC_POISONED_REQ_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple IOB BA XGIC poisoned write error\n"); ++ if (reg & RBM_POISONED_REQ_MASK) ++ dev_err(edac_dev->dev, "IOB BA RBM poisoned write error\n"); ++ if (reg & M_RBM_POISONED_REQ_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple IOB BA RBM poisoned write error\n"); ++ if (reg & WDATA_CORRUPT_MASK) ++ dev_err(edac_dev->dev, "IOB BA write error\n"); ++ if (reg & M_WDATA_CORRUPT_MASK) ++ dev_err(edac_dev->dev, "Multiple IOB BA write error\n"); ++ if (reg & TRANS_CORRUPT_MASK) ++ dev_err(edac_dev->dev, "IOB BA transaction error\n"); ++ if (reg & M_TRANS_CORRUPT_MASK) ++ dev_err(edac_dev->dev, "Multiple IOB BA transaction error\n"); ++ if (reg & RIDRAM_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "IOB BA RDIDRAM read transaction ID error\n"); ++ if (reg & M_RIDRAM_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple IOB BA RDIDRAM read transaction ID error\n"); ++ if (reg & WIDRAM_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "IOB BA RDIDRAM write transaction ID error\n"); ++ if (reg & M_WIDRAM_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple IOB BA RDIDRAM write transaction ID error\n"); ++ if (reg & ILLEGAL_ACCESS_MASK) ++ dev_err(edac_dev->dev, ++ "IOB BA XGIC/RB illegal access error\n"); ++ if (reg & M_ILLEGAL_ACCESS_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple IOB BA XGIC/RB illegal access error\n"); ++ ++ err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL); ++ err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH); ++ dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n", ++ REQTYPE_F2_RD(err_addr_hi) ? 
"read" : "write", ++ ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi); ++ if (reg & WRERR_RESP_MASK) ++ dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n", ++ readl(ctx->dev_csr + IOBBATRANSERRCSWREQID)); ++ writel(reg, ctx->dev_csr + IOBBATRANSERRINTSTS); ++} ++ ++static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 err_addr_lo; ++ u32 err_addr_hi; ++ u32 reg; ++ ++ /* IOB Processing agent transaction error interrupt */ ++ reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); ++ if (reg) { ++ dev_err(edac_dev->dev, ++ "IOB procesing agent (PA) transaction error\n"); ++ if (reg & IOBPA_RDATA_CORRUPT_MASK) ++ dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); ++ if (reg & IOBPA_M_RDATA_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "Mutilple IOB PA read data RAM error\n"); ++ if (reg & IOBPA_WDATA_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "IOB PA write data RAM error\n"); ++ if (reg & IOBPA_M_WDATA_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "Mutilple IOB PA write data RAM error\n"); ++ if (reg & IOBPA_TRANS_CORRUPT_MASK) ++ dev_err(edac_dev->dev, "IOB PA transaction error\n"); ++ if (reg & IOBPA_M_TRANS_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "Mutilple IOB PA transaction error\n"); ++ if (reg & IOBPA_REQIDRAM_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "IOB PA transaction ID RAM error\n"); ++ if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK) ++ dev_err(edac_dev->dev, ++ "Multiple IOB PA transaction ID RAM error\n"); ++ writel(reg, ctx->dev_csr + IOBPATRANSERRINTSTS); ++ } ++ ++ /* IOB AXI0 Error */ ++ reg = readl(ctx->dev_csr + IOBAXIS0TRANSERRINTSTS); ++ if (reg) { ++ err_addr_lo = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOL); ++ err_addr_hi = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOH); ++ dev_err(edac_dev->dev, ++ "%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n", ++ reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "", ++ REQTYPE_RD(err_addr_hi) ? "read" : "write", ++ ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi); ++ writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS); ++ } ++ ++ /* IOB AXI1 Error */ ++ reg = readl(ctx->dev_csr + IOBAXIS1TRANSERRINTSTS); ++ if (reg) { ++ err_addr_lo = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOL); ++ err_addr_hi = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOH); ++ dev_err(edac_dev->dev, ++ "%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n", ++ reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "", ++ REQTYPE_RD(err_addr_hi) ? 
"read" : "write", ++ ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi); ++ writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS); ++ } ++} ++ ++static void xgene_edac_soc_check(struct edac_device_ctl_info *edac_dev) ++{ ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ static const char * const mem_err_ip[] = { ++ "10GbE0", ++ "10GbE1", ++ "Security", ++ "SATA45", ++ "SATA23/ETH23", ++ "SATA01/ETH01", ++ "USB1", ++ "USB0", ++ "QML", ++ "QM0", ++ "QM1 (XGbE01)", ++ "PCIE4", ++ "PCIE3", ++ "PCIE2", ++ "PCIE1", ++ "PCIE0", ++ "CTX Manager", ++ "OCM", ++ "1GbE", ++ "CLE", ++ "AHBC", ++ "PktDMA", ++ "GFC", ++ "MSLIM", ++ "10GbE2", ++ "10GbE3", ++ "QM2 (XGbE23)", ++ "IOB", ++ "unknown", ++ "unknown", ++ "unknown", ++ "unknown", ++ }; ++ u32 pcp_hp_stat; ++ u32 pcp_lp_stat; ++ u32 reg; ++ int i; ++ ++ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS); ++ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS); ++ reg = readl(ctx->pcp_csr + MEMERRINTSTS); ++ if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK | ++ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) || ++ (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg)) ++ return; ++ ++ if (pcp_hp_stat & IOB_XGIC_ERR_MASK) ++ xgene_edac_iob_gic_report(edac_dev); ++ ++ if (pcp_hp_stat & (IOB_RB_ERR_MASK | IOB_BA_ERR_MASK)) ++ xgene_edac_rb_report(edac_dev); ++ ++ if (pcp_hp_stat & IOB_PA_ERR_MASK) ++ xgene_edac_pa_report(edac_dev); ++ ++ if (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) { ++ dev_info(edac_dev->dev, ++ "CSW switch trace correctable memory parity error\n"); ++ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); ++ } ++ ++ for (i = 0; i < 31; i++) { ++ if (reg & (1 << i)) { ++ dev_err(edac_dev->dev, "%s memory parity error\n", ++ mem_err_ip[i]); ++ edac_device_handle_ue(edac_dev, 0, 0, ++ edac_dev->ctl_name); ++ } ++ } ++} ++ ++static irqreturn_t xgene_edac_soc_isr(int irq, void *dev_id) ++{ ++ struct edac_device_ctl_info *edac_dev = dev_id; ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 pcp_hp_stat; ++ u32 pcp_lp_stat; ++ u32 reg; ++ ++ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS); ++ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS); ++ reg = readl(ctx->pcp_csr + MEMERRINTSTS); ++ if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK | ++ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) || ++ (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg)) ++ return IRQ_NONE; ++ ++ xgene_edac_soc_check(edac_dev); ++ ++ return IRQ_HANDLED; ++} ++ ++static void xgene_edac_soc_hw_ctl(struct edac_device_ctl_info *edac_dev, ++ bool enable) ++{ ++ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info; ++ u32 val; ++ ++ /* Enable SoC IP error interrupt */ ++ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) { ++ mutex_lock(&xgene_edac_lock); ++ ++ val = readl(ctx->pcp_csr + PCPHPERRINTMSK); ++ if (enable) ++ val &= ~(IOB_PA_ERR_MASK | IOB_BA_ERR_MASK | ++ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK); ++ else ++ val |= IOB_PA_ERR_MASK | IOB_BA_ERR_MASK | ++ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK; ++ writel(val, ctx->pcp_csr + PCPHPERRINTMSK); ++ val = readl(ctx->pcp_csr + PCPLPERRINTMSK); ++ if (enable) ++ val &= ~CSW_SWITCH_TRACE_ERR_MASK; ++ else ++ val |= CSW_SWITCH_TRACE_ERR_MASK; ++ writel(val, ctx->pcp_csr + PCPLPERRINTMSK); ++ ++ mutex_unlock(&xgene_edac_lock); ++ ++ writel(enable ? 0x0 : 0xFFFFFFFF, ++ ctx->dev_csr + IOBAXIS0TRANSERRINTMSK); ++ writel(enable ? 0x0 : 0xFFFFFFFF, ++ ctx->dev_csr + IOBAXIS1TRANSERRINTMSK); ++ writel(enable ? 0x0 : 0xFFFFFFFF, ++ ctx->dev_csr + XGICTRANSERRINTMSK); ++ ++ writel(enable ? 
0x0 : 0xFFFFFFFF, ctx->pcp_csr + MEMERRINTMSK); ++ } ++} ++ ++static int xgene_edac_soc_probe(struct platform_device *pdev) ++{ ++ struct edac_device_ctl_info *edac_dev; ++ struct xgene_edac_dev_ctx *ctx; ++ struct resource *res; ++ int rc = 0; ++ ++ if (!devres_open_group(&pdev->dev, xgene_edac_soc_probe, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx), ++ "SOC", 1, "SOC", 1, 2, NULL, 0, ++ edac_device_alloc_index()); ++ if (!edac_dev) { ++ rc = -ENOMEM; ++ goto err; ++ } ++ ++ ctx = edac_dev->pvt_info; ++ ctx->name = "xgene_soc_err"; ++ edac_dev->dev = &pdev->dev; ++ dev_set_drvdata(edac_dev->dev, edac_dev); ++ edac_dev->ctl_name = ctx->name; ++ edac_dev->dev_name = ctx->name; ++ edac_dev->mod_name = EDAC_MOD_STR; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no PCP resource address\n"); ++ rc = -EINVAL; ++ goto err1; ++ } ++ ctx->pcp_csr = devm_ioremap(&pdev->dev, res->start, ++ resource_size(res)); ++ if (IS_ERR(ctx->pcp_csr)) { ++ dev_err(&pdev->dev, "no PCP resource address\n"); ++ rc = PTR_ERR(ctx->pcp_csr); ++ goto err1; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ ctx->dev_csr = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(ctx->dev_csr)) { ++ dev_err(&pdev->dev, "no SoC resource address\n"); ++ rc = PTR_ERR(ctx->dev_csr); ++ goto err1; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ ctx->bus_csr = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(ctx->bus_csr)) { ++ dev_err(&pdev->dev, "no SoC bus resource address\n"); ++ rc = PTR_ERR(ctx->bus_csr); ++ goto err1; ++ } ++ ++ if (edac_op_state == EDAC_OPSTATE_POLL) ++ edac_dev->edac_check = xgene_edac_soc_check; ++ ++ rc = edac_device_add_device(edac_dev); ++ if (rc > 0) { ++ dev_err(&pdev->dev, "edac_device_add_device failed\n"); ++ rc = -ENOMEM; ++ goto err1; ++ } ++ ++ if (edac_op_state == EDAC_OPSTATE_INT) { ++ int irq; ++ int i; ++ ++ /* ++ * Register for SoC un-correctable and correctable errors ++ */ ++ for (i = 0; i < 3; i++) { ++ irq = platform_get_irq(pdev, i); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "No IRQ resource\n"); ++ rc = -EINVAL; ++ goto err2; ++ } ++ rc = devm_request_irq(&pdev->dev, irq, ++ xgene_edac_soc_isr, IRQF_SHARED, ++ dev_name(&pdev->dev), edac_dev); ++ if (rc) { ++ dev_err(&pdev->dev, ++ "Could not request IRQ %d\n", irq); ++ goto err2; ++ } ++ } ++ ++ edac_dev->op_state = OP_RUNNING_INTERRUPT; ++ } ++ ++ xgene_edac_soc_hw_ctl(edac_dev, true); ++ ++ devres_remove_group(&pdev->dev, xgene_edac_soc_probe); ++ ++ dev_info(&pdev->dev, "X-Gene EDAC SoC registered\n"); ++ return 0; ++ ++err2: ++ edac_device_del_device(&pdev->dev); ++err1: ++ edac_device_free_ctl_info(edac_dev); ++err: ++ devres_release_group(&pdev->dev, xgene_edac_soc_probe); ++ return rc; ++} ++ ++static int xgene_edac_soc_remove(struct platform_device *pdev) ++{ ++ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&pdev->dev); ++ ++ xgene_edac_soc_hw_ctl(edac_dev, false); ++ edac_device_del_device(&pdev->dev); ++ edac_device_free_ctl_info(edac_dev); ++ return 0; ++} ++ ++#ifdef CONFIG_OF ++static struct of_device_id xgene_edac_soc_of_match[] = { ++ { .compatible = "apm,xgene-edac-soc" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, xgene_edac_soc_of_match); ++#endif ++ ++static struct platform_driver xgene_edac_soc_driver = { ++ .probe = xgene_edac_soc_probe, ++ .remove = xgene_edac_soc_remove, ++ .driver = { ++ .name = "xgene-edac-soc", ++ .owner = THIS_MODULE, ++ .of_match_table = 
of_match_ptr(xgene_edac_soc_of_match), ++ }, ++}; ++ ++static int __init xgene_edac_init(void) ++{ ++ int rc; ++ ++ /* Make sure error reporting method is sane */ ++ switch (edac_op_state) { ++ case EDAC_OPSTATE_POLL: ++ case EDAC_OPSTATE_INT: ++ break; ++ default: ++ edac_op_state = EDAC_OPSTATE_INT; ++ break; ++ } ++ ++ rc = platform_driver_register(&xgene_edac_mc_driver); ++ if (rc) { ++ edac_printk(KERN_ERR, EDAC_MOD_STR, "MCU fails to register\n"); ++ goto reg_mc_failed; ++ } ++ rc = platform_driver_register(&xgene_edac_pmd_driver); ++ if (rc) { ++ edac_printk(KERN_ERR, EDAC_MOD_STR, "PMD fails to register\n"); ++ goto reg_pmd_failed; ++ } ++ rc = platform_driver_register(&xgene_edac_l3_driver); ++ if (rc) { ++ edac_printk(KERN_ERR, EDAC_MOD_STR, "L3 fails to register\n"); ++ goto reg_l3_failed; ++ } ++ rc = platform_driver_register(&xgene_edac_soc_driver); ++ if (rc) { ++ edac_printk(KERN_ERR, EDAC_MOD_STR, "SoC fails to register\n"); ++ goto reg_soc_failed; ++ } ++ ++ return 0; ++ ++reg_soc_failed: ++ platform_driver_unregister(&xgene_edac_l3_driver); ++ ++reg_l3_failed: ++ platform_driver_unregister(&xgene_edac_pmd_driver); ++ ++reg_pmd_failed: ++ platform_driver_unregister(&xgene_edac_mc_driver); ++ ++reg_mc_failed: ++ return rc; ++} ++module_init(xgene_edac_init); ++ ++static void __exit xgene_edac_exit(void) ++{ ++ platform_driver_unregister(&xgene_edac_soc_driver); ++ platform_driver_unregister(&xgene_edac_l3_driver); ++ platform_driver_unregister(&xgene_edac_pmd_driver); ++ platform_driver_unregister(&xgene_edac_mc_driver); ++} ++module_exit(xgene_edac_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Feng Kan "); ++MODULE_DESCRIPTION("APM X-Gene EDAC driver"); ++module_param(edac_op_state, int, 0444); ++MODULE_PARM_DESC(edac_op_state, ++ "EDAC Error Reporting state: 0=Poll, 2=Interrupt"); diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index e0f1cb3..9b396d7 100644 --- a/drivers/firmware/dmi-sysfs.c @@ -6425,10 +8267,10 @@ index e0f1cb3..9b396d7 100644 return 0; diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c -index c5f7b4e..d55c712 100644 +index 69fac06..07d2960 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c -@@ -113,6 +113,8 @@ static void dmi_table(u8 *buf, int len, int num, +@@ -114,6 +114,8 @@ static void dmi_table(u8 *buf, u32 len, int num, } } @@ -6437,7 +8279,7 @@ index c5f7b4e..d55c712 100644 static phys_addr_t dmi_base; static u32 dmi_len; static u16 dmi_num; -@@ -474,6 +476,8 @@ static int __init dmi_present(const u8 *buf) +@@ -475,6 +477,8 @@ static int __init dmi_present(const u8 *buf) if (memcmp(buf, "_SM_", 4) == 0 && buf[5] < 32 && dmi_checksum(buf, buf[5])) { smbios_ver = get_unaligned_be16(buf + 6); @@ -6446,7 +8288,7 @@ index c5f7b4e..d55c712 100644 /* Some BIOS report weird SMBIOS version, fix that up */ switch (smbios_ver) { -@@ -505,6 +509,8 @@ static int __init dmi_present(const u8 *buf) +@@ -506,6 +510,8 @@ static int __init dmi_present(const u8 *buf) pr_info("SMBIOS %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF); } else { @@ -6455,7 +8297,7 @@ index c5f7b4e..d55c712 100644 dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F); pr_info("Legacy DMI %d.%d present.\n", -@@ -530,6 +536,8 @@ static int __init dmi_smbios3_present(const u8 *buf) +@@ -531,6 +537,8 @@ static int __init dmi_smbios3_present(const u8 *buf) dmi_ver = get_unaligned_be16(buf + 7); dmi_len = get_unaligned_le32(buf + 12); dmi_base = get_unaligned_le64(buf + 16); @@ -6464,7 +8306,7 @@ index c5f7b4e..d55c712 
100644 /* * The 64-bit SMBIOS 3.0 entry point no longer has a field -@@ -941,3 +949,21 @@ void dmi_memdev_name(u16 handle, const char **bank, const char **device) +@@ -942,3 +950,21 @@ void dmi_memdev_name(u16 handle, const char **bank, const char **device) } } EXPORT_SYMBOL_GPL(dmi_memdev_name); @@ -6486,25 +8328,186 @@ index c5f7b4e..d55c712 100644 + return smbios_header; +} +EXPORT_SYMBOL_GPL(dmi_get_smbios_entry_area); +diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c +index dcae482..2b38147 100644 +--- a/drivers/firmware/efi/libstub/arm-stub.c ++++ b/drivers/firmware/efi/libstub/arm-stub.c +@@ -295,62 +295,3 @@ fail_free_image: + fail: + return EFI_ERROR; + } +- +-/* +- * This is the base address at which to start allocating virtual memory ranges +- * for UEFI Runtime Services. This is in the low TTBR0 range so that we can use +- * any allocation we choose, and eliminate the risk of a conflict after kexec. +- * The value chosen is the largest non-zero power of 2 suitable for this purpose +- * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can +- * be mapped efficiently. +- */ +-#define EFI_RT_VIRTUAL_BASE 0x40000000 +- +-/* +- * efi_get_virtmap() - create a virtual mapping for the EFI memory map +- * +- * This function populates the virt_addr fields of all memory region descriptors +- * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors +- * are also copied to @runtime_map, and their total count is returned in @count. +- */ +-void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, +- unsigned long desc_size, efi_memory_desc_t *runtime_map, +- int *count) +-{ +- u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; +- efi_memory_desc_t *out = runtime_map; +- int l; +- +- for (l = 0; l < map_size; l += desc_size) { +- efi_memory_desc_t *in = (void *)memory_map + l; +- u64 paddr, size; +- +- if (!(in->attribute & EFI_MEMORY_RUNTIME)) +- continue; +- +- /* +- * Make the mapping compatible with 64k pages: this allows +- * a 4k page size kernel to kexec a 64k page size kernel and +- * vice versa. +- */ +- paddr = round_down(in->phys_addr, SZ_64K); +- size = round_up(in->num_pages * EFI_PAGE_SIZE + +- in->phys_addr - paddr, SZ_64K); +- +- /* +- * Avoid wasting memory on PTEs by choosing a virtual base that +- * is compatible with section mappings if this region has the +- * appropriate size and physical alignment. 
(Sections are 2 MB +- * on 4k granule kernels) +- */ +- if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) +- efi_virt_base = round_up(efi_virt_base, SZ_2M); +- +- in->virt_addr = efi_virt_base + in->phys_addr - paddr; +- efi_virt_base += size; +- +- memcpy(out, in, desc_size); +- out = (void *)out + desc_size; +- ++*count; +- } +-} +diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h +index 47437b1..d1ba39c 100644 +--- a/drivers/firmware/efi/libstub/efistub.h ++++ b/drivers/firmware/efi/libstub/efistub.h +@@ -43,8 +43,4 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, + + void *get_fdt(efi_system_table_t *sys_table); + +-void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, +- unsigned long desc_size, efi_memory_desc_t *runtime_map, +- int *count); +- + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c -index 91da56c..7c62760 100644 +index 91da56c..c846a96 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c -@@ -156,6 +156,14 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - if (status) - goto fdt_set_fail; +@@ -14,8 +14,6 @@ + #include + #include -+ /* Add a property to show the dtb is generated by uefi stub */ -+ if (!orig_fdt) { -+ status = fdt_setprop(fdt, node, -+ "linux,uefi-stub-generated-dtb", NULL, 0); -+ if (status) -+ goto fdt_set_fail; -+ } -+ - return EFI_SUCCESS; +-#include "efistub.h" +- + efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, + unsigned long orig_fdt_size, + void *fdt, int new_fdt_size, char *cmdline_ptr, +@@ -195,26 +193,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, + unsigned long map_size, desc_size; + u32 desc_ver; + unsigned long mmap_key; +- efi_memory_desc_t *memory_map, *runtime_map; ++ efi_memory_desc_t *memory_map; + unsigned long new_fdt_size; + efi_status_t status; +- int runtime_entry_count = 0; +- +- /* +- * Get a copy of the current memory map that we will use to prepare +- * the input for SetVirtualAddressMap(). We don't have to worry about +- * subsequent allocations adding entries, since they could not affect +- * the number of EFI_MEMORY_RUNTIME regions. +- */ +- status = efi_get_memory_map(sys_table, &runtime_map, &map_size, +- &desc_size, &desc_ver, &mmap_key); +- if (status != EFI_SUCCESS) { +- pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); +- return status; +- } +- +- pr_efi(sys_table, +- "Exiting boot services and installing virtual address map...\n"); + + /* + * Estimate size of new FDT, and allocate memory for it. We +@@ -267,48 +248,12 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, + } + } + +- /* +- * Update the memory map with virtual addresses. 
The function will also +- * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME +- * entries so that we can pass it straight into SetVirtualAddressMap() +- */ +- efi_get_virtmap(memory_map, map_size, desc_size, runtime_map, +- &runtime_entry_count); +- + /* Now we are ready to exit_boot_services.*/ + status = sys_table->boottime->exit_boot_services(handle, mmap_key); + +- if (status == EFI_SUCCESS) { +- efi_set_virtual_address_map_t *svam; +- +- /* Install the new virtual address map */ +- svam = sys_table->runtime->set_virtual_address_map; +- status = svam(runtime_entry_count * desc_size, desc_size, +- desc_ver, runtime_map); + +- /* +- * We are beyond the point of no return here, so if the call to +- * SetVirtualAddressMap() failed, we need to signal that to the +- * incoming kernel but proceed normally otherwise. +- */ +- if (status != EFI_SUCCESS) { +- int l; +- +- /* +- * Set the virtual address field of all +- * EFI_MEMORY_RUNTIME entries to 0. This will signal +- * the incoming kernel that no virtual translation has +- * been installed. +- */ +- for (l = 0; l < map_size; l += desc_size) { +- efi_memory_desc_t *p = (void *)memory_map + l; +- +- if (p->attribute & EFI_MEMORY_RUNTIME) +- p->virt_addr = 0; +- } +- } +- return EFI_SUCCESS; +- } ++ if (status == EFI_SUCCESS) ++ return status; + + pr_efi_err(sys_table, "Exit boot services failed.\n"); + +@@ -319,7 +264,6 @@ fail_free_new_fdt: + efi_free(sys_table, new_fdt_size, *new_fdt_addr); + + fail: +- sys_table->boottime->free_pool(runtime_map); + return EFI_LOAD_ERROR; + } - fdt_set_fail: diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index fc13dd5..3143a6e 100644 --- a/drivers/iommu/arm-smmu.c @@ -6767,7 +8770,7 @@ index fdf7065..235524d 100644 + +#endif /* CONFIG_ACPI */ diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c -index d8996bd..c76884b 100644 +index 596b0a9..71c1ca4 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -54,13 +54,12 @@ struct its_collection { @@ -6786,7 +8789,16 @@ index d8996bd..c76884b 100644 struct irq_domain *domain; void __iomem *base; unsigned long phys_base; -@@ -875,7 +874,7 @@ retry_baser: +@@ -831,7 +830,7 @@ static int its_alloc_tables(struct its_node *its) + if (order >= MAX_ORDER) { + order = MAX_ORDER - 1; + pr_warn("%s: Device Table too large, reduce its page order to %u\n", +- its->msi_chip.of_node->full_name, order); ++ its->domain->of_node->full_name, order); + } + } + +@@ -898,7 +897,7 @@ retry_baser: if (val != tmp) { pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", @@ -6795,7 +8807,7 @@ index d8996bd..c76884b 100644 (unsigned long) val, (unsigned long) tmp); err = -ENXIO; goto out_free; -@@ -1260,6 +1259,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) +@@ -1353,6 +1352,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) struct resource res; struct its_node *its; void __iomem *its_base; @@ -6803,7 +8815,7 @@ index d8996bd..c76884b 100644 u32 val; u64 baser, tmp; int err; -@@ -1296,7 +1296,6 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) +@@ -1396,7 +1396,6 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) INIT_LIST_HEAD(&its->its_device_list); its->base = its_base; its->phys_base = res.start; @@ -6811,7 +8823,7 @@ index d8996bd..c76884b 100644 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); -@@ 
-1330,26 +1329,22 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) +@@ -1430,26 +1429,22 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; } @@ -6846,7 +8858,7 @@ index d8996bd..c76884b 100644 } spin_lock(&its_lock); -@@ -1359,10 +1354,10 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) +@@ -1459,10 +1454,10 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) return 0; out_free_domains: @@ -6860,7 +8872,7 @@ index d8996bd..c76884b 100644 its_free_tables(its); out_free_cmd: diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c -index 1c6dea2..7f073f0 100644 +index fd8850d..507a34a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -524,9 +524,19 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) @@ -6884,7 +8896,7 @@ index 1c6dea2..7f073f0 100644 } diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c -index 4634cf7..d2df23b 100644 +index 471e1cd..aec5f2f 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -33,12 +33,14 @@ @@ -6902,7 +8914,7 @@ index 4634cf7..d2df23b 100644 #include #include -@@ -644,6 +646,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +@@ -648,6 +650,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } @@ -6916,7 +8928,7 @@ index 4634cf7..d2df23b 100644 #endif #ifdef CONFIG_BL_SWITCHER -@@ -984,7 +993,10 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, +@@ -988,7 +997,10 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic_irqs = 1020; gic->gic_irqs = gic_irqs; @@ -6928,7 +8940,7 @@ index 4634cf7..d2df23b 100644 const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops; if (!of_property_read_u32(node, "arm,routable-irqs", -@@ -1028,6 +1040,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, +@@ -1032,6 +1044,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, #ifdef CONFIG_SMP set_smp_cross_call(gic_raise_softirq); register_cpu_notifier(&gic_cpu_notifier); @@ -6938,7 +8950,7 @@ index 4634cf7..d2df23b 100644 #endif set_handle_irq(gic_handle_irq); } -@@ -1038,9 +1053,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, +@@ -1042,9 +1057,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic_pm_init(gic); } @@ -6949,14 +8961,13 @@ index 4634cf7..d2df23b 100644 static int __init gic_of_init(struct device_node *node, struct device_node *parent) { -@@ -1086,3 +1101,109 @@ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); +@@ -1090,3 +1105,109 @@ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); #endif + +#ifdef CONFIG_ACPI -+static phys_addr_t dist_phy_base, cpu_phy_base; -+static int cpu_base_assigned; ++static phys_addr_t dist_phy_base, cpu_phy_base __initdata; + +static int __init +gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, @@ -6964,6 +8975,7 @@ index 4634cf7..d2df23b 100644 +{ + struct acpi_madt_generic_interrupt *processor; + phys_addr_t gic_cpu_base; ++ static int cpu_base_assigned; + + processor = (struct acpi_madt_generic_interrupt *)header; + @@ -7060,14 +9072,14 @@ index 4634cf7..d2df23b 100644 +} +#endif diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c 
-index 0fe2f71..5855240 100644 +index 0fe2f71..afd1af3 100644 --- a/drivers/irqchip/irqchip.c +++ b/drivers/irqchip/irqchip.c @@ -8,6 +8,7 @@ * warranty of any kind, whether express or implied. */ -+#include ++#include #include #include #include @@ -17478,367 +19490,8 @@ index 0000000..dd8500d +#endif + +#endif -diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c -index 869d97f..29aad5e 100644 ---- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c -+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c -@@ -593,12 +593,10 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) - if (!xgene_ring_mgr_init(pdata)) - return -ENODEV; - -- if (pdata->clk) { -- clk_prepare_enable(pdata->clk); -- clk_disable_unprepare(pdata->clk); -- clk_prepare_enable(pdata->clk); -- xgene_enet_ecc_init(pdata); -- } -+ clk_prepare_enable(pdata->clk); -+ clk_disable_unprepare(pdata->clk); -+ clk_prepare_enable(pdata->clk); -+ xgene_enet_ecc_init(pdata); - xgene_enet_config_ring_if_assoc(pdata); - - /* Enable auto-incr for scanning */ -@@ -676,7 +674,7 @@ static int xgene_enet_phy_connect(struct net_device *ndev) - - phy_dev = pdata->phy_dev; - -- if (!phy_dev || -+ if (phy_dev == NULL || - phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, - pdata->phy_mode)) { - netdev_err(ndev, "Could not connect to PHY\n"); -@@ -692,37 +690,23 @@ static int xgene_enet_phy_connect(struct net_device *ndev) - return 0; - } - --static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, -- struct mii_bus *mdio) -+#ifdef CONFIG_ACPI -+static int xgene_acpi_mdiobus_register(struct xgene_enet_pdata *pdata, -+ struct mii_bus *mdio) - { - struct device *dev = &pdata->pdev->dev; -- struct net_device *ndev = pdata->ndev; - struct phy_device *phy; -- struct device_node *child_np; -- struct device_node *mdio_np = NULL; -- int ret; -+ int i, ret; - u32 phy_id; - -- if (dev->of_node) { -- for_each_child_of_node(dev->of_node, child_np) { -- if (of_device_is_compatible(child_np, -- "apm,xgene-mdio")) { -- mdio_np = child_np; -- break; -- } -- } -- -- if (!mdio_np) { -- netdev_dbg(ndev, "No mdio node in the dts\n"); -- return -ENXIO; -- } -- -- return of_mdiobus_register(mdio, mdio_np); -- } -- - /* Mask out all PHYs from auto probing. 
*/ - mdio->phy_mask = ~0; - -+ /* Clear all the IRQ properties */ -+ if (mdio->irq) -+ for (i = 0; i < PHY_MAX_ADDR; i++) -+ mdio->irq[i] = PHY_POLL; -+ - /* Register the MDIO bus */ - ret = mdiobus_register(mdio); - if (ret) -@@ -730,8 +714,6 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, - - ret = device_property_read_u32(dev, "phy-channel", &phy_id); - if (ret) -- ret = device_property_read_u32(dev, "phy-addr", &phy_id); -- if (ret) - return -EINVAL; - - phy = get_phy_device(mdio, phy_id, true); -@@ -746,13 +728,31 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, - - return ret; - } -+#else -+#define xgene_acpi_mdiobus_register(a, b) -1 -+#endif - - int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) - { - struct net_device *ndev = pdata->ndev; -+ struct device *dev = &pdata->pdev->dev; -+ struct device_node *child_np; -+ struct device_node *mdio_np = NULL; - struct mii_bus *mdio_bus; - int ret; - -+ for_each_child_of_node(dev->of_node, child_np) { -+ if (of_device_is_compatible(child_np, "apm,xgene-mdio")) { -+ mdio_np = child_np; -+ break; -+ } -+ } -+ -+ if (dev->of_node && !mdio_np) { -+ netdev_dbg(ndev, "No mdio node in the dts\n"); -+ return -ENXIO; -+ } -+ - mdio_bus = mdiobus_alloc(); - if (!mdio_bus) - return -ENOMEM; -@@ -766,7 +766,10 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) - mdio_bus->priv = pdata; - mdio_bus->parent = &ndev->dev; - -- ret = xgene_mdiobus_register(pdata, mdio_bus); -+ if (dev->of_node) -+ ret = of_mdiobus_register(mdio_bus, mdio_np); -+ else -+ ret = xgene_acpi_mdiobus_register(pdata, mdio_bus); - if (ret) { - netdev_err(ndev, "Failed to register MDIO bus\n"); - mdiobus_free(mdio_bus); -diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c -index 44b1537..a4a53a7 100644 ---- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c -+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c -@@ -24,10 +24,6 @@ - #include "xgene_enet_sgmac.h" - #include "xgene_enet_xgmac.h" - --#define RES_ENET_CSR 0 --#define RES_RING_CSR 1 --#define RES_RING_CMD 2 -- - static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) - { - struct xgene_enet_raw_desc16 *raw_desc; -@@ -752,40 +748,41 @@ static const struct net_device_ops xgene_ndev_ops = { - .ndo_set_mac_address = xgene_enet_set_mac_address, - }; - --static int xgene_get_mac_address(struct device *dev, -- unsigned char *addr) -+#ifdef CONFIG_ACPI -+static int acpi_get_mac_address(struct device *dev, -+ unsigned char *addr) - { - int ret; - -- ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6); -- if (ret) -- ret = device_property_read_u8_array(dev, "mac-address", -- addr, 6); -+ ret = device_property_read_u8_array(dev, "mac-address", addr, 6); - if (ret) -- return -ENODEV; -+ return 0; - -- return ETH_ALEN; -+ return 6; - } - --static int xgene_get_phy_mode(struct device *dev) -+static int acpi_get_phy_mode(struct device *dev) - { -- int i, ret; -+ int i, ret, phy_mode; - char *modestr; - -- ret = device_property_read_string(dev, "phy-connection-type", -- (const char **)&modestr); -- if (ret) -- ret = device_property_read_string(dev, "phy-mode", -- (const char **)&modestr); -+ ret = device_property_read_string(dev, "phy-mode", &modestr); - if (ret) -- return -ENODEV; -+ return -1; - -+ phy_mode = -1; - for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { -- if (!strcasecmp(modestr, phy_modes(i))) -- return i; -+ if (!strcasecmp(modestr, phy_modes(i))) { -+ phy_mode = i; 
-+ break; -+ } - } -- return -ENODEV; -+ return phy_mode; - } -+#else -+#define acpi_get_mac_address(a, b, c) 0 -+#define acpi_get_phy_mode(a) -1 -+#endif - - static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) - { -@@ -794,45 +791,38 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) - struct device *dev; - struct resource *res; - void __iomem *base_addr; -+ const char *mac; - int ret; - - pdev = pdata->pdev; - dev = &pdev->dev; - ndev = pdata->ndev; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR); -- if (!res) { -- dev_err(dev, "Resource enet_csr not defined\n"); -- return -ENODEV; -- } -- pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res)); -- if (!pdata->base_addr) { -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); -+ if (!res) -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ pdata->base_addr = devm_ioremap_resource(dev, res); -+ if (IS_ERR(pdata->base_addr)) { - dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); -- return -ENOMEM; -+ return PTR_ERR(pdata->base_addr); - } - -- res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR); -- if (!res) { -- dev_err(dev, "Resource ring_csr not defined\n"); -- return -ENODEV; -- } -- pdata->ring_csr_addr = devm_ioremap(dev, res->start, -- resource_size(res)); -- if (!pdata->ring_csr_addr) { -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); -+ if (!res) -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); -+ pdata->ring_csr_addr = devm_ioremap_resource(dev, res); -+ if (IS_ERR(pdata->ring_csr_addr)) { - dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); -- return -ENOMEM; -+ return PTR_ERR(pdata->ring_csr_addr); - } - -- res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD); -- if (!res) { -- dev_err(dev, "Resource ring_cmd not defined\n"); -- return -ENODEV; -- } -- pdata->ring_cmd_addr = devm_ioremap(dev, res->start, -- resource_size(res)); -- if (!pdata->ring_cmd_addr) { -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); -+ if (!res) -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); -+ pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); -+ if (IS_ERR(pdata->ring_cmd_addr)) { - dev_err(dev, "Unable to retrieve ENET Ring command region\n"); -- return -ENOMEM; -+ return PTR_ERR(pdata->ring_cmd_addr); - } - - ret = platform_get_irq(pdev, 0); -@@ -843,12 +833,16 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) - } - pdata->rx_irq = ret; - -- if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN) -+ mac = of_get_mac_address(dev->of_node); -+ if (mac) -+ memcpy(ndev->dev_addr, mac, ndev->addr_len); -+ else if (!acpi_get_mac_address(dev, ndev->dev_addr)) - eth_hw_addr_random(ndev); -- - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - -- pdata->phy_mode = xgene_get_phy_mode(dev); -+ pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node); -+ if (pdata->phy_mode < 0) -+ pdata->phy_mode = acpi_get_phy_mode(dev); - if (pdata->phy_mode < 0) { - dev_err(dev, "Unable to get phy-connection-type\n"); - return pdata->phy_mode; -@@ -862,7 +856,10 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) - - pdata->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(pdata->clk)) { -- /* Firmware may have set up the clock already. */ -+ /* -+ * Not necessarily an error. Firmware may have -+ * set up the clock already. 
-+ */ - pdata->clk = NULL; - } - -@@ -913,7 +910,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) - pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); - pdata->mac_ops->init(pdata); - -- return ret; -+ return 0; - } - - static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) -@@ -1033,20 +1033,20 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); - #endif - - #ifdef CONFIG_OF --static struct of_device_id xgene_enet_of_match[] = { -+static struct of_device_id xgene_enet_match[] = { - {.compatible = "apm,xgene-enet",}, - {.compatible = "apm,xgene1-sgenet",}, - {.compatible = "apm,xgene1-xgenet",}, - {}, - }; - --MODULE_DEVICE_TABLE(of, xgene_enet_of_match); -+MODULE_DEVICE_TABLE(of, xgene_enet_match); - #endif - - static struct platform_driver xgene_enet_driver = { - .driver = { - .name = "xgene-enet", -- .of_match_table = of_match_ptr(xgene_enet_of_match), -+ .of_match_table = xgene_enet_match, - .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), - }, - .probe = xgene_enet_probe, -diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h -index c2d465c..0e06cad 100644 ---- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h -+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h -@@ -22,10 +22,7 @@ - #ifndef __XGENE_ENET_MAIN_H__ - #define __XGENE_ENET_MAIN_H__ - --#include - #include --#include --#include - #include - #include - #include -@@ -34,6 +31,7 @@ - #include - #include - #include -+#include - #include "xgene_enet_hw.h" - - #define XGENE_DRV_VERSION "v1.0" diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c -index 88a55f9..944b177 100644 +index 5d093dc..edb4218 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -82,6 +82,7 @@ static const char version[] = @@ -17849,7 +19502,7 @@ index 88a55f9..944b177 100644 #include #include -@@ -2467,6 +2468,14 @@ static struct dev_pm_ops smc_drv_pm_ops = { +@@ -2473,6 +2474,14 @@ static struct dev_pm_ops smc_drv_pm_ops = { .resume = smc_drv_resume, }; @@ -17864,7 +19517,7 @@ index 88a55f9..944b177 100644 static struct platform_driver smc_driver = { .probe = smc_drv_probe, .remove = smc_drv_remove, -@@ -2474,6 +2483,7 @@ static struct platform_driver smc_driver = { +@@ -2480,6 +2489,7 @@ static struct platform_driver smc_driver = { .name = CARDNAME, .pm = &smc_drv_pm_ops, .of_match_table = of_match_ptr(smc91x_match), @@ -17884,7 +19537,7 @@ index 501ea76..92e7644 100644 obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o diff --git a/drivers/net/phy/amd-xgbe-phy-a0.c b/drivers/net/phy/amd-xgbe-phy-a0.c new file mode 100644 -index 0000000..ab6414a +index 0000000..93faf9e --- /dev/null +++ b/drivers/net/phy/amd-xgbe-phy-a0.c @@ -0,0 +1,1829 @@ @@ -19524,7 +21177,7 @@ index 0000000..ab6414a + dev_err(dev, "platform_get_irq failed\n"); + goto err_cmu; + } -+ if (!phy_irqnum) { ++ if (priv->adev && !acpi_disabled && !phy_irqnum) { + struct irq_data *d = irq_get_irq_data(ret); + if (!d) { + dev_err(dev, "unable to set AN interrupt\n"); @@ -19717,8 +21370,409 @@ index 0000000..ab6414a + { } +}; +MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids_a0); +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index 7a8f1c5..32e0a73 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -22,6 +22,13 @@ config PCI_MSI_IRQ_DOMAIN + depends on PCI_MSI + select GENERIC_MSI_IRQ_DOMAIN + ++config PCI_ECAM ++ bool "Enhanced Configuration Access Mechanism (ECAM)" ++ depends on PCI ++ ++config 
PCI_ECAM_GENERIC ++ bool ++ + config PCI_DEBUG + bool "PCI Debugging" + depends on PCI && DEBUG_KERNEL +diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile +index 73e4af4..ce7b630 100644 +--- a/drivers/pci/Makefile ++++ b/drivers/pci/Makefile +@@ -41,6 +41,11 @@ obj-$(CONFIG_SPARC_LEON) += setup-irq.o + obj-$(CONFIG_M68K) += setup-irq.o + + # ++# Enhanced Configuration Access Mechanism (ECAM) ++# ++obj-$(CONFIG_PCI_ECAM) += ecam.o ++ ++# + # ACPI Related PCI FW Functions + # ACPI _DSM provided firmware instance and string name + # +diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c +new file mode 100644 +index 0000000..bcb0c2f +--- /dev/null ++++ b/drivers/pci/ecam.c +@@ -0,0 +1,361 @@ ++/* ++ * Arch agnostic direct PCI config space access via ++ * ECAM (Enhanced Configuration Access Mechanism) ++ * ++ * Per-architecture code takes care of the mappings, region validation and ++ * accesses themselves. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++ ++#include ++#include ++#include ++ ++#define PREFIX "PCI ECAM: " ++ ++static DEFINE_MUTEX(pci_ecam_lock); ++ ++LIST_HEAD(pci_ecam_list); ++ ++extern struct acpi_mcfg_fixup __start_acpi_mcfg_fixups[]; ++extern struct acpi_mcfg_fixup __end_acpi_mcfg_fixups[]; ++ ++#ifdef CONFIG_PCI_ECAM_GENERIC ++int pci_ecam_read(unsigned int seg, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 *value) ++{ ++ struct pci_ecam_region *cfg; ++ char __iomem *addr; ++ ++ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */ ++ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) { ++err: *value = -1; ++ return -EINVAL; ++ } ++ ++ rcu_read_lock(); ++ cfg = pci_ecam_lookup(seg, bus); ++ if (!cfg || !cfg->virt) { ++ rcu_read_unlock(); ++ goto err; ++ } ++ ++ if (cfg->read) ++ (*cfg->read)(cfg, bus, devfn, reg, len, value); ++ else { ++ addr = cfg->virt + (PCI_ECAM_BUS_OFFSET(bus) | (devfn << 12)); ++ *value = pci_mmio_read(len, addr + reg); ++ } ++ rcu_read_unlock(); ++ ++ return 0; ++} ++ ++int pci_ecam_write(unsigned int seg, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 value) ++{ ++ struct pci_ecam_region *cfg; ++ char __iomem *addr; ++ ++ /* Why do we have this when nobody checks it. How about a BUG()!? 
-AK */ ++ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ cfg = pci_ecam_lookup(seg, bus); ++ if (!cfg || !cfg->virt) { ++ rcu_read_unlock(); ++ return -EINVAL; ++ } ++ ++ if (cfg->write) ++ (*cfg->write)(cfg, bus, devfn, reg, len, value); ++ else { ++ addr = cfg->virt + (PCI_ECAM_BUS_OFFSET(bus) | (devfn << 12)); ++ pci_mmio_write(len, addr + reg, value); ++ } ++ rcu_read_unlock(); ++ ++ return 0; ++} ++ ++static void __iomem *pci_ecam_ioremap(struct pci_ecam_region *cfg) ++{ ++ void __iomem *addr; ++ u64 start, size; ++ int num_buses; ++ ++ start = cfg->address + PCI_ECAM_BUS_OFFSET(cfg->start_bus); ++ num_buses = cfg->end_bus - cfg->start_bus + 1; ++ size = PCI_ECAM_BUS_OFFSET(num_buses); ++ addr = ioremap_nocache(start, size); ++ if (addr) ++ addr -= PCI_ECAM_BUS_OFFSET(cfg->start_bus); ++ return addr; ++} ++ ++int __init pci_ecam_arch_init(void) ++{ ++ struct pci_ecam_region *cfg; ++ ++ list_for_each_entry(cfg, &pci_ecam_list, list) ++ if (pci_ecam_arch_map(cfg)) { ++ pci_ecam_arch_free(); ++ return 0; ++ } ++ ++ return 1; ++} ++ ++void __init pci_ecam_arch_free(void) ++{ ++ struct pci_ecam_region *cfg; ++ ++ list_for_each_entry(cfg, &pci_ecam_list, list) ++ pci_ecam_arch_unmap(cfg); ++} ++ ++int pci_ecam_arch_map(struct pci_ecam_region *cfg) ++{ ++ cfg->virt = pci_ecam_ioremap(cfg); ++ if (!cfg->virt) { ++ pr_err(PREFIX "can't map ECAM at %pR\n", &cfg->res); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++void pci_ecam_arch_unmap(struct pci_ecam_region *cfg) ++{ ++ if (cfg && cfg->virt) { ++ iounmap(cfg->virt + PCI_ECAM_BUS_OFFSET(cfg->start_bus)); ++ cfg->virt = NULL; ++ } ++} ++#endif ++ ++static u32 ++pci_ecam_generic_read(int len, void __iomem *addr) ++{ ++ u32 data = 0; ++ ++ switch (len) { ++ case 1: ++ data = readb(addr); ++ break; ++ case 2: ++ data = readw(addr); ++ break; ++ case 4: ++ data = readl(addr); ++ break; ++ } ++ ++ return data; ++} ++ ++static void ++pci_ecam_generic_write(int len, void __iomem *addr, u32 value) ++{ ++ switch (len) { ++ case 1: ++ writeb(value, addr); ++ break; ++ case 2: ++ writew(value, addr); ++ break; ++ case 4: ++ writel(value, addr); ++ break; ++ } ++} ++ ++static struct pci_ecam_mmio_ops pci_ecam_mmio_default = { ++ .read = pci_ecam_generic_read, ++ .write = pci_ecam_generic_write, ++}; ++ ++static struct pci_ecam_mmio_ops *pci_ecam_mmio = &pci_ecam_mmio_default; ++ ++void ++pci_ecam_register_mmio(struct pci_ecam_mmio_ops *ops) ++{ ++ pci_ecam_mmio = ops; ++} ++ ++u32 ++pci_mmio_read(int len, void __iomem *addr) ++{ ++ if (!pci_ecam_mmio) { ++ pr_err("PCI config space has no accessors !"); ++ return 0; ++ } ++ ++ return pci_ecam_mmio->read(len, addr); ++} ++ ++void ++pci_mmio_write(int len, void __iomem *addr, u32 value) ++{ ++ if (!pci_ecam_mmio) { ++ pr_err("PCI config space has no accessors !"); ++ return; ++ } ++ ++ pci_ecam_mmio->write(len, addr, value); ++} ++ ++static void __init pci_ecam_remove(struct pci_ecam_region *cfg) ++{ ++ if (cfg->res.parent) ++ release_resource(&cfg->res); ++ list_del(&cfg->list); ++ kfree(cfg); ++} ++ ++void __init pci_ecam_free_all(void) ++{ ++ struct pci_ecam_region *cfg, *tmp; ++ ++ pci_ecam_arch_free(); ++ list_for_each_entry_safe(cfg, tmp, &pci_ecam_list, list) ++ pci_ecam_remove(cfg); ++} ++ ++void pci_ecam_list_add_sorted(struct pci_ecam_region *new) ++{ ++ struct pci_ecam_region *cfg; ++ ++ /* keep list sorted by segment and starting bus number */ ++ list_for_each_entry_rcu(cfg, &pci_ecam_list, list) { ++ if (cfg->segment > new->segment || 
++ (cfg->segment == new->segment && ++ cfg->start_bus >= new->start_bus)) { ++ list_add_tail_rcu(&new->list, &cfg->list); ++ return; ++ } ++ } ++ list_add_tail_rcu(&new->list, &pci_ecam_list); ++} ++ ++struct pci_ecam_region *pci_ecam_alloc(int segment, int start, ++ int end, u64 addr) ++{ ++ struct pci_ecam_region *new; ++ struct resource *res; ++ ++ if (addr == 0) ++ return NULL; ++ ++ new = kzalloc(sizeof(*new), GFP_KERNEL); ++ if (!new) ++ return NULL; ++ ++ new->address = addr; ++ new->segment = segment; ++ new->start_bus = start; ++ new->end_bus = end; ++ ++ res = &new->res; ++ res->start = addr + PCI_ECAM_BUS_OFFSET(start); ++ res->end = addr + PCI_ECAM_BUS_OFFSET(end + 1) - 1; ++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; ++ snprintf(new->name, PCI_ECAM_RESOURCE_NAME_LEN, ++ "PCI ECAM %04x [bus %02x-%02x]", segment, start, end); ++ res->name = new->name; ++ ++ return new; ++} ++ ++struct pci_ecam_region *pci_ecam_add(int segment, int start, ++ int end, u64 addr) ++{ ++ struct pci_ecam_region *new; ++ ++ new = pci_ecam_alloc(segment, start, end, addr); ++ if (new) { ++ mutex_lock(&pci_ecam_lock); ++ pci_ecam_list_add_sorted(new); ++ mutex_unlock(&pci_ecam_lock); ++ ++ pr_info(PREFIX ++ "ECAM for domain %04x [bus %02x-%02x] at %pR " ++ "(base %#lx)\n", ++ segment, start, end, &new->res, (unsigned long)addr); ++ } ++ ++ return new; ++} ++ ++struct pci_ecam_region *pci_ecam_lookup(int segment, int bus) ++{ ++ struct pci_ecam_region *cfg; ++ ++ list_for_each_entry_rcu(cfg, &pci_ecam_list, list) ++ if (cfg->segment == segment && ++ cfg->start_bus <= bus && bus <= cfg->end_bus) ++ return cfg; ++ ++ return NULL; ++} ++ ++/* Delete ECAM information for host bridges */ ++int pci_ecam_delete(u16 seg, u8 start, u8 end) ++{ ++ struct pci_ecam_region *cfg; ++ ++ mutex_lock(&pci_ecam_lock); ++ list_for_each_entry_rcu(cfg, &pci_ecam_list, list) ++ if (cfg->segment == seg && cfg->start_bus == start && ++ cfg->end_bus == end) { ++ list_del_rcu(&cfg->list); ++ synchronize_rcu(); ++ pci_ecam_arch_unmap(cfg); ++ if (cfg->res.parent) ++ release_resource(&cfg->res); ++ mutex_unlock(&pci_ecam_lock); ++ kfree(cfg); ++ return 0; ++ } ++ mutex_unlock(&pci_ecam_lock); ++ ++ return -ENOENT; ++} ++ ++int pci_ecam_inject(struct pci_ecam_region *cfg) ++{ ++ struct pci_ecam_region *cfg_conflict; ++ int err = 0; ++ ++ mutex_lock(&pci_ecam_lock); ++ cfg_conflict = pci_ecam_lookup(cfg->segment, cfg->start_bus); ++ if (cfg_conflict) { ++ if (cfg_conflict->end_bus < cfg->end_bus) ++ pr_info(FW_INFO "ECAM for " ++ "domain %04x [bus %02x-%02x] " ++ "only partially covers this bridge\n", ++ cfg_conflict->segment, cfg_conflict->start_bus, ++ cfg_conflict->end_bus); ++ err = -EEXIST; ++ goto out; ++ } ++ ++ if (pci_ecam_arch_map(cfg)) { ++ pr_warn("fail to map ECAM %pR.\n", &cfg->res); ++ err = -ENOMEM; ++ goto out; ++ } else { ++ pci_ecam_list_add_sorted(cfg); ++ pr_info("ECAM at %pR (base %#lx)\n", ++ &cfg->res, (unsigned long)cfg->address); ++ ++ } ++out: ++ mutex_unlock(&pci_ecam_lock); ++ return err; ++} diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c -index aab5547..967ad80 100644 +index ee082c0..c37a3f3 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -29,6 +29,8 @@ @@ -19726,102 +21780,15 @@ index aab5547..967ad80 100644 #include #include +#include -+#include ++#include #define PCIECORE_CTLANDSTATUS 0x50 #define PIM1_1L 0x80 -@@ -468,6 +470,252 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, +@@ -468,6 +470,160 @@ static int 
xgene_pcie_setup(struct xgene_pcie_port *port, return 0; } +#ifdef CONFIG_ACPI -+ -+/* PCIe Configuration Out/In */ -+static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val) -+{ -+ writel(val, addr + offset); -+} -+ -+static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val) -+{ -+ u32 val32 = readl(addr + (offset & ~0x3)); -+ -+ switch (offset & 0x3) { -+ case 2: -+ val32 &= ~0xFFFF0000; -+ val32 |= (u32)val << 16; -+ break; -+ case 0: -+ default: -+ val32 &= ~0xFFFF; -+ val32 |= val; -+ break; -+ } -+ writel(val32, addr + (offset & ~0x3)); -+} -+ -+static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val) -+{ -+ u32 val32 = readl(addr + (offset & ~0x3)); -+ -+ switch (offset & 0x3) { -+ case 0: -+ val32 &= ~0xFF; -+ val32 |= val; -+ break; -+ case 1: -+ val32 &= ~0xFF00; -+ val32 |= (u32)val << 8; -+ break; -+ case 2: -+ val32 &= ~0xFF0000; -+ val32 |= (u32)val << 16; -+ break; -+ case 3: -+ default: -+ val32 &= ~0xFF000000; -+ val32 |= (u32)val << 24; -+ break; -+ } -+ writel(val32, addr + (offset & ~0x3)); -+} -+ -+static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val) -+{ -+ *val = readl(addr + offset); -+} -+ -+static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val) -+{ -+ *val = readl(addr + (offset & ~0x3)); -+ -+ switch (offset & 0x3) { -+ case 2: -+ *val >>= 16; -+ break; -+ } -+ -+ *val &= 0xFFFF; -+} -+ -+static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val) -+{ -+ *val = readl(addr + (offset & ~0x3)); -+ -+ switch (offset & 0x3) { -+ case 3: -+ *val = *val >> 24; -+ break; -+ case 2: -+ *val = *val >> 16; -+ break; -+ case 1: -+ *val = *val >> 8; -+ break; -+ } -+ *val &= 0xFF; -+} -+ +struct xgene_mcfg_info { + void __iomem *csr_base; +}; @@ -19830,7 +21797,7 @@ index aab5547..967ad80 100644 + * When the address bit [17:16] is 2'b01, the Configuration access will be + * treated as Type 1 and it will be forwarded to external PCIe device. + */ -+static void __iomem *__get_cfg_base(struct pci_mmcfg_region *cfg, ++static void __iomem *__get_cfg_base(struct pci_ecam_region *cfg, + unsigned int bus) +{ + if (bus > cfg->start_bus) @@ -19843,7 +21810,7 @@ index aab5547..967ad80 100644 + * For Configuration request, RTDID register is used as Bus Number, + * Device Number and Function number of the header fields. + */ -+static void __set_rtdid_reg(struct pci_mmcfg_region *cfg, ++static void __set_rtdid_reg(struct pci_ecam_region *cfg, + unsigned int bus, unsigned int devfn) +{ + struct xgene_mcfg_info *info = cfg->data; @@ -19862,7 +21829,7 @@ index aab5547..967ad80 100644 + readl(info->csr_base + RTDID); +} + -+static int xgene_raw_pci_read(struct pci_mmcfg_region *cfg, unsigned int bus, ++static int xgene_raw_pci_read(struct pci_ecam_region *cfg, unsigned int bus, + unsigned int devfn, int offset, int len, u32 *val) +{ + void __iomem *addr; @@ -19882,55 +21849,50 @@ index aab5547..967ad80 100644 + } + + __set_rtdid_reg(cfg, bus, devfn); -+ addr = __get_cfg_base(cfg, bus); -+ switch (len) { -+ case 1: -+ xgene_pcie_cfg_in8(addr, offset, val); -+ break; -+ case 2: -+ xgene_pcie_cfg_in16(addr, offset, val); -+ /* FIXME. -+ * Something wrong with Configuration Request Retry Status -+ * on this hw. Pretend it isn't supported until the problem -+ * gets sorted out properly. 
-+ */ -+ if (bus == cfg->start_bus && offset == (0x40 + PCI_EXP_RTCAP)) -+ *val &= ~PCI_EXP_RTCAP_CRSVIS; -+ break; -+ default: -+ xgene_pcie_cfg_in32(addr, offset, val); -+ break; -+ } ++ addr = __get_cfg_base(cfg, bus) + (offset & ~0x3); ++ *val = readl(addr); ++ if (len <= 2) ++ *val = (*val >> (8 * (offset & 3))) & ((1 << (len * 8)) - 1); ++ ++ /* FIXME. ++ * Something wrong with Configuration Request Retry Status ++ * on this hw. Pretend it isn't supported until the problem ++ * gets sorted out properly. ++ */ ++ if (len == 2 && bus == cfg->start_bus && offset == (0x40 + PCI_EXP_RTCAP)) ++ *val &= ~PCI_EXP_RTCAP_CRSVIS; ++ + return PCIBIOS_SUCCESSFUL; +} + -+static int xgene_raw_pci_write(struct pci_mmcfg_region *cfg, unsigned int bus, ++static int xgene_raw_pci_write(struct pci_ecam_region *cfg, unsigned int bus, + unsigned int devfn, int offset, int len, u32 val) +{ + void __iomem *addr; ++ u32 mask, tmp; + + if (bus == cfg->start_bus && devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + __set_rtdid_reg(cfg, bus, devfn); -+ addr = __get_cfg_base(cfg, bus); -+ switch (len) { -+ case 1: -+ xgene_pcie_cfg_out8(addr, offset, (u8)val); -+ break; -+ case 2: -+ xgene_pcie_cfg_out16(addr, offset, (u16)val); -+ break; -+ default: -+ xgene_pcie_cfg_out32(addr, offset, val); -+ break; -+ } ++ addr = __get_cfg_base(cfg, bus) + (offset & ~0x3); ++ ++ if (len == 4) { ++ writel(val, addr); ++ return PCIBIOS_SUCCESSFUL; ++ } else ++ mask = ~(((1 << (len * 8)) - 1) << ((offset & 0x3) * 8)); ++ ++ tmp = readl(addr) & mask; ++ tmp |= val << ((offset & 0x3) * 8); ++ writel(tmp, addr); ++ + return PCIBIOS_SUCCESSFUL; +} + +static acpi_status find_csr_base(struct acpi_resource *acpi_res, void *data) +{ -+ struct pci_mmcfg_region *cfg = data; ++ struct pci_ecam_region *cfg = data; + struct xgene_mcfg_info *info = cfg->data; + struct acpi_resource_fixed_memory32 *fixed32; + @@ -19944,7 +21906,7 @@ index aab5547..967ad80 100644 +} + +static int xgene_mcfg_fixup(struct acpi_pci_root *root, -+ struct pci_mmcfg_region *cfg) ++ struct pci_ecam_region *cfg) +{ + struct acpi_device *device = root->device; + struct xgene_mcfg_info *info; @@ -20086,6 +22048,59 @@ index 4890639..04b676b 100644 +#endif +} +#endif /* CONFIG_PCI_MSI */ +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 81f06e8..ae3fce5 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + #include + #include "pci.h" +@@ -4504,7 +4505,7 @@ int pci_get_new_domain_nr(void) + void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent) + { + static int use_dt_domains = -1; +- int domain = of_get_pci_domain_nr(parent->of_node); ++ int domain; + + /* + * Check DT domain and use_dt_domains values. +@@ -4532,17 +4533,22 @@ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent) + * invalidating the domain value (domain = -1) and printing a + * corresponding error. 
+ */ +- if (domain >= 0 && use_dt_domains) { +- use_dt_domains = 1; +- } else if (domain < 0 && use_dt_domains != 1) { +- use_dt_domains = 0; +- domain = pci_get_new_domain_nr(); ++ if (acpi_disabled) { ++ domain = of_get_pci_domain_nr(parent->of_node); ++ if (domain >= 0 && use_dt_domains) { ++ use_dt_domains = 1; ++ } else if (domain < 0 && use_dt_domains != 1) { ++ use_dt_domains = 0; ++ domain = pci_get_new_domain_nr(); ++ } else { ++ dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n", ++ parent->of_node->full_name); ++ domain = -1; ++ } + } else { +- dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n", +- parent->of_node->full_name); +- domain = -1; ++ struct pci_sysdata *sd = bus->sysdata; ++ domain = sd->domain; + } +- + bus->domain_nr = domain; + } + #endif diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8d2f400..324cdce 100644 --- a/drivers/pci/probe.c @@ -20556,29 +22571,30 @@ index 0000000..0f44624 + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c -index e601162..3991aa0 100644 +index 2ab229d..27e2e8f 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c -@@ -361,9 +361,7 @@ static int dw8250_probe_acpi(struct uart_8250_port *up, - return -ENODEV; +@@ -394,8 +394,18 @@ static int dw8250_probe_acpi(struct uart_8250_port *up, if (!p->uartclk) -- if (device_property_read_u32(p->dev, "clock-frequency", + if (device_property_read_u32(p->dev, "clock-frequency", - &p->uartclk)) - return -EINVAL; -+ p->uartclk = (unsigned int)id->driver_data; ++ &p->uartclk)) { ++ if (strncmp("APMC0D08", id->id, 8)) ++ return -EINVAL; ++ /* ++ * Temp hack for Mustang to continue working ++ * with older firmware. ++ */ ++ dev_info(p->dev, ++ "clock-frequency not found in ACPI tables."); ++ dev_info(p->dev, "Updated firmware needed!\n"); ++ p->uartclk = 50000000; ++ } p->iotype = UPIO_MEM32; p->serial_in = dw8250_serial_in32; -@@ -587,7 +585,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { - { "INT3435", 0 }, - { "80860F0A", 0 }, - { "8086228A", 0 }, -- { "APMC0D08", 0}, -+ { "APMC0D08", 50000000}, - { }, - }; - MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8d94c19..04930e2 100644 --- a/drivers/tty/serial/amba-pl011.c @@ -20600,7 +22616,7 @@ index 8d94c19..04930e2 100644 amba_ports[i] = NULL; uart_unregister_driver(&amba_reg); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c -index 0e11d61408ff..88ffbeee4d1c 100644 +index 0e11d61..88ffbee 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -83,14 +83,13 @@ static int xhci_plat_probe(struct platform_device *pdev) @@ -20626,10 +22642,10 @@ index 0e11d61408ff..88ffbeee4d1c 100644 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c -index 00d115b..cd9b974 100644 +index cad5698..88b6cd3 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c -@@ -100,8 +100,7 @@ +@@ -70,8 +70,7 @@ #include #include #include @@ -20639,7 +22655,7 @@ index 00d115b..cd9b974 100644 /* The alignment to use between consumer and producer parts of vring. * Currently hardcoded to the page size. 
*/ -@@ -635,12 +634,21 @@ static struct of_device_id virtio_mmio_match[] = { +@@ -666,12 +665,21 @@ static struct of_device_id virtio_mmio_match[] = { }; MODULE_DEVICE_TABLE(of, virtio_mmio_match); @@ -20661,6 +22677,33 @@ index 00d115b..cd9b974 100644 }, }; +diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c +index 95ee430..eff4035 100644 +--- a/drivers/xen/pci.c ++++ b/drivers/xen/pci.c +@@ -204,7 +204,7 @@ arch_initcall(register_xen_pci_notifier); + #ifdef CONFIG_PCI_MMCONFIG + static int __init xen_mcfg_late(void) + { +- struct pci_mmcfg_region *cfg; ++ struct pci_ecam_region *cfg; + int rc; + + if (!xen_initial_domain()) +@@ -213,11 +213,11 @@ static int __init xen_mcfg_late(void) + if ((pci_probe & PCI_PROBE_MMCONF) == 0) + return 0; + +- if (list_empty(&pci_mmcfg_list)) ++ if (list_empty(&pci_ecam_list)) + return 0; + + /* Check whether they are in the right area. */ +- list_for_each_entry(cfg, &pci_mmcfg_list, list) { ++ list_for_each_entry(cfg, &pci_ecam_list, list) { + struct physdev_pci_mmcfg_reserved r; + + r.address = cfg->address; diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index 273de70..b52c0dc 100644 --- a/include/acpi/acnames.h @@ -20687,15 +22730,15 @@ index 61e32ec..1fec6f5 100644 acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func, union acpi_object *argv4, acpi_object_type type) diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h -index 444671e..48f504a 100644 +index 444671e..dd86c5f 100644 --- a/include/acpi/acpi_io.h +++ b/include/acpi/acpi_io.h -@@ -2,12 +2,15 @@ - #define _ACPI_IO_H_ +@@ -3,11 +3,15 @@ #include -+#include ++#include ++ +#ifndef acpi_os_ioremap static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) @@ -20706,6 +22749,59 @@ index 444671e..48f504a 100644 void __iomem *__init_refok acpi_os_map_iomem(acpi_physical_address phys, acpi_size size); +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h +index b034f10..ab3dac8 100644 +--- a/include/acpi/actypes.h ++++ b/include/acpi/actypes.h +@@ -1148,7 +1148,7 @@ struct acpi_device_info { + u32 name; /* ACPI object Name */ + acpi_object_type type; /* ACPI object Type */ + u8 param_count; /* If a method, required parameter count */ +- u8 valid; /* Indicates which optional fields are valid */ ++ u16 valid; /* Indicates which optional fields are valid */ + u8 flags; /* Miscellaneous info */ + u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ + u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ +@@ -1157,6 +1157,7 @@ struct acpi_device_info { + struct acpi_pnp_device_id hardware_id; /* _HID value */ + struct acpi_pnp_device_id unique_id; /* _UID value */ + struct acpi_pnp_device_id subsystem_id; /* _SUB value */ ++ struct acpi_pnp_device_id cls; /* _CLS value */ + struct acpi_pnp_device_id_list compatible_id_list; /* _CID list */ + }; + +@@ -1174,6 +1175,7 @@ struct acpi_device_info { + #define ACPI_VALID_CID 0x20 + #define ACPI_VALID_SXDS 0x40 + #define ACPI_VALID_SXWS 0x80 ++#define ACPI_VALID_CLS 0x100 + + /* Flags for _STA return value (current_status above) */ + +diff --git a/include/acpi/processor.h b/include/acpi/processor.h +index b95dc32..4188a4d 100644 +--- a/include/acpi/processor.h ++++ b/include/acpi/processor.h +@@ -196,7 +196,7 @@ struct acpi_processor_flags { + struct acpi_processor { + acpi_handle handle; + u32 acpi_id; +- u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */ ++ phys_cpuid_t phys_id; /* CPU hardware ID such as APIC ID for x86 */ + u32 id; /* CPU 
logical ID allocated by OS */ + u32 pblk; + int performance_platform_limit; +@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) + #endif /* CONFIG_CPU_FREQ */ + + /* in processor_core.c */ +-int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); +-int acpi_map_cpuid(int phys_id, u32 acpi_id); ++phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); ++int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id); + int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); + + /* in processor_pdc.c */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index ac78910..472d6b8 100644 --- a/include/asm-generic/vmlinux.lds.h @@ -20758,7 +22854,7 @@ index 7c55dd5..d7fcc50 100644 return -ENODEV; } diff --git a/include/linux/acpi.h b/include/linux/acpi.h -index 24c7aa8..23c807d 100644 +index 24c7aa8..de4e86f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -73,6 +73,7 @@ enum acpi_irq_model_id { @@ -20769,59 +22865,38 @@ index 24c7aa8..23c807d 100644 ACPI_IRQ_MODEL_COUNT }; -@@ -166,6 +167,16 @@ extern u32 acpi_irq_not_handled; - extern int sbf_port; - extern unsigned long acpi_realmode_flags; +@@ -146,9 +147,14 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); + void acpi_numa_arch_fixup(void); -+static inline void acpi_irq_init(void) -+{ -+ /* -+ * Hardcode ACPI IRQ chip initialization to GICv2 for now. -+ * Proper irqchip infrastructure will be implemented along with -+ * incoming GICv2m|GICv3|ITS bits. -+ */ -+ acpi_gic_init(); -+} ++#ifndef PHYS_CPUID_INVALID ++typedef u32 phys_cpuid_t; ++#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) ++#endif + - int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); - int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); - int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); -@@ -439,6 +450,10 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, + #ifdef CONFIG_ACPI_HOTPLUG_CPU + /* Arch dependent functions for cpu hotplug support */ +-int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu); ++int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu); + int acpi_unmap_cpu(int cpu); + #endif /* CONFIG_ACPI_HOTPLUG_CPU */ - extern bool acpi_driver_match_device(struct device *dev, - const struct device_driver *drv); +diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h +new file mode 100644 +index 0000000..e4e8a81 +--- /dev/null ++++ b/include/linux/acpi_irq.h +@@ -0,0 +1,10 @@ ++#ifndef _LINUX_ACPI_IRQ_H ++#define _LINUX_ACPI_IRQ_H + -+bool acpi_match_device_cls(const struct acpi_device_cls *dev_cls, -+ const struct device *dev); ++#include + - int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); - int acpi_device_modalias(struct device *, char *, int); - void acpi_walk_dep_device_list(acpi_handle handle); -@@ -515,6 +530,11 @@ static inline int acpi_table_parse(char *id, - return -ENODEV; - } - -+static inline void acpi_irq_init(void) -+{ -+ return; -+} ++#ifndef acpi_irq_init ++static inline void acpi_irq_init(void) { } ++#endif + - static inline int acpi_nvs_register(__u64 start, __u64 size) - { - return 0; -@@ -534,6 +554,12 @@ static inline const struct acpi_device_id *acpi_match_device( - return NULL; - } - -+static inline bool acpi_match_device_cls(const struct acpi_device_cls *dev_cls, -+ const struct device *dev) -+{ -+ return false; -+} -+ - 
static inline bool acpi_driver_match_device(struct device *dev, - const struct device_driver *drv) - { ++#endif /* _LINUX_ACPI_IRQ_H */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 9c78d15..2b2e1f8 100644 --- a/include/linux/clocksource.h @@ -20838,18 +22913,10 @@ index 9c78d15..2b2e1f8 100644 + #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/device.h b/include/linux/device.h -index 0eb8ee2..4972a5c 100644 +index 0eb8ee2..66730dd 100644 --- a/include/linux/device.h +++ b/include/linux/device.h -@@ -237,6 +237,7 @@ struct device_driver { - - const struct of_device_id *of_match_table; - const struct acpi_device_id *acpi_match_table; -+ const struct acpi_device_cls *acpi_match_cls; - - int (*probe) (struct device *dev); - int (*remove) (struct device *dev); -@@ -690,6 +691,7 @@ struct acpi_dev_node { +@@ -690,6 +690,7 @@ struct acpi_dev_node { * along with subsystem-level and driver-level callbacks. * @pins: For device pin management. * See Documentation/pinctrl.txt for details. @@ -20857,7 +22924,7 @@ index 0eb8ee2..4972a5c 100644 * @numa_node: NUMA node this device is close to. * @dma_mask: Dma mask (if dma'ble device). * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all -@@ -750,6 +752,9 @@ struct device { +@@ -750,6 +751,9 @@ struct device { struct dev_pm_info power; struct dev_pm_domain *pm_domain; @@ -20867,7 +22934,7 @@ index 0eb8ee2..4972a5c 100644 #ifdef CONFIG_PINCTRL struct dev_pin_info *pins; #endif -@@ -837,6 +842,22 @@ static inline void set_dev_node(struct device *dev, int node) +@@ -837,6 +841,22 @@ static inline void set_dev_node(struct device *dev, int node) } #endif @@ -20911,12 +22978,99 @@ index f820f0a..8e1a28d 100644 #endif +diff --git a/include/linux/ecam.h b/include/linux/ecam.h +new file mode 100644 +index 0000000..4d42eff +--- /dev/null ++++ b/include/linux/ecam.h +@@ -0,0 +1,81 @@ ++#ifndef __ECAM_H ++#define __ECAM_H ++#ifdef __KERNEL__ ++ ++#include ++#include ++ ++/* "PCI ECAM %04x [bus %02x-%02x]" */ ++#define PCI_ECAM_RESOURCE_NAME_LEN (22 + 4 + 2 + 2) ++ ++struct acpi_pci_root; ++struct pci_ecam_region; ++ ++typedef int (*acpi_mcfg_fixup_t)(struct acpi_pci_root *root, ++ struct pci_ecam_region *cfg); ++ ++struct pci_ecam_region { ++ struct list_head list; ++ struct resource res; ++ int (*read)(struct pci_ecam_region *cfg, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 *value); ++ int (*write)(struct pci_ecam_region *cfg, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 value); ++ acpi_mcfg_fixup_t fixup; ++ void *data; ++ u64 address; ++ char __iomem *virt; ++ u16 segment; ++ u8 start_bus; ++ u8 end_bus; ++ char name[PCI_ECAM_RESOURCE_NAME_LEN]; ++}; ++ ++struct acpi_mcfg_fixup { ++ char oem_id[7]; ++ char oem_table_id[9]; ++ acpi_mcfg_fixup_t hook; ++}; ++ ++/* Designate a routine to fix up buggy MCFG */ ++#define DECLARE_ACPI_MCFG_FIXUP(oem_id, table_id, hook) \ ++ static const struct acpi_mcfg_fixup __acpi_fixup_##hook __used \ ++ __attribute__((__section__(".acpi_fixup_mcfg"), aligned((sizeof(void *))))) \ ++ = { {oem_id}, {table_id}, hook }; ++ ++ ++struct pci_ecam_mmio_ops { ++ u32 (*read)(int len, void __iomem *addr); ++ void (*write)(int len, void __iomem *addr, u32 value); ++}; ++ ++struct pci_ecam_region *pci_ecam_lookup(int segment, int bus); ++struct pci_ecam_region *pci_ecam_alloc(int segment, int start, ++ int end, u64 addr); ++int pci_ecam_inject(struct pci_ecam_region *cfg); ++struct pci_ecam_region *pci_ecam_add(int segment, int start, ++ int 
end, u64 addr); ++void pci_ecam_list_add_sorted(struct pci_ecam_region *new); ++void pci_ecam_free_all(void); ++int pci_ecam_delete(u16 seg, u8 start, u8 end); ++ ++/* Arch specific calls */ ++int pci_ecam_arch_init(void); ++void pci_ecam_arch_free(void); ++int pci_ecam_arch_map(struct pci_ecam_region *cfg); ++void pci_ecam_arch_unmap(struct pci_ecam_region *cfg); ++extern u32 pci_mmio_read(int len, void __iomem *addr); ++extern void pci_mmio_write(int len, void __iomem *addr, u32 value); ++extern void pci_ecam_register_mmio(struct pci_ecam_mmio_ops *ops); ++ ++extern struct list_head pci_ecam_list; ++ ++#define PCI_ECAM_BUS_OFFSET(bus) ((bus) << 20) ++ ++int pci_ecam_read(unsigned int seg, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 *value); ++int pci_ecam_write(unsigned int seg, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 value); ++ ++#endif /* __KERNEL__ */ ++#endif /* __ECAM_H */ diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h new file mode 100644 -index 0000000..cc7753d +index 0000000..8776eec --- /dev/null +++ b/include/linux/irqchip/arm-gic-acpi.h -@@ -0,0 +1,31 @@ +@@ -0,0 +1,32 @@ +/* + * Copyright (C) 2014, Linaro Ltd. + * Author: Tomasz Nowicki @@ -20940,9 +23094,10 @@ index 0000000..cc7753d +#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K) + +struct acpi_table_header; ++struct irq_domain; + -+void acpi_gic_init(void); +int gic_v2_acpi_init(struct acpi_table_header *table, struct irq_domain **domain); ++void acpi_gic_init(void); +#else +static inline void acpi_gic_init(void) { } +#endif @@ -20973,115 +23128,18 @@ index 71d706d..0b45062 100644 void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); void gic_migrate_target(unsigned int new_cpu_id); -diff --git a/include/linux/mmconfig.h b/include/linux/mmconfig.h -new file mode 100644 -index 0000000..4360e9a ---- /dev/null -+++ b/include/linux/mmconfig.h -@@ -0,0 +1,86 @@ -+#ifndef __MMCONFIG_H -+#define __MMCONFIG_H -+#ifdef __KERNEL__ -+ -+#include -+#include -+ -+#ifdef CONFIG_PCI_MMCONFIG -+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */ -+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2) -+ -+struct acpi_pci_root; -+struct pci_mmcfg_region; -+ -+typedef int (*acpi_mcfg_fixup_t)(struct acpi_pci_root *root, -+ struct pci_mmcfg_region *cfg); -+ -+struct pci_mmcfg_region { -+ struct list_head list; -+ struct resource res; -+ int (*read)(struct pci_mmcfg_region *cfg, unsigned int bus, -+ unsigned int devfn, int reg, int len, u32 *value); -+ int (*write)(struct pci_mmcfg_region *cfg, unsigned int bus, -+ unsigned int devfn, int reg, int len, u32 value); -+ acpi_mcfg_fixup_t fixup; -+ void *data; -+ u64 address; -+ char __iomem *virt; -+ u16 segment; -+ u8 start_bus; -+ u8 end_bus; -+ char name[PCI_MMCFG_RESOURCE_NAME_LEN]; -+}; -+ -+struct acpi_mcfg_fixup { -+ char oem_id[7]; -+ char oem_table_id[9]; -+ acpi_mcfg_fixup_t hook; -+}; -+ -+/* Designate a routine to fix up buggy MCFG */ -+#define DECLARE_ACPI_MCFG_FIXUP(oem_id, table_id, hook) \ -+ static const struct acpi_mcfg_fixup __acpi_fixup_##hook __used \ -+ __attribute__((__section__(".acpi_fixup_mcfg"), aligned((sizeof(void *))))) \ -+ = { {oem_id}, {table_id}, hook }; -+ -+void pci_mmcfg_early_init(void); -+void pci_mmcfg_late_init(void); -+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus); -+ -+int pci_parse_mcfg(struct acpi_table_header *header); -+struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start, -+ int end, u64 addr); -+int 
pci_mmconfig_inject(struct pci_mmcfg_region *cfg); -+struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, -+ int end, u64 addr); -+void list_add_sorted(struct pci_mmcfg_region *new); -+int acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, -+ struct acpi_mcfg_allocation *cfg); -+void free_all_mmcfg(void); -+int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, -+ phys_addr_t addr); -+int pci_mmconfig_delete(u16 seg, u8 start, u8 end); -+ -+/* Arch specific calls */ -+int pci_mmcfg_arch_init(void); -+void pci_mmcfg_arch_free(void); -+int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg); -+void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg); -+int pci_mmcfg_read(unsigned int seg, unsigned int bus, -+ unsigned int devfn, int reg, int len, u32 *value); -+int pci_mmcfg_write(unsigned int seg, unsigned int bus, -+ unsigned int devfn, int reg, int len, u32 value); -+ -+extern struct list_head pci_mmcfg_list; -+ -+#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20) -+#else /* CONFIG_PCI_MMCONFIG */ -+static inline void pci_mmcfg_late_init(void) { } -+static inline void pci_mmcfg_early_init(void) { } -+static inline void *pci_mmconfig_lookup(int segment, int bus) -+{ return NULL; } -+#endif /* CONFIG_PCI_MMCONFIG */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __MMCONFIG_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h -index 2e75ab0..9da3162 100644 +index e530533..9a42522 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h -@@ -191,6 +191,12 @@ struct acpi_device_id { +@@ -189,6 +189,7 @@ struct css_device_id { + struct acpi_device_id { + __u8 id[ACPI_ID_LEN]; kernel_ulong_t driver_data; ++ __u32 cls; }; -+struct acpi_device_cls { -+ kernel_ulong_t base_class; -+ kernel_ulong_t sub_class; -+ kernel_ulong_t prog_interface; -+}; -+ #define PNP_ID_LEN 8 - #define PNP_MAX_DEVICES 8 - diff --git a/include/linux/msi.h b/include/linux/msi.h index 8ac4a68..01b648f 100644 --- a/include/linux/msi.h @@ -21122,10 +23180,10 @@ index 24c7728..3e95ec8 100644 #ifdef CONFIG_ACPI_APEI diff --git a/include/linux/pci.h b/include/linux/pci.h -index 211e9da..36e5b57 100644 +index 211e9da..0027171 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h -@@ -1651,19 +1651,12 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, +@@ -1651,6 +1651,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, int pcibios_add_device(struct pci_dev *dev); void pcibios_release_device(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); @@ -21133,20 +23191,7 @@ index 211e9da..36e5b57 100644 #ifdef CONFIG_HIBERNATE_CALLBACKS extern struct dev_pm_ops pcibios_pm_ops; - #endif - --#ifdef CONFIG_PCI_MMCONFIG --void __init pci_mmcfg_early_init(void); --void __init pci_mmcfg_late_init(void); --#else --static inline void pci_mmcfg_early_init(void) { } --static inline void pci_mmcfg_late_init(void) { } --#endif -- - int pci_ext_cfg_avail(void); - - void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); -@@ -1838,6 +1831,7 @@ void pci_set_of_node(struct pci_dev *dev); +@@ -1838,6 +1839,7 @@ void pci_set_of_node(struct pci_dev *dev); void pci_release_of_node(struct pci_dev *dev); void pci_set_bus_of_node(struct pci_bus *bus); void pci_release_bus_of_node(struct pci_bus *bus); @@ -21154,7 +23199,7 @@ index 211e9da..36e5b57 100644 /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); -@@ -1858,6 +1852,7 @@ static inline void pci_set_of_node(struct pci_dev *dev) { } +@@ 
-1858,6 +1860,7 @@ static inline void pci_set_of_node(struct pci_dev *dev) { } static inline void pci_release_of_node(struct pci_dev *dev) { } static inline void pci_set_bus_of_node(struct pci_bus *bus) { } static inline void pci_release_bus_of_node(struct pci_bus *bus) { } @@ -21214,6 +23259,46 @@ index 3e18163..2c43e96 100644 }; #ifdef GENERIC_MSI_DOMAIN_OPS +diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c +index f282516..7f68268 100644 +--- a/scripts/mod/devicetable-offsets.c ++++ b/scripts/mod/devicetable-offsets.c +@@ -63,6 +63,7 @@ int main(void) + + DEVID(acpi_device_id); + DEVID_FIELD(acpi_device_id, id); ++ DEVID_FIELD(acpi_device_id, cls); + + DEVID(pnp_device_id); + DEVID_FIELD(pnp_device_id, id); +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c +index e614ef6..ba5998c 100644 +--- a/scripts/mod/file2alias.c ++++ b/scripts/mod/file2alias.c +@@ -511,12 +511,21 @@ static int do_serio_entry(const char *filename, + } + ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry); + +-/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */ ++/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or ++ * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if) ++ * ++ * NOTE: * Each driver should use one of the following : _HID, _CIDs or _CLS. ++ */ + static int do_acpi_entry(const char *filename, + void *symval, char *alias) + { + DEF_FIELD_ADDR(symval, acpi_device_id, id); +- sprintf(alias, "acpi*:%s:*", *id); ++ DEF_FIELD_ADDR(symval, acpi_device_id, cls); ++ ++ if (id && strlen((const char *)*id)) ++ sprintf(alias, "acpi*:%s:*", *id); ++ else if (cls) ++ sprintf(alias, "acpi*:%06x:*", *cls); + return 1; + } + ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry); diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 6e54f35..691e868 100644 --- a/virt/kvm/arm/arch_timer.c diff --git a/kernel.spec b/kernel.spec index 874cec027..1ef39b742 100644 --- a/kernel.spec +++ b/kernel.spec @@ -2255,6 +2255,9 @@ fi # # %changelog +* Tue Mar 17 2015 Kyle McMartin +- Update kernel-arm64.patch, move EDAC to arm-generic, add EDAC_XGENE on arm64. + * Mon Mar 16 2015 Jarod Wilson - Fix bad variant usage in kernel dependencies