diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 6a1acabb29d85..53755b2021ed0 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -523,6 +523,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsa /sys/devices/system/cpu/vulnerabilities/tsx_async_abort Date: January 2018 Contact: Linux kernel mailing list diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs index 5fa6655aee840..16f17e91ee496 100644 --- a/Documentation/ABI/testing/sysfs-driver-ufs +++ b/Documentation/ABI/testing/sysfs-driver-ufs @@ -711,7 +711,7 @@ Description: This file shows the thin provisioning type. This is one of The file is read only. -What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resourse_count +What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resource_count Date: February 2018 Contact: Stanislav Nijnikov Description: This file shows the total physical memory resources. This is diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst index 1302fd1b55e83..6dba18dbb9abc 100644 --- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in combination with a microcode update. The microcode clears the affected CPU buffers when the VERW instruction is executed. -Kernel reuses the MDS function to invoke the buffer clearing: - - mds_clear_cpu_buffers() +Kernel does the buffer clearing with x86_clear_cpu_buffers(). On MDS affected CPUs, the kernel already invokes CPU buffer clear on kernel/userspace, hypervisor/guest and C-state (idle) transitions. No diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b5cb361485541..f402bbaccc8aa 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6993,6 +6993,19 @@ having this key zero'ed is acceptable. E.g. in testing scenarios. + tsa= [X86] Control mitigation for Transient Scheduler + Attacks on AMD CPUs. Search the following in your + favourite search engine for more details: + + "Technical guidance for mitigating transient scheduler + attacks". + + off - disable the mitigation + on - enable the mitigation (default) + user - mitigate only user/kernel transitions + vm - mitigate only guest/host transitions + + tsc= Disable clocksource stability checks for TSC. Format: [x86] reliable: mark tsc clocksource as reliable, this diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst index 5a2e6c0ef04a5..3518671e1a850 100644 --- a/Documentation/arch/x86/mds.rst +++ b/Documentation/arch/x86/mds.rst @@ -93,7 +93,7 @@ enters a C-state. The kernel provides a function to invoke the buffer clearing: - mds_clear_cpu_buffers() + x86_clear_cpu_buffers() Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path. Other than CFLAGS.ZF, this macro doesn't clobber any registers. 
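A minimal C sketch of the VERW idiom behind x86_clear_cpu_buffers(), assuming it keeps the shape of the former mds_clear_cpu_buffers() helper that this series renames; the in-tree implementation may differ in detail::

    /*
     * Sketch of the VERW-based buffer clearing described above.
     * VERW with a valid, writable data segment selector triggers the
     * microcode-assisted CPU buffer clear; other than CFLAGS.ZF,
     * nothing is clobbered.
     */
    static __always_inline void x86_clear_cpu_buffers(void)
    {
    	static const u16 ds = __KERNEL_DS;

    	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }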
@@ -185,9 +185,9 @@ Mitigation points idle clearing would be a window dressing exercise and is therefore not activated. - The invocation is controlled by the static key mds_idle_clear which is - switched depending on the chosen mitigation mode and the SMT state of - the system. + The invocation is controlled by the static key cpu_buf_idle_clear which is + switched depending on the chosen mitigation mode and the SMT state of the + system. The buffer clear is only invoked before entering the C-State to prevent that stale data from the idling CPU from spilling to the Hyper-Thread diff --git a/Documentation/bpf/map_hash.rst b/Documentation/bpf/map_hash.rst index d2343952f2cbd..8606bf958a8cf 100644 --- a/Documentation/bpf/map_hash.rst +++ b/Documentation/bpf/map_hash.rst @@ -233,10 +233,16 @@ attempts in order to enforce the LRU property which have increasing impacts on other CPUs involved in the following operation attempts: - Attempt to use CPU-local state to batch operations -- Attempt to fetch ``target_free`` free nodes from global lists +- Attempt to fetch ``target_free`` free nodes from global lists - Attempt to pull any node from a global list and remove it from the hashmap - Attempt to pull any node from any CPU's list and remove it from the hashmap +The number of nodes to borrow from the global list in a batch, ``target_free``, +depends on the size of the map. A larger batch size reduces lock contention, but +may also exhaust the global structure. The value is computed at map init to +avoid exhaustion, by limiting aggregate reservation by all CPUs to half the map +size, with a minimum of a single element and a maximum budget of 128 at a time. + This algorithm is described visually in the following diagram. See the description in commit 3a08c2fd7634 ("bpf: LRU List") for a full explanation of the corresponding operations: diff --git a/Documentation/bpf/map_lru_hash_update.dot b/Documentation/bpf/map_lru_hash_update.dot index a0fee349d29c2..ab10058f5b79f 100644 --- a/Documentation/bpf/map_lru_hash_update.dot +++ b/Documentation/bpf/map_lru_hash_update.dot @@ -35,18 +35,18 @@ digraph { fn_bpf_lru_list_pop_free_to_local [shape=rectangle,fillcolor=2, label="Flush local pending, Rotate Global list, move - LOCAL_FREE_TARGET + target_free from global -> local"] // Also corresponds to: // fn__local_list_flush() // fn_bpf_lru_list_rotate() fn___bpf_lru_node_move_to_free[shape=diamond,fillcolor=2, - label="Able to free\nLOCAL_FREE_TARGET\nnodes?"] + label="Able to free\ntarget_free\nnodes?"] fn___bpf_lru_list_shrink_inactive [shape=rectangle,fillcolor=3, label="Shrink inactive list up to remaining - LOCAL_FREE_TARGET + target_free (global LRU -> local)"] fn___bpf_lru_list_shrink [shape=diamond,fillcolor=2, label="> 0 entries in\nlocal free list?"] diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst index d1154eb438101..cca94469fa414 100644 --- a/Documentation/core-api/symbol-namespaces.rst +++ b/Documentation/core-api/symbol-namespaces.rst @@ -28,6 +28,9 @@ kernel. As of today, modules that make use of symbols exported into namespaces, are required to import the namespace. Otherwise the kernel will, depending on its configuration, reject loading the module or warn about a missing import. +Additionally, it is possible to put symbols into a module namespace, strictly +limiting which modules are allowed to use these symbols. + 2. How to define Symbol Namespaces ================================== @@ -84,6 +87,22 @@ unit as preprocessor statement.
The above example would then read:: within the corresponding compilation unit before any EXPORT_SYMBOL macro is used. +2.3 Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro +=================================================== + +Symbols exported using this macro are put into a module namespace. This +namespace cannot be imported. + +The macro takes a comma-separated list of module names, allowing only those +modules to access this symbol. Simple tail-globs are supported. + +For example: + + EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*") + +will limit usage of this symbol to modules whose name matches the given +patterns. + 3. How to use Symbols exported in Namespaces ============================================ @@ -155,3 +174,6 @@ in-tree modules:: You can also run nsdeps for external module builds. A typical usage is:: $ make -C M=$PWD nsdeps + +Note: nsdeps will happily generate an import statement for the module namespace, +which will not work and causes build and runtime failures. diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml index b57ae6963e629..6b6f6762d122f 100644 --- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml @@ -97,7 +97,10 @@ properties: resets: items: - - description: module reset + - description: + Module reset. This property is optional for controllers in Tegra194, + Tegra234 etc., where an internal software reset is available as an + alternative. reset-names: items: @@ -116,6 +119,13 @@ properties: - const: rx - const: tx +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + allOf: - $ref: /schemas/i2c/i2c-controller.yaml - if: @@ -169,6 +179,18 @@ properties: power-domains: false + - if: + not: + properties: + compatible: + contains: + enum: + - nvidia,tegra194-i2c + then: + required: + - resets + - reset-names + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml index dc3a3f709feaa..bac4d0b51d8a1 100644 --- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml +++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml @@ -58,8 +58,7 @@ properties: fsl,phy-tx-vboost-level-microvolt: description: Adjust the boosted transmit launch pk-pk differential amplitude - minimum: 880 - maximum: 1120 + enum: [844, 1008, 1156] fsl,phy-comp-dis-tune-percent: description: diff --git a/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml b/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml index ec6115d3796ba..5575c58357d6e 100644 --- a/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml +++ b/Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml @@ -27,22 +27,31 @@ properties: maxItems: 1 "#pwm-cells": - const: 2 + const: 3 clocks: - maxItems: 1 + minItems: 1 + maxItems: 2 + + clock-names: + minItems: 1 + items: + - const: axi + - const: ext required: - reg - clocks + - clock-names unevaluatedProperties: false examples: - | pwm@44b00000 { - compatible = "adi,axi-pwmgen-2.00.a"; - reg = <0x44b00000 0x1000>; - clocks = <&spi_clk>; - #pwm-cells = <2>; + compatible = "adi,axi-pwmgen-2.00.a"; + reg = <0x44b00000 0x1000>; + clocks = <&fpga_clk>, <&spi_clk>; + clock-names = "axi", "ext"; + #pwm-cells = <3>; }; diff --git a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml
b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml index 119de3d7f9dd7..44548a9da1580 100644 --- a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml +++ b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml @@ -35,8 +35,8 @@ additionalProperties: false examples: - | pwm: pwm@f0408000 { - compatible = "brcm,bcm7038-pwm"; - reg = <0xf0408000 0x28>; - #pwm-cells = <2>; - clocks = <&upg_fixed>; + compatible = "brcm,bcm7038-pwm"; + reg = <0xf0408000 0x28>; + #pwm-cells = <2>; + clocks = <&upg_fixed>; }; diff --git a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml index e86c8053b366a..fd785da5d3d73 100644 --- a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml +++ b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml @@ -43,9 +43,9 @@ examples: #include pwm@3e01a000 { - compatible = "brcm,bcm11351-pwm", "brcm,kona-pwm"; - reg = <0x3e01a000 0xcc>; - clocks = <&slave_ccu BCM281XX_SLAVE_CCU_PWM>; - #pwm-cells = <3>; + compatible = "brcm,bcm11351-pwm", "brcm,kona-pwm"; + reg = <0x3e01a000 0xcc>; + clocks = <&slave_ccu BCM281XX_SLAVE_CCU_PWM>; + #pwm-cells = <3>; }; ... diff --git a/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml b/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml index 6327bb2f6ee08..698266c09e253 100644 --- a/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/mediatek,mt6357-regulator.yaml @@ -33,7 +33,7 @@ patternProperties: "^ldo-v(camio18|aud28|aux18|io18|io28|rf12|rf18|cn18|cn28|fe28)$": type: object - $ref: fixed-regulator.yaml# + $ref: regulator.yaml# unevaluatedProperties: false description: Properties for single fixed LDO regulator. 
@@ -112,7 +112,6 @@ examples: regulator-enable-ramp-delay = <220>; }; mt6357_vfe28_reg: ldo-vfe28 { - compatible = "regulator-fixed"; regulator-name = "vfe28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; @@ -125,14 +124,12 @@ examples: regulator-enable-ramp-delay = <110>; }; mt6357_vrf18_reg: ldo-vrf18 { - compatible = "regulator-fixed"; regulator-name = "vrf18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-enable-ramp-delay = <110>; }; mt6357_vrf12_reg: ldo-vrf12 { - compatible = "regulator-fixed"; regulator-name = "vrf12"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; @@ -157,14 +154,12 @@ examples: regulator-enable-ramp-delay = <264>; }; mt6357_vcn28_reg: ldo-vcn28 { - compatible = "regulator-fixed"; regulator-name = "vcn28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; regulator-enable-ramp-delay = <264>; }; mt6357_vcn18_reg: ldo-vcn18 { - compatible = "regulator-fixed"; regulator-name = "vcn18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -183,7 +178,6 @@ examples: regulator-enable-ramp-delay = <264>; }; mt6357_vcamio_reg: ldo-vcamio18 { - compatible = "regulator-fixed"; regulator-name = "vcamio"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -212,28 +206,24 @@ examples: regulator-always-on; }; mt6357_vaux18_reg: ldo-vaux18 { - compatible = "regulator-fixed"; regulator-name = "vaux18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-enable-ramp-delay = <264>; }; mt6357_vaud28_reg: ldo-vaud28 { - compatible = "regulator-fixed"; regulator-name = "vaud28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; regulator-enable-ramp-delay = <264>; }; mt6357_vio28_reg: ldo-vio28 { - compatible = "regulator-fixed"; regulator-name = "vio28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; regulator-enable-ramp-delay = <264>; }; mt6357_vio18_reg: ldo-vio18 { - compatible = "regulator-fixed"; regulator-name = "vio18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml index 692aa05500fd5..6ba0325039be2 100644 --- a/Documentation/devicetree/bindings/serial/8250.yaml +++ b/Documentation/devicetree/bindings/serial/8250.yaml @@ -45,7 +45,7 @@ allOf: - ns16550 - ns16550a then: - anyOf: + oneOf: - required: [ clock-frequency ] - required: [ clocks ] diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml index de0b4ae740ff2..a975bce599750 100644 --- a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml +++ b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-fqd.yaml @@ -50,7 +50,7 @@ required: - compatible allOf: - - $ref: reserved-memory.yaml + - $ref: /schemas/reserved-memory/reserved-memory.yaml unevaluatedProperties: false @@ -61,7 +61,7 @@ examples: #size-cells = <2>; qman-fqd { - compatible = "shared-dma-pool"; + compatible = "fsl,qman-fqd"; size = <0 0x400000>; alignment = <0 0x400000>; no-map; diff --git a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml index e44e88d993d0b..e802e9ac975b8 100644 --- a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml +++ b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml @@ -14,9 +14,22 @@ allOf: 
properties: compatible: - enum: - - usb4b4,6504 - - usb4b4,6506 + oneOf: + - enum: + - usb4b4,6504 + - usb4b4,6506 + - items: + - enum: + - usb4b4,6500 + - usb4b4,6508 + - const: usb4b4,6504 + - items: + - enum: + - usb4b4,6502 + - usb4b4,6503 + - usb4b4,6507 + - usb4b4,650a + - const: usb4b4,6506 reg: true diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 71a1a399e1e1f..af9a8d43b2479 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -846,6 +846,8 @@ patternProperties: description: Linux-specific binding "^linx,.*": description: Linx Technologies + "^liontron,.*": + description: Shenzhen Liontron Technology Co., Ltd "^liteon,.*": description: LITE-ON Technology Corp. "^litex,.*": diff --git a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst index 8d8b53e96bcfe..ccb4b153e6f2d 100644 --- a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst +++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst @@ -12,11 +12,14 @@ ACPI in general allows referring to device objects in the tree only. Hierarchical data extension nodes may not be referred to directly, hence this document defines a scheme to implement such references. -A reference consist of the device object name followed by one or more -hierarchical data extension [dsd-guide] keys. Specifically, the hierarchical -data extension node which is referred to by the key shall lie directly under -the parent object i.e. either the device object or another hierarchical data -extension node. +A reference to a _DSD hierarchical data node is a string consisting of a +device object reference followed by a dot (".") and a relative path to a data +node object. Do not use non-string references as this will produce a copy of +the hierarchical data node, not a reference! + +The hierarchical data extension node which is referred to shall be located +directly under its parent object i.e. either the device object or another +hierarchical data extension node [dsd-guide]. The keys in the hierarchical data nodes shall consist of the name of the node, "@" character and the number of the node in hexadecimal notation (without pre- @@ -33,11 +36,9 @@ extension key. Example ======= -In the ASL snippet below, the "reference" _DSD property contains a -device object reference to DEV0 and under that device object, a -hierarchical data extension key "node@1" referring to the NOD1 object -and lastly, a hierarchical data extension key "anothernode" referring to -the ANOD object which is also the final target node of the reference. +In the ASL snippet below, the "reference" _DSD property contains a string +reference to a hierarchical data extension node ANOD under DEV0 under the parent +of DEV1. ANOD is also the final target node of the reference. :: Device (DEV0) @@ -76,10 +77,7 @@ the ANOD object which is also the final target node of the reference. 
Name (_DSD, Package () { ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), Package () { - Package () { - "reference", Package () { - ^DEV0, "node@1", "anothernode" - } + Package () { "reference", "^DEV0.ANOD" } }, } }) diff --git a/Documentation/firmware-guide/acpi/dsd/graph.rst b/Documentation/firmware-guide/acpi/dsd/graph.rst index b9dbfc73ed25b..d6ae5ffa748ca 100644 --- a/Documentation/firmware-guide/acpi/dsd/graph.rst +++ b/Documentation/firmware-guide/acpi/dsd/graph.rst @@ -66,12 +66,9 @@ of that port shall be zero. Similarly, if a port may only have a single endpoint, the number of that endpoint shall be zero. The endpoint reference uses property extension with "remote-endpoint" property -name followed by a reference in the same package. Such references consist of -the remote device reference, the first package entry of the port data extension -reference under the device and finally the first package entry of the endpoint -data extension reference under the port. Individual references thus appear as:: +name followed by a string reference in the same package. [data-node-ref]:: - Package() { device, "port@X", "endpoint@Y" } + "device.datanode" In the above example, "X" is the number of the port and "Y" is the number of the endpoint. @@ -109,7 +106,7 @@ A simple example of this is show below:: ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), Package () { Package () { "reg", 0 }, - Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, "port@4", "endpoint@0" } }, + Package () { "remote-endpoint", "\\_SB.PCI0.ISP.EP40" }, } }) } @@ -141,7 +138,7 @@ A simple example of this is show below:: ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), Package () { Package () { "reg", 0 }, - Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, "port@0", "endpoint@0" } }, + Package () { "remote-endpoint", "\\_SB.PCI0.I2C2.CAM0.EP00" }, } }) } diff --git a/Documentation/firmware-guide/acpi/dsd/leds.rst b/Documentation/firmware-guide/acpi/dsd/leds.rst index 93db592c93c71..a97cd07d49be3 100644 --- a/Documentation/firmware-guide/acpi/dsd/leds.rst +++ b/Documentation/firmware-guide/acpi/dsd/leds.rst @@ -15,11 +15,6 @@ Referring to LEDs in Device tree is documented in [video-interfaces], in "flash-leds" property documentation. In short, LEDs are directly referred to by using phandles. -While Device tree allows referring to any node in the tree [devicetree], in -ACPI references are limited to device nodes only [acpi]. For this reason using -the same mechanism on ACPI is not possible. A mechanism to refer to non-device -ACPI nodes is documented in [data-node-ref]. - ACPI allows (as does DT) using integer arguments after the reference. A combination of the LED driver device reference and an integer argument, referring to the "reg" property of the relevant LED, is used to identify @@ -74,7 +69,7 @@ omitted. :: Package () { Package () { "flash-leds", - Package () { ^LED, "led@0", ^LED, "led@1" }, + Package () { "^LED.LED0", "^LED.LED1" }, } } }) diff --git a/Documentation/gpu/xe/index.rst b/Documentation/gpu/xe/index.rst index 3f07aa3b54325..89bbdcccf8eb7 100644 --- a/Documentation/gpu/xe/index.rst +++ b/Documentation/gpu/xe/index.rst @@ -16,6 +16,7 @@ DG2, etc is provided to prototype the driver. xe_migrate xe_cs xe_pm + xe_gt_freq xe_pcode xe_gt_mcr xe_wa diff --git a/Documentation/gpu/xe/xe_gt_freq.rst b/Documentation/gpu/xe/xe_gt_freq.rst new file mode 100644 index 0000000000000..c0811200e3275 --- /dev/null +++ b/Documentation/gpu/xe/xe_gt_freq.rst @@ -0,0 +1,14 @@ +.. 
SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +========================== +Xe GT Frequency Management +========================== + +.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c + :doc: Xe GT Frequency Management + +Internal API +============ + +.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c + :internal: diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst index 7964e0c245aeb..81607ce407595 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -656,6 +656,20 @@ cc-cross-prefix endif endif +$(RUSTC) support functions +-------------------------- + +rustc-min-version + rustc-min-version tests if the value of $(CONFIG_RUSTC_VERSION) is greater + than or equal to the provided value and evaluates to y if so. + + Example:: + + rustflags-$(call rustc-min-version, 108500) := -Cfoo + + In this example, rustflags-y will be assigned the value -Cfoo if + $(CONFIG_RUSTC_VERSION) is >= 1.85.0. + $(LD) support functions ----------------------- diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml index c5579a5412fc9..043f205bc1ae7 100644 --- a/Documentation/netlink/specs/tc.yaml +++ b/Documentation/netlink/specs/tc.yaml @@ -227,7 +227,7 @@ definitions: type: u8 doc: log(P_max / (qth-max - qth-min)) - - name: Scell_log + name: Scell-log type: u8 doc: cell size for idle damping - @@ -248,7 +248,7 @@ definitions: name: DPs type: u32 - - name: def_DP + name: def-DP type: u32 - name: grio diff --git a/Makefile b/Makefile index 18c2a7cf9e913..ba6054d96398d 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 12 -SUBLEVEL = 31 +SUBLEVEL = 39 EXTRAVERSION = NAME = Baby Opossum Posse diff --git a/arch/arm/boot/dts/microchip/at91sam9263ek.dts b/arch/arm/boot/dts/microchip/at91sam9263ek.dts index ce8baff6a9f4e..e42e1a75a715d 100644 --- a/arch/arm/boot/dts/microchip/at91sam9263ek.dts +++ b/arch/arm/boot/dts/microchip/at91sam9263ek.dts @@ -152,7 +152,7 @@ nand@3 { reg = <0x3 0x0 0x800000>; rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>; - cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>; + cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>; nand-bus-width = <8>; nand-ecc-mode = "soft"; nand-on-flash-bbt; diff --git a/arch/arm/boot/dts/microchip/tny_a9263.dts b/arch/arm/boot/dts/microchip/tny_a9263.dts index 62b7d9f9a926c..c8b6318aaa838 100644 --- a/arch/arm/boot/dts/microchip/tny_a9263.dts +++ b/arch/arm/boot/dts/microchip/tny_a9263.dts @@ -64,7 +64,7 @@ nand@3 { reg = <0x3 0x0 0x800000>; rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>; - cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>; + cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>; nand-bus-width = <8>; nand-ecc-mode = "soft"; nand-on-flash-bbt; diff --git a/arch/arm/boot/dts/microchip/usb_a9263.dts b/arch/arm/boot/dts/microchip/usb_a9263.dts index 45745915b2e16..454176ce6d3ff 100644 --- a/arch/arm/boot/dts/microchip/usb_a9263.dts +++ b/arch/arm/boot/dts/microchip/usb_a9263.dts @@ -58,7 +58,7 @@ }; spi0: spi@fffa4000 { - cs-gpios = <&pioB 15 GPIO_ACTIVE_HIGH>; + cs-gpios = <&pioA 5 GPIO_ACTIVE_LOW>; status = "okay"; flash@0 { compatible = "atmel,at45", "atmel,dataflash"; @@ -84,7 +84,7 @@ nand@3 { reg = <0x3 0x0 0x800000>; rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>; - cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>; + cs-gpios = <&pioD 15 GPIO_ACTIVE_HIGH>; nand-bus-width = <8>; nand-ecc-mode = "soft"; nand-on-flash-bbt; diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi index ac7494ed633e1..be87c396f05f1 100644 --- 
a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi @@ -213,12 +213,6 @@ }; }; - sfpb_mutex: hwmutex { - compatible = "qcom,sfpb-mutex"; - syscon = <&sfpb_wrapper_mutex 0x604 0x4>; - #hwlock-cells = <1>; - }; - smem { compatible = "qcom,smem"; memory-region = <&smem_region>; @@ -284,6 +278,40 @@ }; }; + replicator { + compatible = "arm,coresight-static-replicator"; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + in-ports { + port { + replicator_in: endpoint { + remote-endpoint = <&funnel_out>; + }; + }; + }; + + out-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + replicator_out0: endpoint { + remote-endpoint = <&etb_in>; + }; + }; + + port@1 { + reg = <1>; + replicator_out1: endpoint { + remote-endpoint = <&tpiu_in>; + }; + }; + }; + }; + soc: soc { #address-cells = <1>; #size-cells = <1>; @@ -305,9 +333,10 @@ pinctrl-0 = <&ps_hold_default_state>; }; - sfpb_wrapper_mutex: syscon@1200000 { - compatible = "syscon"; - reg = <0x01200000 0x8000>; + sfpb_mutex: hwmutex@1200600 { + compatible = "qcom,sfpb-mutex"; + reg = <0x01200600 0x100>; + #hwlock-cells = <1>; }; intc: interrupt-controller@2000000 { @@ -326,6 +355,8 @@ ; reg = <0x0200a000 0x100>; clock-frequency = <27000000>; + clocks = <&sleep_clk>; + clock-names = "sleep"; cpu-offset = <0x80000>; }; @@ -1532,39 +1563,6 @@ }; }; - replicator { - compatible = "arm,coresight-static-replicator"; - - clocks = <&rpmcc RPM_QDSS_CLK>; - clock-names = "apb_pclk"; - - out-ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - replicator_out0: endpoint { - remote-endpoint = <&etb_in>; - }; - }; - port@1 { - reg = <1>; - replicator_out1: endpoint { - remote-endpoint = <&tpiu_in>; - }; - }; - }; - - in-ports { - port { - replicator_in: endpoint { - remote-endpoint = <&funnel_out>; - }; - }; - }; - }; - funnel@1a04000 { compatible = "arm,coresight-dynamic-funnel", "arm,primecell"; reg = <0x1a04000 0x1000>; diff --git a/arch/arm/mach-aspeed/Kconfig b/arch/arm/mach-aspeed/Kconfig index 080019aa6fcd8..fcf287edd0e5e 100644 --- a/arch/arm/mach-aspeed/Kconfig +++ b/arch/arm/mach-aspeed/Kconfig @@ -2,7 +2,6 @@ menuconfig ARCH_ASPEED bool "Aspeed BMC architectures" depends on (CPU_LITTLE_ENDIAN && ARCH_MULTI_V5) || ARCH_MULTI_V6 || ARCH_MULTI_V7 - select SRAM select WATCHDOG select ASPEED_WATCHDOG select MFD_SYSCON diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h index c36fb27212615..86a2f9e5d0ef9 100644 --- a/arch/arm/mach-omap2/clockdomain.h +++ b/arch/arm/mach-omap2/clockdomain.h @@ -48,6 +48,7 @@ #define CLKDM_NO_AUTODEPS (1 << 4) #define CLKDM_ACTIVE_WITH_MPU (1 << 5) #define CLKDM_MISSING_IDLE_REPORTING (1 << 6) +#define CLKDM_STANDBY_FORCE_WAKEUP BIT(7) #define CLKDM_CAN_HWSUP (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO) #define CLKDM_CAN_SWSUP (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP) diff --git a/arch/arm/mach-omap2/clockdomains33xx_data.c b/arch/arm/mach-omap2/clockdomains33xx_data.c index 87f4e927eb183..c05a3c07d4486 100644 --- a/arch/arm/mach-omap2/clockdomains33xx_data.c +++ b/arch/arm/mach-omap2/clockdomains33xx_data.c @@ -19,7 +19,7 @@ static struct clockdomain l4ls_am33xx_clkdm = { .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET, - .flags = CLKDM_CAN_SWSUP, + .flags = CLKDM_CAN_SWSUP | CLKDM_STANDBY_FORCE_WAKEUP, }; static struct clockdomain l3s_am33xx_clkdm = { diff --git a/arch/arm/mach-omap2/cm33xx.c 
b/arch/arm/mach-omap2/cm33xx.c index acdf72a541c02..a4dd42abda89b 100644 --- a/arch/arm/mach-omap2/cm33xx.c +++ b/arch/arm/mach-omap2/cm33xx.c @@ -20,6 +20,9 @@ #include "cm-regbits-34xx.h" #include "cm-regbits-33xx.h" #include "prm33xx.h" +#if IS_ENABLED(CONFIG_SUSPEND) +#include <linux/suspend.h> +#endif /* * CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield: @@ -328,8 +331,17 @@ static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm) { bool hwsup = false; +#if IS_ENABLED(CONFIG_SUSPEND) + /* + * In case of standby, don't put the l4ls clk domain to sleep. + * Since CM3 PM FW doesn't wake-up/enable the l4ls clk domain + * upon wake-up, CM3 PM FW fails to wake up the MPU. + */ + if (pm_suspend_target_state == PM_SUSPEND_STANDBY && + (clkdm->flags & CLKDM_STANDBY_FORCE_WAKEUP)) + return 0; +#endif hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst, clkdm->clkdm_offs); - if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) am33xx_clkdm_sleep(clkdm); diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c index 4f31e61c0c90c..9f9a20274db84 100644 --- a/arch/arm/mach-omap2/pmic-cpcap.c +++ b/arch/arm/mach-omap2/pmic-cpcap.c @@ -264,7 +264,11 @@ int __init omap4_cpcap_init(void) static int __init cpcap_late_init(void) { - omap4_vc_set_pmic_signaling(PWRDM_POWER_RET); + if (!of_find_compatible_node(NULL, NULL, "motorola,cpcap")) + return 0; + + if (soc_is_omap443x() || soc_is_omap446x() || soc_is_omap447x()) + omap4_vc_set_pmic_signaling(PWRDM_POWER_RET); return 0; } diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 89f1c97f3079c..cbf5a03d2b189 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -515,7 +515,5 @@ void __init early_ioremap_init(void) bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, unsigned long flags) { - unsigned long pfn = PHYS_PFN(offset); - - return memblock_is_map_memory(pfn); + return memblock_is_map_memory(offset); } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a11a7a42edbfb..7887d18cce3e4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -322,9 +322,9 @@ config ARCH_MMAP_RND_BITS_MAX default 24 if ARM64_VA_BITS=39 default 27 if ARM64_VA_BITS=42 default 30 if ARM64_VA_BITS=47 - default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES - default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES - default 33 if ARM64_VA_BITS=48 + default 29 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52) && ARM64_64K_PAGES + default 31 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52) && ARM64_16K_PAGES + default 33 if (ARM64_VA_BITS=48 || ARM64_VA_BITS=52) default 14 if ARM64_64K_PAGES default 16 if ARM64_16K_PAGES default 18 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 19a4988621ac9..88029d38b3c65 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -48,7 +48,7 @@ KBUILD_CFLAGS += $(CC_FLAGS_NO_FPU) \ KBUILD_CFLAGS += $(call cc-disable-warning, psabi) KBUILD_AFLAGS += $(compat_vdso) -ifeq ($(call test-ge, $(CONFIG_RUSTC_VERSION), 108500),y) +ifeq ($(call rustc-min-version, 108500),y) KBUILD_RUSTFLAGS += --target=aarch64-unknown-none-softfloat else KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon" diff --git a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi index 5988a4eb6efaa..cb78ce7af0b38 100644 --- a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi +++ b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi @@ -71,7 +71,7 @@ */ &port00 { bus-range = <1 1>; - wifi0: network@0,0 { + wifi0: wifi@0,0 { compatible = "pci14e4,4425"; reg = <0x10000
0x0 0x0 0x0 0x0>; /* To be filled by the loader */ diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts index 97ff1ddd63188..734a75198f06e 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts @@ -124,6 +124,7 @@ assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>; assigned-clock-rates = <24576000>; #sound-dai-cells = <0>; + fsl,sai-mclk-direction-output; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi index 62ed64663f495..9ba0cb89fa24e 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi @@ -233,6 +233,7 @@ rtc: rtc@51 { compatible = "nxp,pcf85263"; reg = <0x51>; + quartz-load-femtofarads = <12500>; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts index 1df5ceb113879..37fc5ed98d7f6 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts @@ -124,6 +124,7 @@ assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>; assigned-clock-rates = <24576000>; #sound-dai-cells = <0>; + fsl,sai-mclk-direction-output; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi index 2a64115eebf1c..bb11590473a4c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi @@ -242,6 +242,7 @@ rtc: rtc@51 { compatible = "nxp,pcf85263"; reg = <0x51>; + quartz-load-femtofarads = <12500>; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi index 15f7ab58db36c..88561df70d03a 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-som.dtsi @@ -257,6 +257,7 @@ rtc: rtc@51 { compatible = "nxp,pcf85263"; reg = <0x51>; + quartz-load-femtofarads = <12500>; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt6357.dtsi b/arch/arm64/boot/dts/mediatek/mt6357.dtsi index 5fafa842d312f..dca4e5c3d8e21 100644 --- a/arch/arm64/boot/dts/mediatek/mt6357.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6357.dtsi @@ -60,7 +60,6 @@ }; mt6357_vfe28_reg: ldo-vfe28 { - compatible = "regulator-fixed"; regulator-name = "vfe28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; @@ -75,7 +74,6 @@ }; mt6357_vrf18_reg: ldo-vrf18 { - compatible = "regulator-fixed"; regulator-name = "vrf18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -83,7 +81,6 @@ }; mt6357_vrf12_reg: ldo-vrf12 { - compatible = "regulator-fixed"; regulator-name = "vrf12"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; @@ -112,7 +109,6 @@ }; mt6357_vcn28_reg: ldo-vcn28 { - compatible = "regulator-fixed"; regulator-name = "vcn28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; @@ -120,7 +116,6 @@ }; mt6357_vcn18_reg: ldo-vcn18 { - compatible = "regulator-fixed"; regulator-name = "vcn18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -142,7 +137,6 @@ }; mt6357_vcamio_reg: ldo-vcamio18 { - compatible = "regulator-fixed"; regulator-name = "vcamio"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -175,7 +169,6 
@@ }; mt6357_vaux18_reg: ldo-vaux18 { - compatible = "regulator-fixed"; regulator-name = "vaux18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -183,7 +176,6 @@ }; mt6357_vaud28_reg: ldo-vaud28 { - compatible = "regulator-fixed"; regulator-name = "vaud28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; @@ -191,7 +183,6 @@ }; mt6357_vio28_reg: ldo-vio28 { - compatible = "regulator-fixed"; regulator-name = "vio28"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; @@ -199,7 +190,6 @@ }; mt6357_vio18_reg: ldo-vio18 { - compatible = "regulator-fixed"; regulator-name = "vio18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi index 8e1b8c85c6ede..779d6dfb55c00 100644 --- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi @@ -18,6 +18,8 @@ }; regulators { + compatible = "mediatek,mt6359-regulator"; + mt6359_vs1_buck_reg: buck_vs1 { regulator-name = "vs1"; regulator-min-microvolt = <800000>; @@ -296,7 +298,7 @@ }; }; - mt6359rtc: mt6359rtc { + mt6359rtc: rtc { compatible = "mediatek,mt6358-rtc"; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi index 22924f61ec9ed..c4fafd51b1225 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi @@ -280,14 +280,10 @@ }; }; }; +}; - ports { - port { - dsi_out: endpoint { - remote-endpoint = <&panel_in>; - }; - }; - }; +&dsi_out { + remote-endpoint = <&panel_in>; }; &gic { diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index 92c41463d10e3..65be2c2c26d40 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -1836,6 +1836,10 @@ phys = <&mipi_tx0>; phy-names = "dphy"; status = "disabled"; + + port { + dsi_out: endpoint { }; + }; }; dpi0: dpi@14015000 { diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index f013dbad9dc4e..2e138b54f5563 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -617,22 +617,6 @@ #size-cells = <0>; #power-domain-cells = <1>; - power-domain@MT8195_POWER_DOMAIN_VDEC1 { - reg = <MT8195_POWER_DOMAIN_VDEC1>; - clocks = <&vdecsys CLK_VDEC_LARB1>; - clock-names = "vdec1-0"; - mediatek,infracfg = <&infracfg_ao>; - #power-domain-cells = <0>; - }; - - power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 { - reg = <MT8195_POWER_DOMAIN_VENC_CORE1>; - clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>; - clock-names = "venc1-larb"; - mediatek,infracfg = <&infracfg_ao>; - #power-domain-cells = <0>; - }; - power-domain@MT8195_POWER_DOMAIN_VDOSYS0 { reg = <MT8195_POWER_DOMAIN_VDOSYS0>; clocks = <&topckgen CLK_TOP_CFG_VDO0>, @@ -678,15 +662,25 @@ clocks = <&vdecsys_soc CLK_VDEC_SOC_LARB1>; clock-names = "vdec0-0"; mediatek,infracfg = <&infracfg_ao>; + #address-cells = <1>; + #size-cells = <0>; #power-domain-cells = <0>; - }; - power-domain@MT8195_POWER_DOMAIN_VDEC2 { - reg = <MT8195_POWER_DOMAIN_VDEC2>; - clocks = <&vdecsys_core1 CLK_VDEC_CORE1_LARB1>; - clock-names = "vdec2-0"; - mediatek,infracfg = <&infracfg_ao>; - #power-domain-cells = <0>; + power-domain@MT8195_POWER_DOMAIN_VDEC1 { + reg = <MT8195_POWER_DOMAIN_VDEC1>; + clocks = <&vdecsys CLK_VDEC_LARB1>; + clock-names = "vdec1-0"; + mediatek,infracfg = <&infracfg_ao>; + #power-domain-cells = <0>; + }; + + power-domain@MT8195_POWER_DOMAIN_VDEC2 { + reg = <MT8195_POWER_DOMAIN_VDEC2>; + clocks = <&vdecsys_core1
CLK_VDEC_CORE1_LARB1>; + clock-names = "vdec2-0"; + mediatek,infracfg = <&infracfg_ao>; + #power-domain-cells = <0>; + }; }; power-domain@MT8195_POWER_DOMAIN_VENC { @@ -694,7 +688,17 @@ clocks = <&vencsys CLK_VENC_LARB>; clock-names = "venc0-larb"; mediatek,infracfg = <&infracfg_ao>; + #address-cells = <1>; + #size-cells = <0>; #power-domain-cells = <0>; + + power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 { + reg = ; + clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>; + clock-names = "venc1-larb"; + mediatek,infracfg = <&infracfg_ao>; + #power-domain-cells = <0>; + }; }; power-domain@MT8195_POWER_DOMAIN_VDOSYS1 { diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 2b3bb5d0af17b..f0b7949df92c0 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -621,9 +621,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA186_CLK_UARTB>; - clock-names = "serial"; resets = <&bpmp TEGRA186_RESET_UARTB>; - reset-names = "serial"; status = "disabled"; }; @@ -633,9 +631,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA186_CLK_UARTD>; - clock-names = "serial"; resets = <&bpmp TEGRA186_RESET_UARTD>; - reset-names = "serial"; status = "disabled"; }; @@ -645,9 +641,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA186_CLK_UARTE>; - clock-names = "serial"; resets = <&bpmp TEGRA186_RESET_UARTE>; - reset-names = "serial"; status = "disabled"; }; @@ -657,9 +651,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA186_CLK_UARTF>; - clock-names = "serial"; resets = <&bpmp TEGRA186_RESET_UARTF>; - reset-names = "serial"; status = "disabled"; }; @@ -1236,9 +1228,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA186_CLK_UARTC>; - clock-names = "serial"; resets = <&bpmp TEGRA186_RESET_UARTC>; - reset-names = "serial"; status = "disabled"; }; @@ -1248,9 +1238,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA186_CLK_UARTG>; - clock-names = "serial"; resets = <&bpmp TEGRA186_RESET_UARTG>; - reset-names = "serial"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 33f92b77cd9d9..c369507747851 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -766,9 +766,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_UARTD>; - clock-names = "serial"; resets = <&bpmp TEGRA194_RESET_UARTD>; - reset-names = "serial"; status = "disabled"; }; @@ -778,9 +776,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_UARTE>; - clock-names = "serial"; resets = <&bpmp TEGRA194_RESET_UARTE>; - reset-names = "serial"; status = "disabled"; }; @@ -790,9 +786,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_UARTF>; - clock-names = "serial"; resets = <&bpmp TEGRA194_RESET_UARTF>; - reset-names = "serial"; status = "disabled"; }; @@ -817,9 +811,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_UARTH>; - clock-names = "serial"; resets = <&bpmp TEGRA194_RESET_UARTH>; - reset-names = "serial"; status = "disabled"; }; @@ -1616,9 +1608,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_UARTC>; - clock-names = "serial"; resets = <&bpmp TEGRA194_RESET_UARTC>; - reset-names = "serial"; status = "disabled"; }; @@ -1628,9 +1618,7 @@ reg-shift = <2>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_UARTG>; - clock-names = "serial"; resets = <&bpmp TEGRA194_RESET_UARTG>; - reset-names = "serial"; status = "disabled"; }; diff --git 
a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi index 1c53ccc5e3cbf..9c1b2e7d3997f 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi @@ -11,6 +11,7 @@ rtc0 = "/i2c@7000d000/pmic@3c"; rtc1 = "/rtc@7000e000"; serial0 = &uarta; + serial3 = &uartd; }; chosen { diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi index 91e104b0f8653..a5294a42c287a 100644 --- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi @@ -111,6 +111,13 @@ regulator-always-on; regulator-boot-on; }; + + mp5496_l5: l5 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; }; }; @@ -146,7 +153,7 @@ }; &usb_0_qmpphy { - vdda-pll-supply = <&mp5496_l2>; + vdda-pll-supply = <&mp5496_l5>; vdda-phy-supply = <®ulator_fixed_0p925>; status = "okay"; @@ -154,7 +161,7 @@ &usb_0_qusbphy { vdd-supply = <®ulator_fixed_0p925>; - vdda-pll-supply = <&mp5496_l2>; + vdda-pll-supply = <&mp5496_l5>; vdda-phy-dpdm-supply = <®ulator_fixed_3p3>; status = "okay"; diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi index 08a82a5cf6675..81ccd0600c5ab 100644 --- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi @@ -261,6 +261,8 @@ interrupts = ; #dma-cells = <1>; qcom,ee = <1>; + qcom,num-ees = <4>; + num-channels = <16>; qcom,controlled-remotely; }; diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi index 79bc42ffb6a1f..2cfdf5bd5fd9b 100644 --- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi +++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi @@ -1073,7 +1073,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; #address-cells = <1>; @@ -1092,7 +1092,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; status = "disabled"; @@ -1137,7 +1137,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; #address-cells = <1>; @@ -1184,7 +1184,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; #address-cells = <1>; @@ -1231,7 +1231,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; #address-cells = <1>; @@ -1278,7 +1278,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 
RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; #address-cells = <1>; @@ -1297,7 +1297,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; status = "disabled"; @@ -1342,7 +1342,7 @@ interconnects = <&qup_virt MASTER_QUP_CORE_0 RPM_ALWAYS_TAG &qup_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>, <&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG - &config_noc MASTER_APPSS_PROC RPM_ALWAYS_TAG>; + &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>; interconnect-names = "qup-core", "qup-config"; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi index 8a21448c0fa84..b28fa598cebb3 100644 --- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi @@ -4012,15 +4012,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <1>; iommus = <&apps_smmu 0x2141 0x04a0>, - <&apps_smmu 0x2161 0x04a0>, - <&apps_smmu 0x2181 0x0400>, - <&apps_smmu 0x21c1 0x04a0>, - <&apps_smmu 0x21e1 0x04a0>, - <&apps_smmu 0x2541 0x04a0>, - <&apps_smmu 0x2561 0x04a0>, - <&apps_smmu 0x2581 0x0400>, - <&apps_smmu 0x25c1 0x04a0>, - <&apps_smmu 0x25e1 0x04a0>; + <&apps_smmu 0x2181 0x0400>; dma-coherent; }; @@ -4028,15 +4020,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <2>; iommus = <&apps_smmu 0x2142 0x04a0>, - <&apps_smmu 0x2162 0x04a0>, - <&apps_smmu 0x2182 0x0400>, - <&apps_smmu 0x21c2 0x04a0>, - <&apps_smmu 0x21e2 0x04a0>, - <&apps_smmu 0x2542 0x04a0>, - <&apps_smmu 0x2562 0x04a0>, - <&apps_smmu 0x2582 0x0400>, - <&apps_smmu 0x25c2 0x04a0>, - <&apps_smmu 0x25e2 0x04a0>; + <&apps_smmu 0x2182 0x0400>; dma-coherent; }; @@ -4044,15 +4028,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <3>; iommus = <&apps_smmu 0x2143 0x04a0>, - <&apps_smmu 0x2163 0x04a0>, - <&apps_smmu 0x2183 0x0400>, - <&apps_smmu 0x21c3 0x04a0>, - <&apps_smmu 0x21e3 0x04a0>, - <&apps_smmu 0x2543 0x04a0>, - <&apps_smmu 0x2563 0x04a0>, - <&apps_smmu 0x2583 0x0400>, - <&apps_smmu 0x25c3 0x04a0>, - <&apps_smmu 0x25e3 0x04a0>; + <&apps_smmu 0x2183 0x0400>; dma-coherent; }; @@ -4060,15 +4036,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <4>; iommus = <&apps_smmu 0x2144 0x04a0>, - <&apps_smmu 0x2164 0x04a0>, - <&apps_smmu 0x2184 0x0400>, - <&apps_smmu 0x21c4 0x04a0>, - <&apps_smmu 0x21e4 0x04a0>, - <&apps_smmu 0x2544 0x04a0>, - <&apps_smmu 0x2564 0x04a0>, - <&apps_smmu 0x2584 0x0400>, - <&apps_smmu 0x25c4 0x04a0>, - <&apps_smmu 0x25e4 0x04a0>; + <&apps_smmu 0x2184 0x0400>; dma-coherent; }; @@ -4076,15 +4044,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <5>; iommus = <&apps_smmu 0x2145 0x04a0>, - <&apps_smmu 0x2165 0x04a0>, - <&apps_smmu 0x2185 0x0400>, - <&apps_smmu 0x21c5 0x04a0>, - <&apps_smmu 0x21e5 0x04a0>, - <&apps_smmu 0x2545 0x04a0>, - <&apps_smmu 0x2565 0x04a0>, - <&apps_smmu 0x2585 0x0400>, - <&apps_smmu 0x25c5 0x04a0>, - <&apps_smmu 0x25e5 0x04a0>; + <&apps_smmu 0x2185 0x0400>; dma-coherent; }; @@ -4092,15 +4052,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <6>; iommus = <&apps_smmu 0x2146 0x04a0>, - <&apps_smmu 0x2166 0x04a0>, - <&apps_smmu 0x2186 0x0400>, - <&apps_smmu 0x21c6 0x04a0>, - <&apps_smmu 0x21e6 0x04a0>, - <&apps_smmu 0x2546 0x04a0>, - <&apps_smmu 0x2566 0x04a0>, - <&apps_smmu 0x2586 0x0400>, - 
<&apps_smmu 0x25c6 0x04a0>, - <&apps_smmu 0x25e6 0x04a0>; + <&apps_smmu 0x2186 0x0400>; dma-coherent; }; @@ -4108,15 +4060,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <7>; iommus = <&apps_smmu 0x2147 0x04a0>, - <&apps_smmu 0x2167 0x04a0>, - <&apps_smmu 0x2187 0x0400>, - <&apps_smmu 0x21c7 0x04a0>, - <&apps_smmu 0x21e7 0x04a0>, - <&apps_smmu 0x2547 0x04a0>, - <&apps_smmu 0x2567 0x04a0>, - <&apps_smmu 0x2587 0x0400>, - <&apps_smmu 0x25c7 0x04a0>, - <&apps_smmu 0x25e7 0x04a0>; + <&apps_smmu 0x2187 0x0400>; dma-coherent; }; @@ -4124,15 +4068,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <8>; iommus = <&apps_smmu 0x2148 0x04a0>, - <&apps_smmu 0x2168 0x04a0>, - <&apps_smmu 0x2188 0x0400>, - <&apps_smmu 0x21c8 0x04a0>, - <&apps_smmu 0x21e8 0x04a0>, - <&apps_smmu 0x2548 0x04a0>, - <&apps_smmu 0x2568 0x04a0>, - <&apps_smmu 0x2588 0x0400>, - <&apps_smmu 0x25c8 0x04a0>, - <&apps_smmu 0x25e8 0x04a0>; + <&apps_smmu 0x2188 0x0400>; dma-coherent; }; @@ -4140,31 +4076,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <9>; iommus = <&apps_smmu 0x2149 0x04a0>, - <&apps_smmu 0x2169 0x04a0>, - <&apps_smmu 0x2189 0x0400>, - <&apps_smmu 0x21c9 0x04a0>, - <&apps_smmu 0x21e9 0x04a0>, - <&apps_smmu 0x2549 0x04a0>, - <&apps_smmu 0x2569 0x04a0>, - <&apps_smmu 0x2589 0x0400>, - <&apps_smmu 0x25c9 0x04a0>, - <&apps_smmu 0x25e9 0x04a0>; - dma-coherent; - }; - - compute-cb@10 { - compatible = "qcom,fastrpc-compute-cb"; - reg = <10>; - iommus = <&apps_smmu 0x214a 0x04a0>, - <&apps_smmu 0x216a 0x04a0>, - <&apps_smmu 0x218a 0x0400>, - <&apps_smmu 0x21ca 0x04a0>, - <&apps_smmu 0x21ea 0x04a0>, - <&apps_smmu 0x254a 0x04a0>, - <&apps_smmu 0x256a 0x04a0>, - <&apps_smmu 0x258a 0x0400>, - <&apps_smmu 0x25ca 0x04a0>, - <&apps_smmu 0x25ea 0x04a0>; + <&apps_smmu 0x2189 0x0400>; dma-coherent; }; @@ -4172,15 +4084,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <11>; iommus = <&apps_smmu 0x214b 0x04a0>, - <&apps_smmu 0x216b 0x04a0>, - <&apps_smmu 0x218b 0x0400>, - <&apps_smmu 0x21cb 0x04a0>, - <&apps_smmu 0x21eb 0x04a0>, - <&apps_smmu 0x254b 0x04a0>, - <&apps_smmu 0x256b 0x04a0>, - <&apps_smmu 0x258b 0x0400>, - <&apps_smmu 0x25cb 0x04a0>, - <&apps_smmu 0x25eb 0x04a0>; + <&apps_smmu 0x218b 0x0400>; dma-coherent; }; }; @@ -4240,15 +4144,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <1>; iommus = <&apps_smmu 0x2941 0x04a0>, - <&apps_smmu 0x2961 0x04a0>, - <&apps_smmu 0x2981 0x0400>, - <&apps_smmu 0x29c1 0x04a0>, - <&apps_smmu 0x29e1 0x04a0>, - <&apps_smmu 0x2d41 0x04a0>, - <&apps_smmu 0x2d61 0x04a0>, - <&apps_smmu 0x2d81 0x0400>, - <&apps_smmu 0x2dc1 0x04a0>, - <&apps_smmu 0x2de1 0x04a0>; + <&apps_smmu 0x2981 0x0400>; dma-coherent; }; @@ -4256,15 +4152,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <2>; iommus = <&apps_smmu 0x2942 0x04a0>, - <&apps_smmu 0x2962 0x04a0>, - <&apps_smmu 0x2982 0x0400>, - <&apps_smmu 0x29c2 0x04a0>, - <&apps_smmu 0x29e2 0x04a0>, - <&apps_smmu 0x2d42 0x04a0>, - <&apps_smmu 0x2d62 0x04a0>, - <&apps_smmu 0x2d82 0x0400>, - <&apps_smmu 0x2dc2 0x04a0>, - <&apps_smmu 0x2de2 0x04a0>; + <&apps_smmu 0x2982 0x0400>; dma-coherent; }; @@ -4272,15 +4160,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <3>; iommus = <&apps_smmu 0x2943 0x04a0>, - <&apps_smmu 0x2963 0x04a0>, - <&apps_smmu 0x2983 0x0400>, - <&apps_smmu 0x29c3 0x04a0>, - <&apps_smmu 0x29e3 0x04a0>, - <&apps_smmu 0x2d43 0x04a0>, - <&apps_smmu 0x2d63 0x04a0>, - <&apps_smmu 0x2d83 0x0400>, - <&apps_smmu 0x2dc3 0x04a0>, - <&apps_smmu 0x2de3 0x04a0>; + <&apps_smmu 0x2983 0x0400>; dma-coherent; }; @@ -4288,15 +4168,7 @@ compatible = 
"qcom,fastrpc-compute-cb"; reg = <4>; iommus = <&apps_smmu 0x2944 0x04a0>, - <&apps_smmu 0x2964 0x04a0>, - <&apps_smmu 0x2984 0x0400>, - <&apps_smmu 0x29c4 0x04a0>, - <&apps_smmu 0x29e4 0x04a0>, - <&apps_smmu 0x2d44 0x04a0>, - <&apps_smmu 0x2d64 0x04a0>, - <&apps_smmu 0x2d84 0x0400>, - <&apps_smmu 0x2dc4 0x04a0>, - <&apps_smmu 0x2de4 0x04a0>; + <&apps_smmu 0x2984 0x0400>; dma-coherent; }; @@ -4304,15 +4176,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <5>; iommus = <&apps_smmu 0x2945 0x04a0>, - <&apps_smmu 0x2965 0x04a0>, - <&apps_smmu 0x2985 0x0400>, - <&apps_smmu 0x29c5 0x04a0>, - <&apps_smmu 0x29e5 0x04a0>, - <&apps_smmu 0x2d45 0x04a0>, - <&apps_smmu 0x2d65 0x04a0>, - <&apps_smmu 0x2d85 0x0400>, - <&apps_smmu 0x2dc5 0x04a0>, - <&apps_smmu 0x2de5 0x04a0>; + <&apps_smmu 0x2985 0x0400>; dma-coherent; }; @@ -4320,15 +4184,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <6>; iommus = <&apps_smmu 0x2946 0x04a0>, - <&apps_smmu 0x2966 0x04a0>, - <&apps_smmu 0x2986 0x0400>, - <&apps_smmu 0x29c6 0x04a0>, - <&apps_smmu 0x29e6 0x04a0>, - <&apps_smmu 0x2d46 0x04a0>, - <&apps_smmu 0x2d66 0x04a0>, - <&apps_smmu 0x2d86 0x0400>, - <&apps_smmu 0x2dc6 0x04a0>, - <&apps_smmu 0x2de6 0x04a0>; + <&apps_smmu 0x2986 0x0400>; dma-coherent; }; @@ -4336,15 +4192,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <7>; iommus = <&apps_smmu 0x2947 0x04a0>, - <&apps_smmu 0x2967 0x04a0>, - <&apps_smmu 0x2987 0x0400>, - <&apps_smmu 0x29c7 0x04a0>, - <&apps_smmu 0x29e7 0x04a0>, - <&apps_smmu 0x2d47 0x04a0>, - <&apps_smmu 0x2d67 0x04a0>, - <&apps_smmu 0x2d87 0x0400>, - <&apps_smmu 0x2dc7 0x04a0>, - <&apps_smmu 0x2de7 0x04a0>; + <&apps_smmu 0x2987 0x0400>; dma-coherent; }; @@ -4352,15 +4200,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <8>; iommus = <&apps_smmu 0x2948 0x04a0>, - <&apps_smmu 0x2968 0x04a0>, - <&apps_smmu 0x2988 0x0400>, - <&apps_smmu 0x29c8 0x04a0>, - <&apps_smmu 0x29e8 0x04a0>, - <&apps_smmu 0x2d48 0x04a0>, - <&apps_smmu 0x2d68 0x04a0>, - <&apps_smmu 0x2d88 0x0400>, - <&apps_smmu 0x2dc8 0x04a0>, - <&apps_smmu 0x2de8 0x04a0>; + <&apps_smmu 0x2988 0x0400>; dma-coherent; }; @@ -4368,15 +4208,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <9>; iommus = <&apps_smmu 0x2949 0x04a0>, - <&apps_smmu 0x2969 0x04a0>, - <&apps_smmu 0x2989 0x0400>, - <&apps_smmu 0x29c9 0x04a0>, - <&apps_smmu 0x29e9 0x04a0>, - <&apps_smmu 0x2d49 0x04a0>, - <&apps_smmu 0x2d69 0x04a0>, - <&apps_smmu 0x2d89 0x0400>, - <&apps_smmu 0x2dc9 0x04a0>, - <&apps_smmu 0x2de9 0x04a0>; + <&apps_smmu 0x2989 0x0400>; dma-coherent; }; @@ -4384,15 +4216,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <10>; iommus = <&apps_smmu 0x294a 0x04a0>, - <&apps_smmu 0x296a 0x04a0>, - <&apps_smmu 0x298a 0x0400>, - <&apps_smmu 0x29ca 0x04a0>, - <&apps_smmu 0x29ea 0x04a0>, - <&apps_smmu 0x2d4a 0x04a0>, - <&apps_smmu 0x2d6a 0x04a0>, - <&apps_smmu 0x2d8a 0x0400>, - <&apps_smmu 0x2dca 0x04a0>, - <&apps_smmu 0x2dea 0x04a0>; + <&apps_smmu 0x298a 0x0400>; dma-coherent; }; @@ -4400,15 +4224,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <11>; iommus = <&apps_smmu 0x294b 0x04a0>, - <&apps_smmu 0x296b 0x04a0>, - <&apps_smmu 0x298b 0x0400>, - <&apps_smmu 0x29cb 0x04a0>, - <&apps_smmu 0x29eb 0x04a0>, - <&apps_smmu 0x2d4b 0x04a0>, - <&apps_smmu 0x2d6b 0x04a0>, - <&apps_smmu 0x2d8b 0x0400>, - <&apps_smmu 0x2dcb 0x04a0>, - <&apps_smmu 0x2deb 0x04a0>; + <&apps_smmu 0x298b 0x0400>; dma-coherent; }; @@ -4416,15 +4232,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <12>; iommus = <&apps_smmu 0x294c 0x04a0>, - <&apps_smmu 0x296c 0x04a0>, - <&apps_smmu 0x298c 
0x0400>, - <&apps_smmu 0x29cc 0x04a0>, - <&apps_smmu 0x29ec 0x04a0>, - <&apps_smmu 0x2d4c 0x04a0>, - <&apps_smmu 0x2d6c 0x04a0>, - <&apps_smmu 0x2d8c 0x0400>, - <&apps_smmu 0x2dcc 0x04a0>, - <&apps_smmu 0x2dec 0x04a0>; + <&apps_smmu 0x298c 0x0400>; dma-coherent; }; @@ -4432,15 +4240,7 @@ compatible = "qcom,fastrpc-compute-cb"; reg = <13>; iommus = <&apps_smmu 0x294d 0x04a0>, - <&apps_smmu 0x296d 0x04a0>, - <&apps_smmu 0x298d 0x0400>, - <&apps_smmu 0x29cd 0x04a0>, - <&apps_smmu 0x29ed 0x04a0>, - <&apps_smmu 0x2d4d 0x04a0>, - <&apps_smmu 0x2d6d 0x04a0>, - <&apps_smmu 0x2d8d 0x0400>, - <&apps_smmu 0x2dcd 0x04a0>, - <&apps_smmu 0x2ded 0x04a0>; + <&apps_smmu 0x298d 0x0400>; dma-coherent; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts index 6a28cab971891..8e5951da5920d 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts +++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts @@ -1131,9 +1131,6 @@ "VA DMIC0", "MIC BIAS1", "VA DMIC1", "MIC BIAS1", "VA DMIC2", "MIC BIAS3", - "VA DMIC0", "VA MIC BIAS1", - "VA DMIC1", "VA MIC BIAS1", - "VA DMIC2", "VA MIC BIAS3", "TX SWR_ADC1", "ADC2_OUTPUT"; wcd-playback-dai-link { diff --git a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts index 962c8aa400440..dc604be4afc63 100644 --- a/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts +++ b/arch/arm64/boot/dts/qcom/sda660-inforce-ifc6560.dts @@ -167,6 +167,7 @@ * BAM DMA interconnects support is in place. */ /delete-property/ clocks; + /delete-property/ clock-names; }; &blsp1_uart2 { @@ -179,6 +180,7 @@ * BAM DMA interconnects support is in place. */ /delete-property/ clocks; + /delete-property/ clock-names; }; &blsp2_uart1 { diff --git a/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts index 7167f75bced3f..a9926ad6c6f9f 100644 --- a/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts +++ b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts @@ -107,6 +107,7 @@ status = "okay"; vdd-supply = <&vreg_l1b_0p925>; + vdda-pll-supply = <&vreg_l10a_1p8>; vdda-phy-dpdm-supply = <&vreg_l7b_3p125>; }; @@ -404,6 +405,8 @@ &sdhc_2 { status = "okay"; + cd-gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>; + vmmc-supply = <&vreg_l5b_2p95>; vqmmc-supply = <&vreg_l2b_2p95>; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts b/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts index d37a433130b98..5948b401165ce 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-samsung-starqltechn.dts @@ -135,8 +135,6 @@ vdda_sp_sensor: vdda_ufs1_core: vdda_ufs2_core: - vdda_usb1_ss_core: - vdda_usb2_ss_core: vreg_l1a_0p875: ldo1 { regulator-min-microvolt = <880000>; regulator-max-microvolt = <880000>; @@ -157,6 +155,7 @@ regulator-initial-mode = ; }; + vdda_usb1_ss_core: vdd_wcss_cx: vdd_wcss_mx: vdda_wcss_pll: @@ -383,8 +382,8 @@ }; &sdhc_2 { - pinctrl-names = "default"; pinctrl-0 = <&sdc2_clk_state &sdc2_cmd_state &sdc2_data_state &sd_card_det_n_state>; + pinctrl-names = "default"; cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>; vmmc-supply = <&vreg_l21a_2p95>; vqmmc-supply = <&vddpx_2>; @@ -418,16 +417,9 @@ status = "okay"; }; -&wifi { - vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>; - vdd-1.8-xo-supply = <&vreg_l7a_1p8>; - vdd-1.3-rfa-supply = <&vreg_l17a_1p3>; - vdd-3.3-ch0-supply = <&vreg_l25a_3p3>; - status = "okay"; -}; - &tlmm { - gpio-reserved-ranges
= <0 4>, <27 4>, <81 4>, <85 4>; + gpio-reserved-ranges = <27 4>, /* SPI (eSE - embedded Secure Element) */ + <85 4>; /* SPI (fingerprint reader) */ sdc2_clk_state: sdc2-clk-state { pins = "sdc2_clk"; diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index faa36d17b9f2c..e17937f76806c 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -606,7 +606,7 @@ }; cpu7_opp9: opp-1747200000 { - opp-hz = /bits/ 64 <1708800000>; + opp-hz = /bits/ 64 <1747200000>; opp-peak-kBps = <5412000 42393600>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index 46adf10e5fe4d..0be8f2befec7c 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -455,7 +455,7 @@ no-map; }; - pil_camera_mem: mmeory@85200000 { + pil_camera_mem: memory@85200000 { reg = <0x0 0x85200000 0x0 0x500000>; no-map; }; @@ -1806,11 +1806,11 @@ interrupts = ; #dma-cells = <1>; qcom,ee = <0>; + qcom,num-ees = <4>; + num-channels = <16>; qcom,controlled-remotely; iommus = <&apps_smmu 0x594 0x0011>, <&apps_smmu 0x596 0x0011>; - /* FIXME: Probing BAM DMA causes some abort and system hang */ - status = "fail"; }; crypto: crypto@1dfa000 { @@ -1822,8 +1822,6 @@ <&apps_smmu 0x596 0x0011>; interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>; interconnect-names = "memory"; - /* FIXME: dependency BAM DMA is disabled */ - status = "disabled"; }; ipa: ipa@1e40000 { diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi index d664a88a018ef..58ed68f534e50 100644 --- a/arch/arm64/boot/dts/qcom/sm8450.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi @@ -4553,6 +4553,8 @@ interrupts = ; #dma-cells = <1>; qcom,ee = <0>; + qcom,num-ees = <4>; + num-channels = <16>; qcom,controlled-remotely; iommus = <&apps_smmu 0x584 0x11>, <&apps_smmu 0x588 0x0>, diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi index 9ecf4a7fc3287..cfdd30009015f 100644 --- a/arch/arm64/boot/dts/qcom/sm8550.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi @@ -1952,6 +1952,8 @@ interrupts = ; #dma-cells = <1>; qcom,ee = <0>; + qcom,num-ees = <4>; + num-channels = <20>; qcom,controlled-remotely; iommus = <&apps_smmu 0x480 0x0>, <&apps_smmu 0x481 0x0>; diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi index 416cfb71878a5..bd91624bd3bfc 100644 --- a/arch/arm64/boot/dts/qcom/sm8650.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi @@ -68,18 +68,18 @@ #address-cells = <2>; #size-cells = <0>; - CPU0: cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a520"; reg = <0 0>; clocks = <&cpufreq_hw 0>; - power-domains = <&CPU_PD0>; + power-domains = <&cpu_pd0>; power-domain-names = "psci"; enable-method = "psci"; - next-level-cache = <&L2_0>; + next-level-cache = <&l2_0>; capacity-dmips-mhz = <1024>; dynamic-power-coefficient = <100>; @@ -87,13 +87,13 @@ #cooling-cells = <2>; - L2_0: l2-cache { + l2_0: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; - next-level-cache = <&L3_0>; + next-level-cache = <&l3_0>; - L3_0: l3-cache { + l3_0: l3-cache { compatible = "cache"; cache-level = <3>; cache-unified; @@ -101,18 +101,18 @@ }; }; - CPU1: cpu@100 { + cpu1: cpu@100 { device_type = "cpu"; compatible = "arm,cortex-a520"; reg = <0 0x100>; clocks = <&cpufreq_hw 0>; - power-domains = <&CPU_PD1>; + power-domains = <&cpu_pd1>; power-domain-names = "psci"; enable-method = "psci"; - 
next-level-cache = <&L2_0>; + next-level-cache = <&l2_0>; capacity-dmips-mhz = <1024>; dynamic-power-coefficient = <100>; @@ -121,18 +121,18 @@ #cooling-cells = <2>; }; - CPU2: cpu@200 { + cpu2: cpu@200 { device_type = "cpu"; compatible = "arm,cortex-a720"; reg = <0 0x200>; clocks = <&cpufreq_hw 3>; - power-domains = <&CPU_PD2>; + power-domains = <&cpu_pd2>; power-domain-names = "psci"; enable-method = "psci"; - next-level-cache = <&L2_200>; + next-level-cache = <&l2_200>; capacity-dmips-mhz = <1792>; dynamic-power-coefficient = <238>; @@ -140,46 +140,53 @@ #cooling-cells = <2>; - L2_200: l2-cache { + l2_200: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; - next-level-cache = <&L3_0>; + next-level-cache = <&l3_0>; }; }; - CPU3: cpu@300 { + cpu3: cpu@300 { device_type = "cpu"; compatible = "arm,cortex-a720"; reg = <0 0x300>; clocks = <&cpufreq_hw 3>; - power-domains = <&CPU_PD3>; + power-domains = <&cpu_pd3>; power-domain-names = "psci"; enable-method = "psci"; - next-level-cache = <&L2_200>; + next-level-cache = <&l2_300>; capacity-dmips-mhz = <1792>; dynamic-power-coefficient = <238>; qcom,freq-domain = <&cpufreq_hw 3>; #cooling-cells = <2>; + + l2_300: l2-cache { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&l3_0>; + }; }; - CPU4: cpu@400 { + cpu4: cpu@400 { device_type = "cpu"; compatible = "arm,cortex-a720"; reg = <0 0x400>; clocks = <&cpufreq_hw 3>; - power-domains = <&CPU_PD4>; + power-domains = <&cpu_pd4>; power-domain-names = "psci"; enable-method = "psci"; - next-level-cache = <&L2_400>; + next-level-cache = <&l2_400>; capacity-dmips-mhz = <1792>; dynamic-power-coefficient = <238>; @@ -187,26 +194,26 @@ #cooling-cells = <2>; - L2_400: l2-cache { + l2_400: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; - next-level-cache = <&L3_0>; + next-level-cache = <&l3_0>; }; }; - CPU5: cpu@500 { + cpu5: cpu@500 { device_type = "cpu"; compatible = "arm,cortex-a720"; reg = <0 0x500>; clocks = <&cpufreq_hw 1>; - power-domains = <&CPU_PD5>; + power-domains = <&cpu_pd5>; power-domain-names = "psci"; enable-method = "psci"; - next-level-cache = <&L2_500>; + next-level-cache = <&l2_500>; capacity-dmips-mhz = <1792>; dynamic-power-coefficient = <238>; @@ -214,26 +221,26 @@ #cooling-cells = <2>; - L2_500: l2-cache { + l2_500: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; - next-level-cache = <&L3_0>; + next-level-cache = <&l3_0>; }; }; - CPU6: cpu@600 { + cpu6: cpu@600 { device_type = "cpu"; compatible = "arm,cortex-a720"; reg = <0 0x600>; clocks = <&cpufreq_hw 1>; - power-domains = <&CPU_PD6>; + power-domains = <&cpu_pd6>; power-domain-names = "psci"; enable-method = "psci"; - next-level-cache = <&L2_600>; + next-level-cache = <&l2_600>; capacity-dmips-mhz = <1792>; dynamic-power-coefficient = <238>; @@ -241,26 +248,26 @@ #cooling-cells = <2>; - L2_600: l2-cache { + l2_600: l2-cache { compatible = "cache"; cache-level = <2>; cache-unified; - next-level-cache = <&L3_0>; + next-level-cache = <&l3_0>; }; }; - CPU7: cpu@700 { + cpu7: cpu@700 { device_type = "cpu"; compatible = "arm,cortex-x4"; reg = <0 0x700>; clocks = <&cpufreq_hw 2>; - power-domains = <&CPU_PD7>; + power-domains = <&cpu_pd7>; power-domain-names = "psci"; enable-method = "psci"; - next-level-cache = <&L2_700>; + next-level-cache = <&l2_700>; capacity-dmips-mhz = <1894>; dynamic-power-coefficient = <588>; @@ -268,46 +275,46 @@ #cooling-cells = <2>; - L2_700: l2-cache { + l2_700: l2-cache { compatible = "cache"; cache-level = <2>; 
cache-unified; - next-level-cache = <&L3_0>; + next-level-cache = <&l3_0>; }; }; cpu-map { cluster0 { core0 { - cpu = <&CPU0>; + cpu = <&cpu0>; }; core1 { - cpu = <&CPU1>; + cpu = <&cpu1>; }; core2 { - cpu = <&CPU2>; + cpu = <&cpu2>; }; core3 { - cpu = <&CPU3>; + cpu = <&cpu3>; }; core4 { - cpu = <&CPU4>; + cpu = <&cpu4>; }; core5 { - cpu = <&CPU5>; + cpu = <&cpu5>; }; core6 { - cpu = <&CPU6>; + cpu = <&cpu6>; }; core7 { - cpu = <&CPU7>; + cpu = <&cpu7>; }; }; }; @@ -315,7 +322,7 @@ idle-states { entry-method = "psci"; - SILVER_CPU_SLEEP_0: cpu-sleep-0-0 { + silver_cpu_sleep_0: cpu-sleep-0-0 { compatible = "arm,idle-state"; idle-state-name = "silver-rail-power-collapse"; arm,psci-suspend-param = <0x40000004>; @@ -325,7 +332,7 @@ local-timer-stop; }; - GOLD_CPU_SLEEP_0: cpu-sleep-1-0 { + gold_cpu_sleep_0: cpu-sleep-1-0 { compatible = "arm,idle-state"; idle-state-name = "gold-rail-power-collapse"; arm,psci-suspend-param = <0x40000004>; @@ -335,7 +342,7 @@ local-timer-stop; }; - GOLD_PLUS_CPU_SLEEP_0: cpu-sleep-2-0 { + gold_plus_cpu_sleep_0: cpu-sleep-2-0 { compatible = "arm,idle-state"; idle-state-name = "gold-plus-rail-power-collapse"; arm,psci-suspend-param = <0x40000004>; @@ -347,7 +354,7 @@ }; domain-idle-states { - CLUSTER_SLEEP_0: cluster-sleep-0 { + cluster_sleep_0: cluster-sleep-0 { compatible = "domain-idle-state"; arm,psci-suspend-param = <0x41000044>; entry-latency-us = <750>; @@ -355,7 +362,7 @@ min-residency-us = <9144>; }; - CLUSTER_SLEEP_1: cluster-sleep-1 { + cluster_sleep_1: cluster-sleep-1 { compatible = "domain-idle-state"; arm,psci-suspend-param = <0x4100c344>; entry-latency-us = <2800>; @@ -411,58 +418,58 @@ compatible = "arm,psci-1.0"; method = "smc"; - CPU_PD0: power-domain-cpu0 { + cpu_pd0: power-domain-cpu0 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; - domain-idle-states = <&SILVER_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&silver_cpu_sleep_0>; }; - CPU_PD1: power-domain-cpu1 { + cpu_pd1: power-domain-cpu1 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; - domain-idle-states = <&SILVER_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&silver_cpu_sleep_0>; }; - CPU_PD2: power-domain-cpu2 { + cpu_pd2: power-domain-cpu2 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; - domain-idle-states = <&SILVER_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&gold_cpu_sleep_0>; }; - CPU_PD3: power-domain-cpu3 { + cpu_pd3: power-domain-cpu3 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; - domain-idle-states = <&GOLD_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&gold_cpu_sleep_0>; }; - CPU_PD4: power-domain-cpu4 { + cpu_pd4: power-domain-cpu4 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; - domain-idle-states = <&GOLD_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&gold_cpu_sleep_0>; }; - CPU_PD5: power-domain-cpu5 { + cpu_pd5: power-domain-cpu5 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; - domain-idle-states = <&GOLD_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&gold_cpu_sleep_0>; }; - CPU_PD6: power-domain-cpu6 { + cpu_pd6: power-domain-cpu6 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; - domain-idle-states = <&GOLD_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&gold_cpu_sleep_0>; }; - CPU_PD7: power-domain-cpu7 { + cpu_pd7: power-domain-cpu7 { #power-domain-cells = <0>; - power-domains = <&CLUSTER_PD>; 
- domain-idle-states = <&GOLD_PLUS_CPU_SLEEP_0>; + power-domains = <&cluster_pd>; + domain-idle-states = <&gold_plus_cpu_sleep_0>; }; - CLUSTER_PD: power-domain-cluster { + cluster_pd: power-domain-cluster { #power-domain-cells = <0>; - domain-idle-states = <&CLUSTER_SLEEP_0>, - <&CLUSTER_SLEEP_1>; + domain-idle-states = <&cluster_sleep_0>, + <&cluster_sleep_1>; }; }; @@ -2495,6 +2502,8 @@ <&apps_smmu 0x481 0>; qcom,ee = <0>; + qcom,num-ees = <4>; + num-channels = <20>; qcom,controlled-remotely; }; @@ -3603,8 +3612,11 @@ resets = <&dispcc DISP_CC_MDSS_CORE_BCR>; interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS - &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>; - interconnect-names = "mdp0-mem"; + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY + &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>; + interconnect-names = "mdp0-mem", + "cpu-cfg"; power-domains = <&dispcc MDSS_GDSC>; @@ -5228,7 +5240,7 @@ , ; - power-domains = <&CLUSTER_PD>; + power-domains = <&cluster_pd>; qcom,tcs-offset = <0xd00>; qcom,drv-id = <2>; @@ -6352,20 +6364,20 @@ trips { gpu0_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = "passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; @@ -6385,20 +6397,20 @@ trips { gpu1_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = "passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; @@ -6418,20 +6430,20 @@ trips { gpu2_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = "passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; @@ -6451,20 +6463,20 @@ trips { gpu3_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = "passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; @@ -6484,20 +6496,20 @@ trips { gpu4_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = "passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; @@ -6517,20 +6529,20 @@ trips { gpu5_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = "passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; @@ -6550,20 +6562,20 @@ trips { gpu6_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = 
"passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; @@ -6583,20 +6595,20 @@ trips { gpu7_alert0: trip-point0 { - temperature = <85000>; + temperature = <95000>; hysteresis = <1000>; type = "passive"; }; trip-point1 { - temperature = <90000>; + temperature = <110000>; hysteresis = <1000>; type = "hot"; }; trip-point2 { - temperature = <110000>; - hysteresis = <1000>; + temperature = <115000>; + hysteresis = <0>; type = "critical"; }; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts index b2cf080cab562..ce3fa29de7b4a 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts @@ -314,8 +314,8 @@ vreg_l2j_1p2: ldo2 { regulator-name = "vreg_l2j_1p2"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1256000>; regulator-initial-mode = ; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts index 044a2f1432fe3..2a504a449b0bb 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts @@ -419,6 +419,7 @@ regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-initial-mode = ; + regulator-always-on; }; vreg_l13b_3p0: ldo13 { @@ -440,6 +441,7 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-initial-mode = ; + regulator-always-on; }; vreg_l16b_2p9: ldo16 { diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts index e9ed723f90381..07c2fdfe7ce13 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts @@ -266,6 +266,7 @@ regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-initial-mode = ; + regulator-always-on; }; vreg_l14b_3p0: ldo14 { @@ -280,8 +281,8 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-initial-mode = ; + regulator-always-on; }; - }; regulators-1 { @@ -484,8 +485,8 @@ vreg_l2j_1p2: ldo2 { regulator-name = "vreg_l2j_1p2"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1256000>; regulator-initial-mode = ; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi index 19da90704b7cb..001a9dc0a4baa 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi @@ -267,6 +267,7 @@ regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-initial-mode = ; + regulator-always-on; }; vreg_l13b: ldo13 { @@ -288,6 +289,7 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-initial-mode = ; + regulator-always-on; }; vreg_l16b: ldo16 { diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts index af76aa034d0e1..9062eb6766f2c 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts @@ -356,6 +356,7 @@ 
regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-initial-mode = ; + regulator-always-on; }; vreg_l13b_3p0: ldo13 { @@ -377,6 +378,7 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-initial-mode = ; + regulator-always-on; }; vreg_l16b_2p9: ldo16 { @@ -594,8 +596,8 @@ vreg_l2j_1p2: ldo2 { regulator-name = "vreg_l2j_1p2"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1256000>; regulator-initial-mode = ; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi index 91e4fbca19f99..5082ecb32089b 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi @@ -20,6 +20,7 @@ #include #include #include +#include / { interrupt-parent = <&intc>; @@ -4284,6 +4285,8 @@ phy-names = "usb2-phy"; maximum-speed = "high-speed"; + dma-coherent; + ports { #address-cells = <1>; #size-cells = <0>; @@ -6412,8 +6415,8 @@ }; aoss0-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -6438,7 +6441,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6464,7 +6467,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6490,7 +6493,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6516,7 +6519,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6542,7 +6545,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6568,7 +6571,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6594,7 +6597,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6620,7 +6623,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6638,8 +6641,8 @@ }; cpuss2-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -6656,8 +6659,8 @@ }; cpuss2-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -6674,7 +6677,7 @@ }; mem-critical { - temperature = <125000>; + temperature = <115000>; hysteresis = <0>; type = "critical"; }; @@ -6682,15 +6685,19 @@ }; video-thermal { - polling-delay-passive = <250>; - thermal-sensors = <&tsens0 12>; trips { trip-point0 { - temperature = <125000>; + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + + video-critical { + temperature = <115000>; hysteresis = <1000>; - type = "passive"; + type = "critical"; }; }; }; @@ -6706,8 +6713,8 @@ }; aoss0-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -6732,7 +6739,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6758,7 +6765,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6784,7 +6791,7 @@ }; cpu-critical { - 
temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6810,7 +6817,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6836,7 +6843,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6862,7 +6869,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6888,7 +6895,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6914,7 +6921,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -6932,8 +6939,8 @@ }; cpuss2-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -6950,8 +6957,8 @@ }; cpuss2-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -6968,8 +6975,8 @@ }; aoss0-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -6994,7 +7001,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7020,7 +7027,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7046,7 +7053,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7072,7 +7079,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7098,7 +7105,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7124,7 +7131,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7150,7 +7157,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7176,7 +7183,7 @@ }; cpu-critical { - temperature = <110000>; + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7194,8 +7201,8 @@ }; cpuss2-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -7212,8 +7219,8 @@ }; cpuss2-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -7230,8 +7237,8 @@ }; aoss0-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -7248,8 +7255,8 @@ }; nsp0-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -7266,8 +7273,8 @@ }; nsp1-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -7284,8 +7291,8 @@ }; nsp2-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; @@ -7302,33 +7309,34 @@ }; nsp3-critical { - temperature = <125000>; - hysteresis = <0>; + temperature = <115000>; + hysteresis = <1000>; type = "critical"; }; }; }; gpuss-0-thermal { - polling-delay-passive = <10>; + polling-delay-passive = 
<200>; thermal-sensors = <&tsens3 5>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss0_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = <90000>; + trips { + gpuss0_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7336,25 +7344,26 @@ }; gpuss-1-thermal { - polling-delay-passive = <10>; + polling-delay-passive = <200>; thermal-sensors = <&tsens3 6>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss1_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = <90000>; + trips { + gpuss1_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7362,25 +7371,26 @@ }; gpuss-2-thermal { - polling-delay-passive = <10>; + polling-delay-passive = <200>; thermal-sensors = <&tsens3 7>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss2_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = <90000>; + trips { + gpuss2_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7388,25 +7398,26 @@ }; gpuss-3-thermal { - polling-delay-passive = <10>; + polling-delay-passive = <200>; thermal-sensors = <&tsens3 8>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss3_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = <90000>; + trips { + gpuss3_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7414,25 +7425,26 @@ }; gpuss-4-thermal { - polling-delay-passive = <10>; + polling-delay-passive = <200>; thermal-sensors = <&tsens3 9>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss4_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = <90000>; + trips { + gpuss4_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7440,25 +7452,26 @@ }; gpuss-5-thermal { - polling-delay-passive = <10>; + polling-delay-passive = <200>; thermal-sensors = <&tsens3 10>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss5_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = 
<90000>; + trips { + gpuss5_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7466,25 +7479,26 @@ }; gpuss-6-thermal { - polling-delay-passive = <10>; + polling-delay-passive = <200>; thermal-sensors = <&tsens3 11>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss6_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = <90000>; + trips { + gpuss6_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7492,25 +7506,26 @@ }; gpuss-7-thermal { - polling-delay-passive = <10>; + polling-delay-passive = <200>; thermal-sensors = <&tsens3 12>; - trips { - trip-point0 { - temperature = <85000>; - hysteresis = <1000>; - type = "passive"; + cooling-maps { + map0 { + trip = <&gpuss7_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; + }; - trip-point1 { - temperature = <90000>; + trips { + gpuss7_alert0: trip-point0 { + temperature = <95000>; hysteresis = <1000>; - type = "hot"; + type = "passive"; }; - trip-point2 { - temperature = <125000>; + gpu-critical { + temperature = <115000>; hysteresis = <1000>; type = "critical"; }; @@ -7529,7 +7544,7 @@ camera0-critical { temperature = <115000>; - hysteresis = <0>; + hysteresis = <1000>; type = "critical"; }; }; @@ -7547,7 +7562,7 @@ camera0-critical { temperature = <115000>; - hysteresis = <0>; + hysteresis = <1000>; type = "critical"; }; }; diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi index 68b04e56ae562..5a15a956702a6 100644 --- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi +++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi @@ -62,8 +62,7 @@ compatible = "ethernet-phy-id0022.1640", "ethernet-phy-ieee802.3-c22"; reg = <0>; - interrupt-parent = <&gpio2>; - interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi index 8c9da8b4bd60b..191b051ecfd45 100644 --- a/arch/arm64/boot/dts/renesas/cat875.dtsi +++ b/arch/arm64/boot/dts/renesas/cat875.dtsi @@ -25,8 +25,7 @@ compatible = "ethernet-phy-id001c.c915", "ethernet-phy-ieee802.3-c22"; reg = <0>; - interrupt-parent = <&gpio2>; - interrupts = <21 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/condor-common.dtsi b/arch/arm64/boot/dts/renesas/condor-common.dtsi index 8b7c0c34eadce..b2d99dfaa0cdf 100644 --- a/arch/arm64/boot/dts/renesas/condor-common.dtsi +++ b/arch/arm64/boot/dts/renesas/condor-common.dtsi @@ -166,8 +166,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio4>; - interrupts = <23 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio4 23 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/draak.dtsi b/arch/arm64/boot/dts/renesas/draak.dtsi index 6f133f54ded54..402112a37d75a 100644 --- 
a/arch/arm64/boot/dts/renesas/draak.dtsi +++ b/arch/arm64/boot/dts/renesas/draak.dtsi @@ -247,8 +247,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio5>; - interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio5 19 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio5 18 GPIO_ACTIVE_LOW>; /* * TX clock internal delay mode is required for reliable diff --git a/arch/arm64/boot/dts/renesas/ebisu.dtsi b/arch/arm64/boot/dts/renesas/ebisu.dtsi index cba2fde9dd368..1aedd093fb41b 100644 --- a/arch/arm64/boot/dts/renesas/ebisu.dtsi +++ b/arch/arm64/boot/dts/renesas/ebisu.dtsi @@ -314,8 +314,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio2>; - interrupts = <21 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>; /* * TX clock internal delay mode is required for reliable diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi index ad898c6db4e62..4113710d55226 100644 --- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi +++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi @@ -27,8 +27,7 @@ compatible = "ethernet-phy-id001c.c915", "ethernet-phy-ieee802.3-c22"; reg = <0>; - interrupt-parent = <&gpio2>; - interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts index 0608dce92e405..7dd9e13cf0074 100644 --- a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts +++ b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts @@ -111,8 +111,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio1>; - interrupts = <17 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio1 17 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts index e36999e91af53..0a103f93b14d7 100644 --- a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts +++ b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts @@ -117,8 +117,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio1>; - interrupts = <17 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio1 17 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts b/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts index 77d22df25fffa..a8a20c748ffcd 100644 --- a/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts +++ b/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts @@ -124,8 +124,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio4>; - interrupts = <23 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio4 23 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts index 63db822e5f466..6bd580737f25d 100644 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts @@ -31,8 +31,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio4>; - interrupts = <16 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio4 16 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; }; }; diff --git 
a/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi index 33c1015e9ab38..5d38669ed1ec3 100644 --- a/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi @@ -60,8 +60,7 @@ u101: ethernet-phy@1 { reg = <1>; compatible = "ethernet-phy-ieee802.3-c45"; - interrupt-parent = <&gpio3>; - interrupts = <10 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>; }; }; }; @@ -78,8 +77,7 @@ u201: ethernet-phy@2 { reg = <2>; compatible = "ethernet-phy-ieee802.3-c45"; - interrupt-parent = <&gpio3>; - interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>; }; }; }; @@ -96,8 +94,7 @@ u301: ethernet-phy@3 { reg = <3>; compatible = "ethernet-phy-ieee802.3-c45"; - interrupt-parent = <&gpio3>; - interrupts = <9 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio3 9 IRQ_TYPE_LEVEL_LOW>; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts index fa910b85859e9..5d71d52f9c654 100644 --- a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts +++ b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts @@ -197,8 +197,7 @@ ic99: ethernet-phy@1 { reg = <1>; compatible = "ethernet-phy-ieee802.3-c45"; - interrupt-parent = <&gpio3>; - interrupts = <10 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>; }; }; }; @@ -216,8 +215,7 @@ ic102: ethernet-phy@2 { reg = <2>; compatible = "ethernet-phy-ieee802.3-c45"; - interrupt-parent = <&gpio3>; - interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso index e6cf304c77ee9..5d820bd32ff67 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso +++ b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso @@ -108,7 +108,7 @@ }; tpu0_pins: tpu0 { - groups = "tpu_to0_a"; + groups = "tpu_to0_b"; function = "tpu"; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts index 50a428572d9bd..48befde389376 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts +++ b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts @@ -7,71 +7,10 @@ /dts-v1/; #include "r8a779g2.dtsi" -#include "white-hawk-cpu-common.dtsi" -#include "white-hawk-common.dtsi" +#include "white-hawk-single.dtsi" / { model = "Renesas White Hawk Single board based on r8a779g2"; compatible = "renesas,white-hawk-single", "renesas,r8a779g2", "renesas,r8a779g0"; }; - -&hscif0 { - uart-has-rtscts; -}; - -&hscif0_pins { - groups = "hscif0_data", "hscif0_ctrl"; - function = "hscif0"; -}; - -&pfc { - tsn0_pins: tsn0 { - mux { - groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii", - "tsn0_txcrefclk"; - function = "tsn0"; - }; - - link { - groups = "tsn0_link"; - bias-disable; - }; - - mdio { - groups = "tsn0_mdio"; - drive-strength = <24>; - bias-disable; - }; - - rgmii { - groups = "tsn0_rgmii"; - drive-strength = <24>; - bias-disable; - }; - }; -}; - -&tsn0 { - pinctrl-0 = <&tsn0_pins>; - pinctrl-names = "default"; - phy-mode = "rgmii"; - phy-handle = <&phy3>; - status = "okay"; - - mdio { - #address-cells = <1>; - #size-cells = <0>; - - reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; - reset-post-delay-us = <4000>; - - phy3: 
ethernet-phy@0 { - compatible = "ethernet-phy-id002b.0980", - "ethernet-phy-ieee802.3-c22"; - reg = <0>; - interrupt-parent = <&gpio4>; - interrupts = <3 IRQ_TYPE_LEVEL_LOW>; - }; - }; -}; diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts index 9a1917b87f613..f4d721a7f505c 100644 --- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts +++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts @@ -175,8 +175,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio7>; - interrupts = <5 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio7 5 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi index 83f5642d0d35c..502d9f17bf16d 100644 --- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi @@ -102,8 +102,7 @@ compatible = "ethernet-phy-id0022.1640", "ethernet-phy-ieee802.3-c22"; reg = <7>; - interrupt-parent = <&irqc>; - interrupts = <RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&irqc RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>; rxc-skew-psec = <2400>; txc-skew-psec = <2400>; rxdv-skew-psec = <0>; @@ -130,8 +129,7 @@ compatible = "ethernet-phy-id0022.1640", "ethernet-phy-ieee802.3-c22"; reg = <7>; - interrupt-parent = <&irqc>; - interrupts = <RZG2L_IRQ3 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&irqc RZG2L_IRQ3 IRQ_TYPE_LEVEL_LOW>; rxc-skew-psec = <2400>; txc-skew-psec = <2400>; rxdv-skew-psec = <0>; diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi index b4ef5ea8a9e34..de39311a77dc2 100644 --- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi @@ -82,8 +82,7 @@ compatible = "ethernet-phy-id0022.1640", "ethernet-phy-ieee802.3-c22"; reg = <7>; - interrupt-parent = <&irqc>; - interrupts = <RZG2L_IRQ0 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&irqc RZG2L_IRQ0 IRQ_TYPE_LEVEL_LOW>; rxc-skew-psec = <2400>; txc-skew-psec = <2400>; rxdv-skew-psec = <0>; diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi index 79443fb3f5810..1a6fd58bd3682 100644 --- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi @@ -78,8 +78,7 @@ compatible = "ethernet-phy-id0022.1640", "ethernet-phy-ieee802.3-c22"; reg = <7>; - interrupt-parent = <&irqc>; - interrupts = <RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&irqc RZG2L_IRQ2 IRQ_TYPE_LEVEL_LOW>; rxc-skew-psec = <2400>; txc-skew-psec = <2400>; rxdv-skew-psec = <0>; @@ -107,8 +106,7 @@ compatible = "ethernet-phy-id0022.1640", "ethernet-phy-ieee802.3-c22"; reg = <7>; - interrupt-parent = <&irqc>; - interrupts = <RZG2L_IRQ7 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&irqc RZG2L_IRQ7 IRQ_TYPE_LEVEL_LOW>; rxc-skew-psec = <2400>; txc-skew-psec = <2400>; rxdv-skew-psec = <0>; diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi index 612cdc7efabbc..d2d367c09abd4 100644 --- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi @@ -98,8 +98,7 @@ phy0: ethernet-phy@7 { reg = <7>; - interrupt-parent = <&pinctrl>; - interrupts = <RZG2L_GPIO(12, 0) IRQ_TYPE_EDGE_FALLING>; + interrupts-extended = <&pinctrl RZG2L_GPIO(12, 0) IRQ_TYPE_EDGE_FALLING>; rxc-skew-psec = <0>; txc-skew-psec = <0>; rxdv-skew-psec = <0>; @@ -124,8 +123,7 @@ phy1: ethernet-phy@7 { reg = <7>; - interrupt-parent = <&pinctrl>; - 
interrupts = <RZG2L_GPIO(12, 1) IRQ_TYPE_EDGE_FALLING>; + interrupts-extended = <&pinctrl RZG2L_GPIO(12, 1) IRQ_TYPE_EDGE_FALLING>; rxc-skew-psec = <0>; txc-skew-psec = <0>; rxdv-skew-psec = <0>; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index 1eb4883b32197..c5035232956a8 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -353,8 +353,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio2>; - interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index a2f66f9160484..4cf141a701c06 100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -150,8 +150,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio2>; - interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio2 11 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi index 3845b413bd24c..69e4fddebd4e4 100644 --- a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi +++ b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi @@ -167,8 +167,7 @@ "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; - interrupt-parent = <&gpio7>; - interrupts = <5 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio7 5 IRQ_TYPE_LEVEL_LOW>; reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi index 595ec4ff4cdd0..ad94bf3f5e6c4 100644 --- a/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi +++ b/arch/arm64/boot/dts/renesas/white-hawk-ethernet.dtsi @@ -29,8 +29,7 @@ avb1_phy: ethernet-phy@0 { compatible = "ethernet-phy-ieee802.3-c45"; reg = <0>; - interrupt-parent = <&gpio6>; - interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio6 3 IRQ_TYPE_LEVEL_LOW>; }; }; }; @@ -51,8 +50,7 @@ avb2_phy: ethernet-phy@0 { compatible = "ethernet-phy-ieee802.3-c45"; reg = <0>; - interrupt-parent = <&gpio5>; - interrupts = <4 IRQ_TYPE_LEVEL_LOW>; + interrupts-extended = <&gpio5 4 IRQ_TYPE_LEVEL_LOW>; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi new file mode 100644 index 0000000000000..976a3ab44e5a5 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Device Tree Source for the White Hawk Single board + * + * Copyright (C) 2023-2024 Glider bv + */ + +#include "white-hawk-cpu-common.dtsi" +#include "white-hawk-common.dtsi" + +/ { + model = "Renesas White Hawk Single board"; + compatible = "renesas,white-hawk-single"; + + aliases { + ethernet3 = &tsn0; + }; +}; + +&hscif0 { + uart-has-rtscts; +}; + +&hscif0_pins { + groups = "hscif0_data", "hscif0_ctrl"; + function = "hscif0"; +}; + +&pfc { + tsn0_pins: tsn0 { + mux { + groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii", + "tsn0_txcrefclk"; + function = "tsn0"; + }; + + link { + groups = "tsn0_link"; + bias-disable; + }; + + mdio { + groups = "tsn0_mdio"; + drive-strength = <24>; + bias-disable; + }; + + rgmii { + groups = "tsn0_rgmii"; + drive-strength = <24>; 
bias-disable; + }; + }; +}; + +&tsn0 { + pinctrl-0 = <&tsn0_pins>; + pinctrl-names = "default"; + phy-mode = "rgmii"; + phy-handle = <&tsn0_phy>; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; + reset-post-delay-us = <4000>; + + tsn0_phy: ethernet-phy@0 { + compatible = "ethernet-phy-id002b.0980", + "ethernet-phy-ieee802.3-c22"; + reg = <0>; + interrupts-extended = <&gpio4 3 IRQ_TYPE_LEVEL_LOW>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index f6f15946579eb..57466fbfd3f9a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts @@ -284,14 +284,6 @@ status = "okay"; }; -&usb_host0_ehci { - status = "okay"; -}; - -&usb_host0_ohci { - status = "okay"; -}; - &vopb { status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 257636d0d2cbb..0a73218ea37b3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -59,17 +59,7 @@ vin-supply = <&vcc5v0_sys>; }; - vcc5v0_host: vcc5v0-host-regulator { - compatible = "regulator-fixed"; - gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; - pinctrl-names = "default"; - pinctrl-0 = <&vcc5v0_host_en>; - regulator-name = "vcc5v0_host"; - regulator-always-on; - vin-supply = <&vcc5v0_sys>; - }; - - vcc5v0_sys: vcc5v0-sys { + vcc5v0_sys: regulator-vcc5v0-sys { compatible = "regulator-fixed"; regulator-name = "vcc5v0_sys"; regulator-always-on; @@ -509,10 +499,10 @@ }; }; - usb2 { - vcc5v0_host_en: vcc5v0-host-en { + usb { + cy3304_reset: cy3304-reset { rockchip,pins = - <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>; + <4 RK_PA3 RK_FUNC_GPIO &pcfg_output_high>; }; }; @@ -579,7 +569,6 @@ }; u2phy1_host: host-port { - phy-supply = <&vcc5v0_host>; status = "okay"; }; }; @@ -591,6 +580,29 @@ &usbdrd_dwc3_1 { status = "okay"; dr_mode = "host"; + pinctrl-names = "default"; + pinctrl-0 = <&cy3304_reset>; + #address-cells = <1>; + #size-cells = <0>; + + hub_2_0: hub@1 { + compatible = "usb4b4,6502", "usb4b4,6506"; + reg = <1>; + peer-hub = <&hub_3_0>; + reset-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + vdd-supply = <&vcc1v2_phy>; + vdd2-supply = <&vcc3v3_sys>; + + }; + + hub_3_0: hub@2 { + compatible = "usb4b4,6500", "usb4b4,6504"; + reg = <2>; + peer-hub = <&hub_2_0>; + reset-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + vdd-supply = <&vcc1v2_phy>; + vdd2-supply = <&vcc3v3_sys>; + }; }; &usb_host1_ehci { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi index 11d99d8b34a2b..66d010a9e8c31 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi @@ -227,6 +227,16 @@ vin-supply = <&vcc12v_dcin>; }; + vcca_0v9: vcca-0v9 { + compatible = "regulator-fixed"; + regulator-name = "vcca_0v9"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + vin-supply = <&vcc3v3_sys>; + }; + vdd_log: vdd-log { compatible = "pwm-regulator"; pwms = <&pwm2 0 25000 1>; @@ -312,6 +322,8 @@ }; &hdmi { + avdd-0v9-supply = <&vcca_0v9>; + avdd-1v8-supply = <&vcc1v8_dvp>; ddc-i2c-bus = <&i2c3>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_cec>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts index 
f2cc086e5001a..887c9be1b4100 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-rock-3c.dts @@ -636,6 +636,7 @@ spi-max-frequency = <104000000>; spi-rx-bus-width = <4>; spi-tx-bus-width = <1>; + vcc-supply = <&vcc_1v8>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi index 93189f8306400..c30354268c8f5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi @@ -486,9 +486,12 @@ &sdhci { bus-width = <8>; max-frequency = <200000000>; + mmc-hs200-1_8v; non-removable; pinctrl-names = "default"; - pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>; + pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>; + vmmc-supply = <&vcc_3v3>; + vqmmc-supply = <&vcc_1v8>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi index 83e7e0fbe7839..ad4331bc07806 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi @@ -428,16 +428,15 @@ #clock-cells = <0>; }; - pmu_sram: sram@10f000 { - compatible = "mmio-sram"; - reg = <0x0 0x0010f000 0x0 0x100>; - ranges = <0 0x0 0x0010f000 0x100>; - #address-cells = <1>; - #size-cells = <1>; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; - scmi_shmem: sram@0 { + scmi_shmem: shmem@10f000 { compatible = "arm,scmi-shmem"; - reg = <0x0 0x100>; + reg = <0x0 0x0010f000 0x0 0x100>; + no-map; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi index 60c6814206a1f..3f3a31eced970 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi @@ -552,8 +552,6 @@ power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 57 5>, <&k3_clks 57 6>; clock-names = "clk_ahb", "clk_xin"; - assigned-clocks = <&k3_clks 57 6>; - assigned-clock-parents = <&k3_clks 57 8>; bus-width = <8>; mmc-ddr-1_8v; mmc-hs200-1_8v; diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi index 56945d29e0150..45d68a0d1b593 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi @@ -575,8 +575,6 @@ power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 57 5>, <&k3_clks 57 6>; clock-names = "clk_ahb", "clk_xin"; - assigned-clocks = <&k3_clks 57 6>; - assigned-clock-parents = <&k3_clks 57 8>; bus-width = <8>; mmc-hs200-1_8v; ti,clkbuf-sel = <0x7>; diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi index 9b6f513791083..77fe2b27cb58d 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi @@ -564,8 +564,6 @@ power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 57 1>, <&k3_clks 57 2>; clock-names = "clk_ahb", "clk_xin"; - assigned-clocks = <&k3_clks 57 2>; - assigned-clock-parents = <&k3_clks 57 4>; bus-width = <8>; mmc-ddr-1_8v; mmc-hs200-1_8v; diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso index 76ca02127f95f..dd090813a32d6 100644 --- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso +++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-imx219.dtso @@ -22,7 +22,7 @@ #size-cells = <0>; status = "okay"; - i2c-switch@71 { + i2c-mux@71 { 
compatible = "nxp,pca9543"; #address-cells = <1>; #size-cells = <0>; @@ -39,7 +39,6 @@ reg = <0x10>; clocks = <&clk_imx219_fixed>; - clock-names = "xclk"; reset-gpios = <&exp1 13 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso index ccc7f5e43184f..7fc7c95f5cd57 100644 --- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso +++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-ov5640.dtso @@ -22,7 +22,7 @@ #size-cells = <0>; status = "okay"; - i2c-switch@71 { + i2c-mux@71 { compatible = "nxp,pca9543"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso index 4eaf9d757dd0a..b6bfdfbbdd984 100644 --- a/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso +++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-csi2-tevi-ov5640.dtso @@ -22,7 +22,7 @@ #size-cells = <0>; status = "okay"; - i2c-switch@71 { + i2c-mux@71 { compatible = "nxp,pca9543"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 1f1af7ea23305..0534b53483473 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -449,6 +449,8 @@ ti,otap-del-sel-mmc-hs = <0x0>; ti,otap-del-sel-ddr52 = <0x5>; ti,otap-del-sel-hs200 = <0x5>; + ti,itap-del-sel-legacy = <0xa>; + ti,itap-del-sel-mmc-hs = <0x1>; ti,itap-del-sel-ddr52 = <0x0>; dma-coherent; status = "disabled"; diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts index d5ceab79536ca..b40496097f82d 100644 --- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts +++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts @@ -44,6 +44,17 @@ regulator-boot-on; }; + vsys_5v0: regulator-vsys5v0 { + /* Output of LM61460 */ + compatible = "regulator-fixed"; + regulator-name = "vsys_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vusb_main>; + regulator-always-on; + regulator-boot-on; + }; + vsys_3v3: regulator-vsys3v3 { /* Output of LM5141 */ compatible = "regulator-fixed"; @@ -76,7 +87,7 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; - vin-supply = <&vsys_3v3>; + vin-supply = <&vsys_5v0>; gpios = <&main_gpio0 49 GPIO_ACTIVE_HIGH>; states = <1800000 0x0>, <3300000 0x1>; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts index 8230d53cd6960..f7a557e6af547 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts @@ -557,6 +557,7 @@ &ospi1 { pinctrl-names = "default"; pinctrl-0 = <&mcu_fss0_ospi1_pins_default>; + status = "okay"; flash@0 { compatible = "jedec,spi-nor"; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso b/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso index 47bb5480b5b00..4eb3cffab0321 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso +++ b/arch/arm64/boot/dts/ti/k3-j721e-sk-csi2-dual-imx219.dtso @@ -19,6 +19,33 @@ #clock-cells = <0>; clock-frequency = <24000000>; }; + + reg_2p8v: regulator-2p8v { + compatible = "regulator-fixed"; + regulator-name = "2P8V"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + vin-supply = <&vdd_sd_dv>; + regulator-always-on; + }; + + reg_1p8v: regulator-1p8v 
{ + compatible = "regulator-fixed"; + regulator-name = "1P8V"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vdd_sd_dv>; + regulator-always-on; + }; + + reg_1p2v: regulator-1p2v { + compatible = "regulator-fixed"; + regulator-name = "1P2V"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + vin-supply = <&vdd_sd_dv>; + regulator-always-on; + }; }; &csi_mux { @@ -34,7 +61,9 @@ reg = <0x10>; clocks = <&clk_imx219_fixed>; - clock-names = "xclk"; + VANA-supply = <&reg_2p8v>; + VDIG-supply = <&reg_1p8v>; + VDDL-supply = <&reg_1p2v>; port { csi2_cam0: endpoint { @@ -56,7 +85,9 @@ reg = <0x10>; clocks = <&clk_imx219_fixed>; - clock-names = "xclk"; + VANA-supply = <&reg_2p8v>; + VDIG-supply = <&reg_1p8v>; + VDDL-supply = <&reg_1p2v>; port { csi2_cam1: endpoint { diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts index 6285e8d94ddeb..c8d7eb1814f06 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts @@ -184,6 +184,17 @@ regulator-boot-on; }; + vsys_5v0: fixedregulator-vsys5v0 { + /* Output of LM61460 */ + compatible = "regulator-fixed"; + regulator-name = "vsys_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vusb_main>; + regulator-always-on; + regulator-boot-on; + }; + vdd_mmc1: fixedregulator-sd { compatible = "regulator-fixed"; pinctrl-names = "default"; @@ -211,6 +222,20 @@ <3300000 0x1>; }; + vdd_sd_dv: gpio-regulator-TLV71033 { + compatible = "regulator-gpio"; + pinctrl-names = "default"; + pinctrl-0 = <&vdd_sd_dv_pins_default>; + regulator-name = "tlv71033"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + vin-supply = <&vsys_5v0>; + gpios = <&main_gpio0 118 GPIO_ACTIVE_HIGH>; + states = <1800000 0x0>, + <3300000 0x1>; + }; + transceiver1: can-phy1 { compatible = "ti,tcan1042"; #phy-cells = <0>; @@ -608,6 +633,12 @@ >; }; + vdd_sd_dv_pins_default: vdd-sd-dv-default-pins { + pinctrl-single,pins = < + J721E_IOPAD(0x1dc, PIN_OUTPUT, 7) /* (Y1) SPI1_CLK.GPIO0_118 */ + >; + }; + wkup_uart0_pins_default: wkup-uart0-default-pins { pinctrl-single,pins = < J721E_WKUP_IOPAD(0xa0, PIN_INPUT, 0) /* (J29) WKUP_UART0_RXD */ diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts index a00f4a7d20d98..710f80a14b647 100644 --- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts +++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts @@ -720,6 +720,10 @@ ; }; +&serdes_wiz0 { + status = "okay"; +}; + &serdes0 { status = "okay"; serdes0_usb_link: phy@0 { @@ -731,6 +735,10 @@ }; }; +&serdes_wiz1 { + status = "okay"; +}; + &serdes1 { status = "okay"; serdes1_pcie_link: phy@0 { diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi index ed6f4ba08afca..ec8fcf9d16d6a 100644 --- a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi @@ -32,6 +32,8 @@ assigned-clocks = <&k3_clks 279 1>; assigned-clock-parents = <&k3_clks 279 5>; + status = "disabled"; + serdes0: serdes@f000000 { compatible = "ti,j721e-serdes-10g"; reg = <0x0f000000 0x00010000>; @@ -70,6 +72,8 @@ assigned-clocks = <&k3_clks 280 1>; assigned-clock-parents = <&k3_clks 280 5>; + status = "disabled"; + serdes1: serdes@f010000 { compatible = "ti,j721e-serdes-10g"; reg = <0x0f010000 0x00010000>; diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi index 2bf4547485e1b..013c0d25d3481 100644 --- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi @@ -77,7 +77,7 @@ serdes_ln_ctrl: mux-controller@4080 { compatible = "reg-mux"; - reg = <0x00004080 0x30>; + reg = <0x00004080 0x50>; #mux-control-cells = <1>; mux-reg-masks = <0x0 0x3>, <0x4 0x3>, /* SERDES0 lane0/1 select */ <0x8 0x3>, <0xc 0x3>, /* SERDES0 lane2/3 select */ diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 8fe7dbae33bf9..f988dd79add89 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1536,6 +1536,9 @@ CONFIG_PHY_HISTB_COMBPHY=y CONFIG_PHY_HISI_INNO_USB2=y CONFIG_PHY_MVEBU_CP110_COMPHY=y CONFIG_PHY_MTK_TPHY=y +CONFIG_PHY_MTK_HDMI=m +CONFIG_PHY_MTK_MIPI_DSI=m +CONFIG_PHY_MTK_DP=m CONFIG_PHY_QCOM_EDP=m CONFIG_PHY_QCOM_PCIE2=m CONFIG_PHY_QCOM_QMP=m diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index da6d2c1c0b030..5f4dc6364dbb9 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -370,12 +370,14 @@ /* * ISS values for SME traps */ - -#define ESR_ELx_SME_ISS_SME_DISABLED 0 -#define ESR_ELx_SME_ISS_ILL 1 -#define ESR_ELx_SME_ISS_SM_DISABLED 2 -#define ESR_ELx_SME_ISS_ZA_DISABLED 3 -#define ESR_ELx_SME_ISS_ZT_DISABLED 4 +#define ESR_ELx_SME_ISS_SMTC_MASK GENMASK(2, 0) +#define ESR_ELx_SME_ISS_SMTC(esr) ((esr) & ESR_ELx_SME_ISS_SMTC_MASK) + +#define ESR_ELx_SME_ISS_SMTC_SME_DISABLED 0 +#define ESR_ELx_SME_ISS_SMTC_ILL 1 +#define ESR_ELx_SME_ISS_SMTC_SM_DISABLED 2 +#define ESR_ELx_SME_ISS_SMTC_ZA_DISABLED 3 +#define ESR_ELx_SME_ISS_SMTC_ZT_DISABLED 4 /* ISS field definitions for MOPS exceptions */ #define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index f2a84efc36185..c8dcb67b81a72 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -6,6 +6,7 @@ #define __ASM_FP_H #include +#include #include #include #include @@ -94,6 +95,8 @@ struct cpu_fp_state { enum fp_type to_save; }; +DECLARE_PER_CPU(struct cpu_fp_state, fpsimd_last_state); + extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state); extern void fpsimd_flush_task_state(struct task_struct *target); diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 9edbd871c31bf..5f12cdc2b9671 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -330,13 +330,14 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b } /* - * If mprotect/munmap/etc occurs during TLB batched flushing, we need to - * synchronise all the TLBI issued with a DSB to avoid the race mentioned in - * flush_tlb_batched_pending(). + * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure + * all the previously issued TLBIs targeting mm have completed. But since we + * can be executing on a remote CPU, a DSB cannot guarantee this like it can + * for arch_tlbbatch_flush(). Our only option is to flush the entire mm. 
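+ *
+ * Illustrative interleaving (a sketch for exposition, not part of the
+ * patch):
+ *
+ *   CPU0 (reclaim, batching)        CPU1 (mprotect on the same mm)
+ *   issues broadcast TLBIs,
+ *   no DSB yet
+ *                                   flush_tlb_batched_pending()
+ *                                     a dsb(ish) here only completes
+ *                                     TLB maintenance issued by CPU1
+ *                                     itself; CPU0's TLBIs may still
+ *                                     be in flight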
*/ static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm) { - dsb(ish); + flush_tlb_mm(mm); } /* diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 05ccf4ec278f7..9ca5ffd8d817f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2959,6 +2959,13 @@ static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope) } #endif +#ifdef CONFIG_ARM64_SME +static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope) +{ + return system_supports_sme() && has_user_cpuid_feature(cap, scope); +} +#endif + static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL), HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES), @@ -3037,25 +3044,25 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC), #ifdef CONFIG_ARM64_SME HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME), - HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), - HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2), - HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), - HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), - HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), - HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), - HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), - HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), - HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), - HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16), - HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), - HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), - HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA), - HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4), - HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), + HWCAP_CAP_MATCH_ID(has_sme_feature, 
ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2), #endif /* CONFIG_ARM64_SME */ HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT), HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA), diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 3fcd9d080bf2a..d23315ef7b679 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -393,20 +393,16 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) * As per the ABI exit SME streaming mode and clear the SVE state not * shared with FPSIMD on syscall entry. */ -static inline void fp_user_discard(void) +static inline void fpsimd_syscall_enter(void) { - /* - * If SME is active then exit streaming mode. If ZA is active - * then flush the SVE registers but leave userspace access to - * both SVE and SME enabled, otherwise disable SME for the - * task and fall through to disabling SVE too. This means - * that after a syscall we never have any streaming mode - * register state to track, if this changes the KVM code will - * need updating. - */ + /* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */ if (system_supports_sme()) sme_smstop_sm(); + /* + * The CPU is not in streaming mode. If non-streaming SVE is not + * supported, there is no SVE state that needs to be discarded. + */ if (!system_supports_sve()) return; @@ -416,6 +412,33 @@ static inline void fp_user_discard(void) sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1; sve_flush_live(true, sve_vq_minus_one); } + + /* + * Any live non-FPSIMD SVE state has been zeroed. Allow + * fpsimd_save_user_state() to lazily discard SVE state until either + * the live state is unbound or fpsimd_syscall_exit() is called. + */ + __this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD); +} + +static __always_inline void fpsimd_syscall_exit(void) +{ + if (!system_supports_sve()) + return; + + /* + * The current task's user FPSIMD/SVE/SME state is now bound to this + * CPU. The fpsimd_last_state.to_save value is either: + * + * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU + * since fpsimd_syscall_enter(). + * + * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at + * any point. + * + * Reset this to FP_STATE_CURRENT to stop lazy discarding. 
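+ *
+ * Illustrative timeline (an assumed trace, for exposition only):
+ *
+ *   el0_svc()
+ *     fpsimd_syscall_enter()    // to_save = FP_STATE_FPSIMD
+ *     do_el0_svc()              // a preemption + reload here sets
+ *                               // to_save = FP_STATE_CURRENT
+ *     exit_to_user_mode()
+ *   fpsimd_syscall_exit()       // to_save = FP_STATE_CURRENT again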
+ */ + __this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT); } UNHANDLED(el1t, 64, sync) @@ -707,10 +730,11 @@ static void noinstr el0_svc(struct pt_regs *regs) { enter_from_user_mode(regs); cortex_a76_erratum_1463225_svc_handler(); - fp_user_discard(); + fpsimd_syscall_enter(); local_daif_restore(DAIF_PROCCTX); do_el0_svc(regs); exit_to_user_mode(regs); + fpsimd_syscall_exit(); } static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index f38d22dac140f..8854bce5cfe20 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -119,7 +119,7 @@ * whatever is in the FPSIMD registers is not saved to memory, but discarded. */ -static DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state); +DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state); __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = { #ifdef CONFIG_ARM64_SVE @@ -359,9 +359,6 @@ static void task_fpsimd_load(void) WARN_ON(preemptible()); WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE)); - if (system_supports_fpmr()) - write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR); - if (system_supports_sve() || system_supports_sme()) { switch (current->thread.fp_type) { case FP_STATE_FPSIMD: @@ -413,6 +410,9 @@ static void task_fpsimd_load(void) restore_ffr = system_supports_fa64(); } + if (system_supports_fpmr()) + write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR); + if (restore_sve_regs) { WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE); sve_load_state(sve_pffr(&current->thread), @@ -453,12 +453,15 @@ static void fpsimd_save_user_state(void) *(last->fpmr) = read_sysreg_s(SYS_FPMR); /* - * If a task is in a syscall the ABI allows us to only - * preserve the state shared with FPSIMD so don't bother - * saving the full SVE state in that case. + * Save SVE state if it is live. + * + * The syscall ABI discards live SVE state at syscall entry. When + * entering a syscall, fpsimd_syscall_enter() sets to_save to + * FP_STATE_FPSIMD to allow the SVE state to be lazily discarded until + * either new SVE state is loaded+bound or fpsimd_syscall_exit() is + * called prior to a return to userspace. */ - if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE) && - !in_syscall(current_pt_regs())) || + if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) || last->to_save == FP_STATE_SVE) { save_sve_regs = true; save_ffr = true; @@ -651,7 +654,7 @@ static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst, * task->thread.uw.fpsimd_state must be up to date before calling this * function. */ -static void fpsimd_to_sve(struct task_struct *task) +static inline void fpsimd_to_sve(struct task_struct *task) { unsigned int vq; void *sst = task->thread.sve_state; @@ -675,7 +678,7 @@ static void fpsimd_to_sve(struct task_struct *task) * bytes of allocated kernel memory. * task->thread.sve_state must be up to date before calling this function. */ -static void sve_to_fpsimd(struct task_struct *task) +static inline void sve_to_fpsimd(struct task_struct *task) { unsigned int vq, vl; void const *sst = task->thread.sve_state; @@ -1436,7 +1439,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) * If this not a trap due to SME being disabled then something * is being used in the wrong mode, report as SIGILL.
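 *
 * (The trap code lives in ISS bits [2:0] only - see the
 * ESR_ELx_SME_ISS_SMTC_MASK definition added in esr.h above - so the
 * masked ESR_ELx_SME_ISS_SMTC() compare below avoids mis-classifying
 * the trap when other ISS bits happen to be set.)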
*/ - if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) { + if (ESR_ELx_SME_ISS_SMTC(esr) != ESR_ELx_SME_ISS_SMTC_SME_DISABLED) { force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); return; } @@ -1460,6 +1463,8 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) sme_set_vq(vq_minus_one); fpsimd_bind_task_to_cpu(); + } else { + fpsimd_flush_task_state(current); } put_cpu_fpsimd_context(); @@ -1573,8 +1578,8 @@ void fpsimd_thread_switch(struct task_struct *next) fpsimd_save_user_state(); if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) { - fpsimd_load_kernel_state(next); fpsimd_flush_cpu_state(); + fpsimd_load_kernel_state(next); } else { /* * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's @@ -1661,6 +1666,9 @@ void fpsimd_flush_thread(void) current->thread.svcr = 0; } + if (system_supports_fpmr()) + current->thread.uw.fpmr = 0; + current->thread.fp_type = FP_STATE_FPSIMD; put_cpu_fpsimd_context(); @@ -1801,7 +1809,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) get_cpu_fpsimd_context(); current->thread.uw.fpsimd_state = *state; - if (test_thread_flag(TIF_SVE)) + if (current->thread.fp_type == FP_STATE_SVE) fpsimd_to_sve(current); task_fpsimd_load(); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 2bbcbb11d844c..2edf88c1c6957 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -544,6 +544,11 @@ static void permission_overlay_switch(struct task_struct *next) current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); if (current->thread.por_el0 != next->thread.por_el0) { write_sysreg_s(next->thread.por_el0, SYS_POR_EL0); + /* + * No ISB required as we can tolerate spurious Overlay faults - + * the fault handler will check again based on the new value + * of POR_EL0. + */ } } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 1559a239137f3..1a8f4284cb69a 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -140,7 +140,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) addr += n; if (regs_within_kernel_stack(regs, (unsigned long)addr)) - return *addr; + return READ_ONCE_NOCHECK(*addr); else return 0; } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 8b281cf308b30..850307b49babd 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -487,17 +487,29 @@ static void do_bad_area(unsigned long far, unsigned long esr, } } -static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma, - unsigned int mm_flags) +static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags) { - unsigned long iss2 = ESR_ELx_ISS2(esr); - if (!system_supports_poe()) return false; - if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay)) - return true; - + /* + * We do not check whether an Overlay fault has occurred because we + * cannot make a decision based solely on its value: + * + * - If Overlay is set, a fault did occur due to POE, but it may be + * spurious in those cases where we update POR_EL0 without ISB (e.g. + * on context-switch). We would then need to manually check POR_EL0 + * against vma_pkey(vma), which is exactly what + * arch_vma_access_permitted() does. + * + * - If Overlay is not set, we may still need to report a pkey fault. + * This is the case if an access was made within a mapping but with no + * page mapped, and POR_EL0 forbids the access (according to + * vma_pkey()). 
Such access will result in a SIGSEGV regardless + * because core code checks arch_vma_access_permitted(), but in order + * to report the correct error code - SEGV_PKUERR - we must handle + * that case here. + */ return !arch_vma_access_permitted(vma, mm_flags & FAULT_FLAG_WRITE, mm_flags & FAULT_FLAG_INSTRUCTION, @@ -595,7 +607,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto bad_area; } - if (fault_from_pkey(esr, vma, mm_flags)) { + if (fault_from_pkey(vma, mm_flags)) { pkey = vma_pkey(vma); vma_end_read(vma); fault = 0; @@ -639,7 +651,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto bad_area; } - if (fault_from_pkey(esr, vma, mm_flags)) { + if (fault_from_pkey(vma, mm_flags)) { pkey = vma_pkey(vma); mmap_read_unlock(mm); fault = 0; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 9bcd51fd67d4e..aed8d32979d9c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1285,7 +1285,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) next = addr; end = addr + PUD_SIZE; do { - pmd_free_pte_page(pmdp, next); + if (pmd_present(pmdp_get(pmdp))) + pmd_free_pte_page(pmdp, next); } while (pmdp++, next += PMD_SIZE, next != end); pud_clear(pudp); diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 9d01361696a14..ae551b8571374 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -83,7 +83,26 @@ HYPERCALL3(vcpu_op); HYPERCALL1(platform_op_raw); HYPERCALL2(multicall); HYPERCALL2(vm_assist); -HYPERCALL3(dm_op); + +SYM_FUNC_START(HYPERVISOR_dm_op) + mov x16, #__HYPERVISOR_dm_op; \ + /* + * dm_op hypercalls are issued by userspace. The kernel needs to + * enable access to TTBR0_EL1 as the hypervisor would issue stage 1 + * translations to user memory via AT instructions. Since AT + * instructions are not affected by the PAN bit (ARMv8.1), we only + * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation + * is enabled (it implies that hardware UAO and PAN are disabled). + */ + uaccess_ttbr0_enable x6, x7, x8 + hvc XEN_IMM + + /* + * Disable userspace access from kernel once the hypercall has completed.
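+ * (uaccess_ttbr0_enable/uaccess_ttbr0_disable are expected to expand
+ * to no-ops unless CONFIG_ARM64_SW_TTBR0_PAN is enabled, so this
+ * pairing should only cost anything when the TTBR0 PAN emulation is
+ * actually in use.)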
+ */ + uaccess_ttbr0_disable x6, x7 + ret +SYM_FUNC_END(HYPERVISOR_dm_op); SYM_FUNC_START(privcmd_call) mov x16, x0 diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h index 319a8c616f1f5..003172b8406be 100644 --- a/arch/loongarch/include/asm/irqflags.h +++ b/arch/loongarch/include/asm/irqflags.h @@ -14,40 +14,48 @@ static inline void arch_local_irq_enable(void) { u32 flags = CSR_CRMD_IE; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } static inline void arch_local_irq_disable(void) { u32 flags = 0; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } static inline unsigned long arch_local_irq_save(void) { u32 flags = 0; + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); return flags; } static inline void arch_local_irq_restore(unsigned long flags) { + register u32 mask asm("t0") = CSR_CRMD_IE; + __asm__ __volatile__( "csrxchg %[val], %[mask], %[reg]\n\t" : [val] "+r" (flags) - : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD) + : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD) : "memory"); } diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h index 02f36772541b7..7e9edc1cb610d 100644 --- a/arch/loongarch/include/asm/vdso/getrandom.h +++ b/arch/loongarch/include/asm/vdso/getrandom.h @@ -20,7 +20,7 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (buffer), "r" (len), "r" (flags) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h index 89e6b222c2f2d..2d1a9c27af292 100644 --- a/arch/loongarch/include/asm/vdso/gettimeofday.h +++ b/arch/loongarch/include/asm/vdso/gettimeofday.h @@ -25,7 +25,7 @@ static __always_inline long gettimeofday_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (tv), "r" (tz) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -44,7 +44,7 @@ static __always_inline long clock_gettime_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (clkid), "r" (ts) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); @@ -63,7 +63,7 @@ static __always_inline int clock_getres_fallback( asm volatile( " syscall 0\n" - : "+r" (ret) + : "=r" (ret) : "r" (nr), "r" (clkid), "r" (ts) : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", "memory"); diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index cea84d7f2b91a..02dad4624fe32 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -47,7 +47,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, pmd = pmd_offset(pud, addr); } } - return pmd_none(pmdp_get(pmd)) ? 
NULL : (pte_t *) pmd; + + return (!pmd || pmd_none(pmdp_get(pmd))) ? NULL : (pte_t *) pmd; } uint64_t pmd_to_entrylo(unsigned long pmd_val) diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index e324410ef239c..d26c7f4f8c360 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -793,7 +793,7 @@ static void __init mac_identify(void) } macintosh_config = mac_data_table; - for (m = macintosh_config; m->ident != -1; m++) { + for (m = &mac_data_table[1]; m->ident != -1; m++) { if (m->ident == model) { macintosh_config = m; break; diff --git a/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts b/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts index c7ea4f1c0bb21..6c277ab83d4b9 100644 --- a/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts +++ b/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts @@ -29,6 +29,7 @@ compatible = "loongson,pch-msi-1.0"; reg = <0 0x2ff00000 0 0x8>; interrupt-controller; + #interrupt-cells = <1>; msi-controller; loongson,msi-base-vec = <64>; loongson,msi-num-vecs = <64>; diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index b289b2c1b2946..c729bd6878042 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -27,6 +27,7 @@ endif # offsets. cflags-vdso := $(ccflags-vdso) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ + $(filter -std=%,$(KBUILD_CFLAGS)) \ -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index 92227fa813dc3..17c42d718eb33 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile @@ -18,6 +18,7 @@ KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os ifndef CONFIG_64BIT KBUILD_CFLAGS += -mfast-indirect-calls endif +KBUILD_CFLAGS += -std=gnu11 LDFLAGS_vmlinux := -X -e startup --as-needed -T $(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index f4626943633ad..00e97204783ed 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -25,7 +25,7 @@ #define DPRINTF(fmt, args...) 
#endif -#define RFMT "%#08lx" +#define RFMT "0x%08lx" /* 1111 1100 0000 0000 0001 0011 1100 0000 */ #define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 02897f4b0dbf8..b891910fce8a6 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -183,7 +183,7 @@ /* * Used to name C functions called from asm */ -#ifdef CONFIG_PPC_KERNEL_PCREL +#if defined(__powerpc64__) && defined(CONFIG_PPC_KERNEL_PCREL) #define CFUNC(name) name@notoc #else #define CFUNC(name) name diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h index 2c145da3b774a..b5211e413829a 100644 --- a/arch/powerpc/include/uapi/asm/ioctls.h +++ b/arch/powerpc/include/uapi/asm/ioctls.h @@ -23,10 +23,10 @@ #define TCSETSW _IOW('t', 21, struct termios) #define TCSETSF _IOW('t', 22, struct termios) -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) +#define TCGETA 0x40147417 /* _IOR('t', 23, struct termio) */ +#define TCSETA 0x80147418 /* _IOW('t', 24, struct termio) */ +#define TCSETAW 0x80147419 /* _IOW('t', 25, struct termio) */ +#define TCSETAF 0x8014741c /* _IOW('t', 28, struct termio) */ #define TCSBRK _IO('t', 29) #define TCXONC _IO('t', 30) diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index f43c1198768c6..04d6a1e8ff9a2 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -162,9 +162,7 @@ endif obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o -ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)(CONFIG_PPC_BOOK3S),) obj-y += ppc_save_regs.o -endif obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 83fe99861eb17..ca7f7bb2b4786 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1509,6 +1509,8 @@ int eeh_pe_configure(struct eeh_pe *pe) /* Invalid PE ? */ if (!pe) return -ENODEV; + else + ret = eeh_ops->configure_bridge(pe); return ret; } diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index c568cad6a22e6..6ba68b28ed870 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -53,7 +53,7 @@ ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WAR ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS)) CC32FLAGS := -m32 -CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc +CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc -mpcrel ifdef CONFIG_CC_IS_CLANG # This flag is supported by clang for 64-bit but not 32-bit so it will cause # an unused command line flag warning for this file. 
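As a cross-check on the termio ioctl literals in the uapi hunk above, the values can be reproduced by expanding powerpc's _IOC() encoding by hand. A minimal standalone sketch, where PPC_IOC() is a hypothetical stand-in for the kernel's _IOC() with powerpc's shift values (3 direction bits at bit 29 with _IOC_READ = 2 and _IOC_WRITE = 4, 13 size bits at bit 16), assuming sizeof(struct termio) == 20 on this ABI:

#include <stdio.h>

/* Assumed powerpc layout: dir:3 @ bit 29, size:13 @ bit 16, type @ 8, nr @ 0 */
#define PPC_IOC(dir, type, nr, size) \
	(((unsigned int)(dir) << 29) | ((unsigned int)(size) << 16) | \
	 ((unsigned int)(type) << 8) | (unsigned int)(nr))

int main(void)
{
	printf("TCGETA  = %#010x\n", PPC_IOC(2, 't', 23, 20)); /* 0x40147417 */
	printf("TCSETA  = %#010x\n", PPC_IOC(4, 't', 24, 20)); /* 0x80147418 */
	printf("TCSETAW = %#010x\n", PPC_IOC(4, 't', 25, 20)); /* 0x80147419 */
	printf("TCSETAF = %#010x\n", PPC_IOC(4, 't', 28, 20)); /* 0x8014741c */
	return 0;
}

The four printed values match the hard-coded constants, which confirms the literals are simply the old macro expansions spelled out.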
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index 9ac3266e49652..a325c1c02f96d 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -359,7 +359,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs) if (TRAP(regs) == INTERRUPT_SYSTEM_RESET) is_via_system_reset = 1; - crash_smp_send_stop(); + if (IS_ENABLED(CONFIG_SMP)) + crash_smp_send_stop(); + else + crash_kexec_prepare(); crash_save_cpu(regs, crashing_cpu); diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index 0b6365d85d117..dc6f75d3ac6ef 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -521,6 +521,15 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) return -EINVAL; } + /* + * Map the complete page to the paste address, so the user + * space should pass 0ULL as the offset parameter. + */ + if (vma->vm_pgoff) { + pr_debug("Page offset unsupported to map paste address\n"); + return -EINVAL; + } + /* Ensure instance has an open send window */ if (!txwin) { pr_err("No send window open?\n"); diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 877720c645151..35471b679638a 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -48,11 +48,15 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf, static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma) { struct memtrace_entry *ent = filp->private_data; + unsigned long ent_nrpages = ent->size >> PAGE_SHIFT; + unsigned long vma_nrpages = vma_pages(vma); - if (ent->size < vma->vm_end - vma->vm_start) + /* The requested page offset should be within the object's page count */ + if (vma->vm_pgoff >= ent_nrpages) return -EINVAL; - if (vma->vm_pgoff << PAGE_SHIFT >= ent->size) + /* The requested mapping range should remain within the bounds */ + if (vma_nrpages > ent_nrpages - vma->vm_pgoff) return -EINVAL; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 34657e0288e94..0b952ad638f9c 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -198,7 +198,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl) static void tce_free_pSeries(struct iommu_table *tbl) { - if (!tbl->it_userspace) + if (tbl->it_userspace) tce_iommu_userspace_view_free(tbl); } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6dfb55b52d363..ba98a680a12e6 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -524,7 +524,12 @@ static struct msi_domain_info pseries_msi_domain_info = { static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { - __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + + if (dev->current_state == PCI_D0) + __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + else + get_cached_msi_msg(data->irq, msg); } static struct irq_chip pseries_msi_irq_chip = { diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index ebbce134917cc..6efa95ad033ab 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -169,7 +169,7 @@ break; \ case 4: \ __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \ - __ret, __ptr, (long),
__old, __new); \ + __ret, __ptr, (long)(int)(long), __old, __new); \ break; \ case 8: \ __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 479550cdb440f..03881122506a7 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -916,7 +916,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) */ #ifdef CONFIG_64BIT #define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2) -#define TASK_SIZE_MAX LONG_MAX #ifdef CONFIG_COMPAT #define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE) diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c index e6fbaaf549562..87d6559448039 100644 --- a/arch/riscv/kernel/cpu_ops_sbi.c +++ b/arch/riscv/kernel/cpu_ops_sbi.c @@ -18,10 +18,10 @@ const struct cpu_operations cpu_ops_sbi; /* * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can - * be invoked from multiple threads in parallel. Define a per cpu data + * be invoked from multiple threads in parallel. Define an array of boot data * to handle that. */ -static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data); +static struct sbi_hart_boot_data boot_data[NR_CPUS]; static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr, unsigned long priv) @@ -67,7 +67,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle) unsigned long boot_addr = __pa_symbol(secondary_start_sbi); unsigned long hartid = cpuid_to_hartid_map(cpuid); unsigned long hsm_data; - struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid); + struct sbi_hart_boot_data *bdata = &boot_data[cpuid]; /* Make sure tidle is updated */ smp_mb(); diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index cbe2a179331d2..99e51f7755393 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -31,7 +31,7 @@ SECTIONS *(.data .data.* .gnu.linkonce.d.*) *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) - } + } :text .note : { *(.note.*) } :text :note diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 6e704ed86a83a..635c67ed36653 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -139,9 +139,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, struct kvm_vcpu *tmp; kvm_for_each_vcpu(i, tmp, vcpu->kvm) { - spin_lock(&vcpu->arch.mp_state_lock); + spin_lock(&tmp->arch.mp_state_lock); WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); - spin_unlock(&vcpu->arch.mp_state_lock); + spin_unlock(&tmp->arch.mp_state_lock); } kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP); diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c index 5fbf3f94f1e85..b17fad091babd 100644 --- a/arch/riscv/kvm/vcpu_sbi_replace.c +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -103,7 +103,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA: - if (cp->a2 == 0 && cp->a3 == 0) + if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL) kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask); else kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask, @@ -111,7 +111,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID: - if (cp->a2 == 0 && cp->a3 == 0) + if ((cp->a2 == 0 && cp->a3 == 0) 
|| cp->a3 == -1UL) kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask, cp->a4); else @@ -127,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID: /* * Until nested virtualization is implemented, the - * SBI HFENCE calls should be treated as NOPs + * SBI HFENCE calls should return not supported, + * hence the fallthrough. */ - break; default: retdata->err_val = SBI_ERR_NOT_SUPPORTED; } diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index b816727298872..b2e4b81763f88 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -24,7 +24,20 @@ void flush_icache_all(void) if (num_online_cpus() < 2) return; - else if (riscv_use_sbi_for_rfence()) + + /* + * Make sure all previous writes to the D$ are ordered before making + * the IPI. The RISC-V spec states that a hart must execute a data fence + * before triggering a remote fence.i in order to make the modification + * visible for remote harts. + * + * IPIs on RISC-V are triggered by MMIO writes to either CLINT or + * S-IMSIC, so the fence ensures previous data writes "happen before" + * the MMIO. + */ + RISCV_FENCE(w, o); + + if (riscv_use_sbi_for_rfence()) sbi_remote_fence_i(NULL); else on_each_cpu(ipi_remote_fence_i, NULL, 1); diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index bc3a22704e093..10950953429e6 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -38,6 +38,7 @@ static int s390_sha1_init(struct shash_desc *desc) sctx->state[4] = SHA1_H4; sctx->count = 0; sctx->func = CPACF_KIMD_SHA_1; + sctx->first_message_part = 0; return 0; } @@ -62,6 +63,7 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in) memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); sctx->func = CPACF_KIMD_SHA_1; + sctx->first_message_part = 0; return 0; } diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 6f1ccdf93d3e5..0204d4bca3403 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -31,6 +31,7 @@ static int s390_sha256_init(struct shash_desc *desc) sctx->state[7] = SHA256_H7; sctx->count = 0; sctx->func = CPACF_KIMD_SHA_256; + sctx->first_message_part = 0; return 0; } @@ -55,6 +56,7 @@ static int sha256_import(struct shash_desc *desc, const void *in) memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sctx->func = CPACF_KIMD_SHA_256; + sctx->first_message_part = 0; return 0; } @@ -90,6 +92,7 @@ static int s390_sha224_init(struct shash_desc *desc) sctx->state[7] = SHA224_H7; sctx->count = 0; sctx->func = CPACF_KIMD_SHA_256; + sctx->first_message_part = 0; return 0; } diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 04f11c4077634..b53a7793bd244 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -32,6 +32,7 @@ static int sha512_init(struct shash_desc *desc) *(__u64 *)&ctx->state[14] = SHA512_H7; ctx->count = 0; ctx->func = CPACF_KIMD_SHA_512; + ctx->first_message_part = 0; return 0; } @@ -60,6 +61,7 @@ static int sha512_import(struct shash_desc *desc, const void *in) memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sctx->func = CPACF_KIMD_SHA_512; + sctx->first_message_part = 0; return 0; } @@ -97,6 +99,7 @@ static int sha384_init(struct shash_desc *desc) *(__u64 *)&ctx->state[14] =
SHA384_H7; ctx->count = 0; ctx->func = CPACF_KIMD_SHA_512; + ctx->first_message_part = 0; return 0; } diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index a688351f4ab52..7bc97ebd60d5d 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -317,7 +317,7 @@ enum prot_type { PROT_TYPE_DAT = 3, PROT_TYPE_IEP = 4, /* Dummy value for passing an initialized value when code != PGM_PROTECTION */ - PROT_NONE, + PROT_TYPE_DUMMY, }; static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar, @@ -333,7 +333,7 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, switch (code) { case PGM_PROTECTION: switch (prot) { - case PROT_NONE: + case PROT_TYPE_DUMMY: /* We should never get here, acts like termination */ WARN_ON_ONCE(1); break; @@ -803,7 +803,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, gpa = kvm_s390_real_to_abs(vcpu, ga); if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) { rc = PGM_ADDRESSING; - prot = PROT_NONE; + prot = PROT_TYPE_DUMMY; } } if (rc) @@ -961,7 +961,7 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, if (rc == PGM_PROTECTION) prot = PROT_TYPE_KEYC; else - prot = PROT_NONE; + prot = PROT_TYPE_DUMMY; rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate); } out_unlock: diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9d440a0b729eb..64bb8b71013ae 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -605,17 +605,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, } /* Setup stack and backchain */ if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - /* lgr %w1,%r15 (backchain) */ - EMIT4(0xb9040000, REG_W1, REG_15); + /* lgr %w1,%r15 (backchain) */ + EMIT4(0xb9040000, REG_W1, REG_15); /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); /* aghi %r15,-STK_OFF */ EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth)); - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - /* stg %w1,152(%r15) (backchain) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, - REG_15, 152); + /* stg %w1,152(%r15) (backchain) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, + REG_15, 152); } } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 88f72745fa59e..9e19d6076d3e8 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -44,6 +44,7 @@ /* list of all detected zpci devices */ static LIST_HEAD(zpci_list); static DEFINE_SPINLOCK(zpci_list_lock); +static DEFINE_MUTEX(zpci_add_remove_lock); static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE); static DEFINE_SPINLOCK(zpci_domain_lock); @@ -69,6 +70,15 @@ EXPORT_SYMBOL_GPL(zpci_aipb); struct airq_iv *zpci_aif_sbv; EXPORT_SYMBOL_GPL(zpci_aif_sbv); +void zpci_zdev_put(struct zpci_dev *zdev) +{ + if (!zdev) + return; + mutex_lock(&zpci_add_remove_lock); + kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock); + mutex_unlock(&zpci_add_remove_lock); +} + struct zpci_dev *get_zdev_by_fid(u32 fid) { struct zpci_dev *tmp, *zdev = NULL; @@ -831,6 +841,7 @@ int zpci_add_device(struct zpci_dev *zdev) { int rc; + mutex_lock(&zpci_add_remove_lock); zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state); rc = zpci_init_iommu(zdev); if (rc) @@ -844,12 +855,14 @@ int zpci_add_device(struct zpci_dev *zdev) spin_lock(&zpci_list_lock); list_add_tail(&zdev->entry, 
&zpci_list); spin_unlock(&zpci_list_lock); + mutex_unlock(&zpci_add_remove_lock); return 0; error_destroy_iommu: zpci_destroy_iommu(zdev); error: zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc); + mutex_unlock(&zpci_add_remove_lock); return rc; } @@ -919,21 +932,20 @@ int zpci_deconfigure_device(struct zpci_dev *zdev) * @zdev: the zpci_dev that was reserved * * Handle the case that a given zPCI function was reserved by another system. - * After a call to this function the zpci_dev can not be found via - * get_zdev_by_fid() anymore but may still be accessible via existing - * references though it will not be functional anymore. */ void zpci_device_reserved(struct zpci_dev *zdev) { - /* - * Remove device from zpci_list as it is going away. This also - * makes sure we ignore subsequent zPCI events for this device. - */ - spin_lock(&zpci_list_lock); - list_del(&zdev->entry); - spin_unlock(&zpci_list_lock); + lockdep_assert_held(&zdev->state_lock); + /* We may declare the device reserved multiple times */ + if (zdev->state == ZPCI_FN_STATE_RESERVED) + return; zdev->state = ZPCI_FN_STATE_RESERVED; zpci_dbg(3, "rsv fid:%x\n", zdev->fid); + /* + * The underlying device is gone. Allow the zdev to be freed + * as soon as all other references are gone by accounting for + * the removal as a dropped reference. + */ zpci_zdev_put(zdev); } @@ -941,13 +953,14 @@ void zpci_release_device(struct kref *kref) { struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref); + lockdep_assert_held(&zpci_add_remove_lock); WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED); - - if (zdev->zbus->bus) - zpci_bus_remove_device(zdev, false); - - if (zdev_enabled(zdev)) - zpci_disable_device(zdev); + /* + * We already hold zpci_list_lock thanks to kref_put_lock(). + * This makes sure no new reference can be taken from the list. 
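+ *
+ * For exposition, the matching put side (zpci_zdev_put() above) is,
+ * in sketch form:
+ *
+ *   mutex_lock(&zpci_add_remove_lock);
+ *   kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock);
+ *   mutex_unlock(&zpci_add_remove_lock);
+ *
+ * where, on the final put, zpci_release_device() is entered with
+ * zpci_list_lock held and is responsible for dropping it.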
+ */ + list_del(&zdev->entry); + spin_unlock(&zpci_list_lock); if (zdev->has_hp_slot) zpci_exit_slot(zdev); diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index af9f0ac79a1b1..3febb3b297c0c 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -17,11 +17,8 @@ int zpci_bus_scan_device(struct zpci_dev *zdev); void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error); void zpci_release_device(struct kref *kref); -static inline void zpci_zdev_put(struct zpci_dev *zdev) -{ - if (zdev) - kref_put(&zdev->kref, zpci_release_device); -} + +void zpci_zdev_put(struct zpci_dev *zdev); static inline void zpci_zdev_get(struct zpci_dev *zdev) { diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 7f7b732b3f3ef..ef44feb1a9daa 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -105,6 +105,10 @@ static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev, struct zpci_dev *zdev = to_zpci(pdev); int rc; + /* The underlying device may have been disabled by the event */ + if (!zdev_enabled(zdev)) + return PCI_ERS_RESULT_NEED_RESET; + pr_info("%s: Unblocking device access for examination\n", pci_name(pdev)); rc = zpci_reset_load_store_blocked(zdev); if (rc) { @@ -260,6 +264,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); struct pci_dev *pdev = NULL; pci_ers_result_t ers_res; + u32 fh = 0; + int rc; zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n", ccdf->fid, ccdf->fh, ccdf->pec); @@ -268,6 +274,15 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) if (zdev) { mutex_lock(&zdev->state_lock); + rc = clp_refresh_fh(zdev->fid, &fh); + if (rc) + goto no_pdev; + if (!fh || ccdf->fh != fh) { + /* Ignore events with stale handles */ + zpci_dbg(3, "err fid:%x, fh:%x (stale %x)\n", + ccdf->fid, fh, ccdf->fh); + goto no_pdev; + } zpci_update_fh(zdev, ccdf->fh); if (zdev->zbus->bus) pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); @@ -322,6 +337,22 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) zdev->state = ZPCI_FN_STATE_STANDBY; } +static void zpci_event_reappear(struct zpci_dev *zdev) +{ + lockdep_assert_held(&zdev->state_lock); + /* + * The zdev is in the reserved state. This means that it was presumed to + * go away but there are still undropped references. Now, the platform + * announced its availability again. Bring back the lingering zdev + * to standby. This is safe because we hold a temporary reference + * now so that it won't go away. Account for the re-appearance of the + * underlying device by incrementing the reference count. 
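+ *
+ * Rough lifecycle of the list reference (illustrative):
+ *   zpci_add_device()       -> zdev goes on zpci_list, holding one
+ *                              reference for the list
+ *   zpci_device_reserved()  -> zpci_zdev_put() drops that reference
+ *   zpci_event_reappear()   -> zpci_zdev_get() re-takes it before the
+ *                              zdev leaves ZPCI_FN_STATE_RESERVED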
+ */ + zdev->state = ZPCI_FN_STATE_STANDBY; + zpci_zdev_get(zdev); + zpci_dbg(1, "rea fid:%x, fh:%x\n", zdev->fid, zdev->fh); +} + static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); @@ -345,8 +376,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; } } else { + if (zdev->state == ZPCI_FN_STATE_RESERVED) + zpci_event_reappear(zdev); /* the configuration request may be stale */ - if (zdev->state != ZPCI_FN_STATE_STANDBY) + else if (zdev->state != ZPCI_FN_STATE_STANDBY) break; zdev->state = ZPCI_FN_STATE_CONFIGURED; } @@ -362,6 +395,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; } } else { + if (zdev->state == ZPCI_FN_STATE_RESERVED) + zpci_event_reappear(zdev); zpci_update_fh(zdev, ccdf->fh); } break; diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 4779c3cb6cfab..0fa34c5012967 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -228,7 +228,7 @@ static inline int __pcilg_mio_inuser( [ioaddr_len] "+&d" (ioaddr_len.pair), [cc] "+d" (cc), [val] "=d" (val), [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp), - [shift] "+d" (shift) + [shift] "+a" (shift) :: "cc", "memory"); /* did we write everything to the user space buffer? */ diff --git a/arch/um/Makefile b/arch/um/Makefile index 00b63bac5effb..3317d87e20920 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -151,5 +151,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated archclean: @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ -o -name '*.gcov' \) -type f -print | xargs rm -f + $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING DEV_NULL_PATH diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c index b4f8b8e605644..592b899820d64 100644 --- a/arch/um/drivers/ubd_user.c +++ b/arch/um/drivers/ubd_user.c @@ -41,7 +41,7 @@ int start_io_thread(unsigned long sp, int *fd_out) *fd_out = fds[1]; err = os_set_fd_block(*fd_out, 0); - err = os_set_fd_block(kernel_fd, 0); + err |= os_set_fd_block(kernel_fd, 0); if (err) { printk("start_io_thread - failed to set nonblocking I/O.\n"); goto out_close; diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 64c09db392c16..7a88b13d289f1 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -1592,35 +1592,19 @@ static void vector_eth_configure( device->dev = dev; - *vp = ((struct vector_private) - { - .list = LIST_HEAD_INIT(vp->list), - .dev = dev, - .unit = n, - .options = get_transport_options(def), - .rx_irq = 0, - .tx_irq = 0, - .parsed = def, - .max_packet = get_mtu(def) + ETH_HEADER_OTHER, - /* TODO - we need to calculate headroom so that ip header - * is 16 byte aligned all the time - */ - .headroom = get_headroom(def), - .form_header = NULL, - .verify_header = NULL, - .header_rxbuffer = NULL, - .header_txbuffer = NULL, - .header_size = 0, - .rx_header_size = 0, - .rexmit_scheduled = false, - .opened = false, - .transport_data = NULL, - .in_write_poll = false, - .coalesce = 2, - .req_size = get_req_size(def), - .in_error = false, - .bpf = NULL - }); + INIT_LIST_HEAD(&vp->list); + vp->dev = dev; + vp->unit = n; + vp->options = get_transport_options(def); + vp->parsed = def; + vp->max_packet = get_mtu(def) + ETH_HEADER_OTHER; + /* + * TODO - we need to calculate headroom so that ip header + * is 16 byte aligned all the time + */ + vp->headroom = get_headroom(def); + vp->coalesce = 
2; vp->req_size = get_req_size(def); dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST); INIT_WORK(&vp->reset_tx, vector_reset_tx); diff --git a/arch/um/include/asm/asm-prototypes.h b/arch/um/include/asm/asm-prototypes.h index 5898a26daa0dd..408b31d591279 100644 --- a/arch/um/include/asm/asm-prototypes.h +++ b/arch/um/include/asm/asm-prototypes.h @@ -1 +1,6 @@ #include +#include + +#ifdef CONFIG_UML_X86 +extern void cmpxchg8b_emu(void); +#endif diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 97c8df9c44017..9077bdb26cc35 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -17,6 +17,122 @@ #include #include +/* + * NOTE: UML does not have exception tables. As such, this is almost a copy + * of the code in mm/memory.c, only adjusting the logic to simply check whether + * we are coming from the kernel instead of doing an additional lookup in the + * exception table. + * We can do this simplification because we never get here if the exception was + * fixable. + */ +static inline bool get_mmap_lock_carefully(struct mm_struct *mm, bool is_user) +{ + if (likely(mmap_read_trylock(mm))) + return true; + + if (!is_user) + return false; + + return !mmap_read_lock_killable(mm); +} + +static inline bool mmap_upgrade_trylock(struct mm_struct *mm) +{ + /* + * We don't have this operation yet. + * + * It should be easy enough to do: it's basically an + * atomic_long_try_cmpxchg_acquire() + * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but + * it also needs the proper lockdep magic etc. + */ + return false; +} + +static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, bool is_user) +{ + mmap_read_unlock(mm); + if (!is_user) + return false; + + return !mmap_write_lock_killable(mm); +} + +/* + * Helper for page fault handling. + * + * This is kind of equivalent to "mmap_read_lock()" followed + * by "find_extend_vma()", except it's a lot more careful about + * the locking (and will drop the lock on failure). + * + * For example, if we have a kernel bug that causes a page + * fault, we don't want to just use mmap_read_lock() to get + * the mm lock, because that would deadlock if the bug were + * to happen while we're holding the mm lock for writing. + * + * So this checks the exception tables on kernel faults in + * order to only do all this for instructions that are actually + * expected to fault. + * + * We can also actually take the mm lock for writing if we + * need to extend the vma, which helps the VM layer a lot. + */ +static struct vm_area_struct * +um_lock_mm_and_find_vma(struct mm_struct *mm, + unsigned long addr, bool is_user) +{ + struct vm_area_struct *vma; + + if (!get_mmap_lock_carefully(mm, is_user)) + return NULL; + + vma = find_vma(mm, addr); + if (likely(vma && (vma->vm_start <= addr))) + return vma; + + /* + * Well, dang. We might still be successful, but only + * if we can extend a vma to do so. + */ + if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { + mmap_read_unlock(mm); + return NULL; + } + + /* + * We can try to upgrade the mmap lock atomically, + * in which case we can continue to use the vma + * we already looked up. + * + * Otherwise we'll have to drop the mmap lock and + * re-take it, and also look up the vma again, + * re-checking it.
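+ *
+ * In sketch form, the slow path below is:
+ *
+ *   mmap_read_unlock(mm);
+ *   mmap_write_lock_killable(mm);   // upgrade_mmap_lock_carefully()
+ *   vma = find_vma(mm, addr);       // must be looked up afresh
+ *   expand_stack_locked(vma, addr);
+ *   mmap_write_downgrade(mm);       // return holding the read lock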
+ */ + if (!mmap_upgrade_trylock(mm)) { + if (!upgrade_mmap_lock_carefully(mm, is_user)) + return NULL; + + vma = find_vma(mm, addr); + if (!vma) + goto fail; + if (vma->vm_start <= addr) + goto success; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto fail; + } + + if (expand_stack_locked(vma, addr)) + goto fail; + +success: + mmap_write_downgrade(mm); + return vma; + +fail: + mmap_write_unlock(mm); + return NULL; +} + /* * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by * segv(). @@ -43,21 +159,10 @@ int handle_page_fault(unsigned long address, unsigned long ip, if (is_user) flags |= FAULT_FLAG_USER; retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); - if (!vma) - goto out; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto out; - if (is_user && !ARCH_IS_STACKGROW(address)) - goto out; - vma = expand_stack(mm, address); + vma = um_lock_mm_and_find_vma(mm, address, is_user); if (!vma) goto out_nosemaphore; -good_area: *code_out = SEGV_ACCERR; if (is_write) { if (!(vma->vm_flags & VM_WRITE)) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5828d4ef03ead..09ae1537c3b27 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -138,7 +138,7 @@ config X86 select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANTS_NO_INSTR select ARCH_WANT_GENERAL_HUGETLB - select ARCH_WANT_HUGE_PMD_SHARE + select ARCH_WANT_HUGE_PMD_SHARE if X86_64 select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64 select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64 @@ -2765,6 +2765,15 @@ disabled, mitigation cannot be enabled via cmdline. See +config MITIGATION_TSA + bool "Mitigate Transient Scheduler Attacks" + depends on CPU_SUP_AMD + default y + help + Enable mitigation for Transient Scheduler Attacks. TSA is a hardware + security vulnerability on AMD CPUs which can lead to forwarding of + invalid info to subsequent instructions and thus can affect their + timing and thereby cause data leakage. endif config ARCH_HAS_ADD_PAGES diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S index 5b96249734ada..b0d5ab951231c 100644 --- a/arch/x86/entry/entry.S +++ b/arch/x86/entry/entry.S @@ -33,20 +33,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb); /* * Define the VERW operand that is disguised as entry code so that - * it can be referenced with KPTI enabled. This ensure VERW can be + * it can be referenced with KPTI enabled. This ensures VERW can be used late in exit-to-user path after page tables are switched.
*/ .pushsection .entry.text, "ax" .align L1_CACHE_BYTES, 0xcc -SYM_CODE_START_NOALIGN(mds_verw_sel) +SYM_CODE_START_NOALIGN(x86_verw_sel) UNWIND_HINT_UNDEFINED ANNOTATE_NOENDBR .word __KERNEL_DS .align L1_CACHE_BYTES, 0xcc -SYM_CODE_END(mds_verw_sel); +SYM_CODE_END(x86_verw_sel); /* For KVM */ -EXPORT_SYMBOL_GPL(mds_verw_sel); +EXPORT_SYMBOL_GPL(x86_verw_sel); .popsection diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 0bfde2ea5cb8c..cdf7bf0298362 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -38,7 +38,6 @@ struct amd_uncore_ctx { int refcnt; int cpu; struct perf_event **events; - struct hlist_node node; }; struct amd_uncore_pmu { @@ -890,6 +889,39 @@ static void amd_uncore_umc_start(struct perf_event *event, int flags) perf_event_update_userpage(event); } +static void amd_uncore_umc_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev, new, shift; + s64 delta; + + shift = COUNTER_SHIFT + 1; + prev = local64_read(&hwc->prev_count); + + /* + * UMC counters do not have RDPMC assignments. Read counts directly + * from the corresponding PERF_CTR. + */ + rdmsrl(hwc->event_base, new); + + /* + * Unlike the other uncore counters, UMC counters saturate and set the + * Overflow bit (bit 48) on overflow. Since they do not roll over, + * proactively reset the corresponding PERF_CTR when bit 47 is set so + * that the counter never gets a chance to saturate. + */ + if (new & BIT_ULL(63 - COUNTER_SHIFT)) { + wrmsrl(hwc->event_base, 0); + local64_set(&hwc->prev_count, 0); + } else { + local64_set(&hwc->prev_count, new); + } + + delta = (new << shift) - (prev << shift); + delta >>= shift; + local64_add(delta, &event->count); +} + static void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu) { @@ -967,7 +999,7 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu) .del = amd_uncore_del, .start = amd_uncore_umc_start, .stop = amd_uncore_stop, - .read = amd_uncore_read, + .read = amd_uncore_umc_read, .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, .module = THIS_MODULE, }; diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index aa30fd8cad7f5..b6099456477cd 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -69,4 +69,16 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type); extern struct cpumask cpus_stop_mask; +union zen_patch_rev { + struct { + __u32 rev : 8, + stepping : 4, + model : 4, + __reserved : 4, + ext_model : 4, + ext_fam : 8; + }; + __u32 ucode_rev; +}; + #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 308e7d97135cf..ef5749a0d8c24 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -455,6 +455,7 @@ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */ #define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */ #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */ +#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */ #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */ @@ -477,6 +478,10 @@ #define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */ 
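(Editorial aside on the amd_uncore_umc_read() hunk above: a minimal userspace model of its delta arithmetic, assuming a 48-bit counter payload with the Overflow bit at bit 48. The umc_delta() helper and the sample values are hypothetical; only the shift-by-COUNTER_SHIFT + 1 masking mirrors the patch.)

	#include <stdint.h>
	#include <stdio.h>

	#define COUNTER_SHIFT 16

	/* Shifting left by COUNTER_SHIFT + 1 discards bit 48 (Overflow) and
	 * everything above it; the arithmetic right shift brings the 47-bit
	 * payload delta back down. */
	static int64_t umc_delta(uint64_t prev, uint64_t cur)
	{
		unsigned int shift = COUNTER_SHIFT + 1;

		return (int64_t)((cur << shift) - (prev << shift)) >> shift;
	}

	int main(void)
	{
		/* Plain increment: 100 -> 250 yields 150. */
		printf("%lld\n", (long long)umc_delta(100, 250));
		/* A set Overflow bit does not pollute the delta: still 42. */
		printf("%lld\n", (long long)umc_delta(0, (1ULL << 48) | 42));
		return 0;
	}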
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 6) /* Use thunk for indirect branches in lower half of cacheline */ +#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */ +#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */ +#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */ + /* * BUG word(s) */ @@ -529,4 +534,5 @@ #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ #define X86_BUG_ITS X86_BUG(1*32 + 5) /* "its" CPU is affected by Indirect Target Selection */ #define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* "its_native_only" CPU is affected by ITS, VMX is not affected */ +#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 1c2db11a2c3cb..2b75fe80fcb20 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void) static __always_inline void native_safe_halt(void) { - mds_idle_clear_cpu_buffers(); + x86_idle_clear_cpu_buffers(); asm volatile("sti; hlt": : :"memory"); } static __always_inline void native_halt(void) { - mds_idle_clear_cpu_buffers(); + x86_idle_clear_cpu_buffers(); asm volatile("hlt": : :"memory"); } diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index ac25f9eb59120..7ebe76f69417a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -621,6 +621,7 @@ #define MSR_AMD64_OSVW_STATUS 0xc0010141 #define MSR_AMD_PPIN_CTL 0xc00102f0 #define MSR_AMD_PPIN 0xc00102f1 +#define MSR_AMD64_CPUID_FN_7 0xc0011002 #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 920426d691ce7..7f9a97c572fe2 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -44,8 +44,6 @@ static __always_inline void __monitorx(const void *eax, unsigned long ecx, static __always_inline void __mwait(unsigned long eax, unsigned long ecx) { - mds_idle_clear_cpu_buffers(); - /* "mwait %eax, %ecx;" */ asm volatile(".byte 0x0f, 0x01, 0xc9;" :: "a" (eax), "c" (ecx)); @@ -80,7 +78,7 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx) static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, unsigned long ecx) { - /* No MDS buffer clear as this is AMD/HYGON only */ + /* No need for TSA buffer clearing on AMD */ /* "mwaitx %eax, %ebx, %ecx;" */ asm volatile(".byte 0x0f, 0x01, 0xfb;" @@ -98,7 +96,7 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, */ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx) { - mds_idle_clear_cpu_buffers(); + /* "mwait %eax, %ecx;" */ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" :: "a" (eax), "c" (ecx)); @@ -116,24 +114,29 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx) */ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) { + if (need_resched()) + return; + + x86_idle_clear_cpu_buffers(); + if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { - if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) { - mb(); - clflush((void *)&current_thread_info()->flags); - mb(); - } + const void *addr =
&current_thread_info()->flags; + + alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); + __monitor(addr, 0, 0); - __monitor((void *)&current_thread_info()->flags, 0, 0); + if (need_resched()) + goto out; - if (!need_resched()) { - if (ecx & 1) { - __mwait(eax, ecx); - } else { - __sti_mwait(eax, ecx); - raw_local_irq_disable(); - } + if (ecx & 1) { + __mwait(eax, ecx); + } else { + __sti_mwait(eax, ecx); + raw_local_irq_disable(); } } + +out: current_clr_polling(); } diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index f7bb0016d7d9e..331f6a05535d4 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -315,25 +315,31 @@ .endm /* - * Macro to execute VERW instruction that mitigate transient data sampling - * attacks such as MDS. On affected systems a microcode update overloaded VERW - * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF. - * + * Macro to execute VERW insns that mitigate transient data sampling + * attacks such as MDS or TSA. On affected systems a microcode update + * overloaded VERW insns to also clear the CPU buffers. VERW clobbers + * CFLAGS.ZF. * Note: Only the memory operand variant of VERW clears the CPU buffers. */ -.macro CLEAR_CPU_BUFFERS +.macro __CLEAR_CPU_BUFFERS feature #ifdef CONFIG_X86_64 - ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF + ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature #else /* * In 32bit mode, the memory operand must be a %cs reference. The data * segments may not be usable (vm86 mode), and the stack segment may not * be flat (ESPFIX32). */ - ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF + ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature #endif .endm +#define CLEAR_CPU_BUFFERS \ + __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF + +#define VM_CLEAR_CPU_BUFFERS \ + __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM + #ifdef CONFIG_X86_64 .macro CLEAR_BRANCH_HISTORY ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP @@ -582,24 +588,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); -DECLARE_STATIC_KEY_FALSE(mds_idle_clear); +DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); -extern u16 mds_verw_sel; +extern u16 x86_verw_sel; #include /** - * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability + * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns * * This uses the otherwise unused and obsolete VERW instruction in * combination with microcode which triggers a CPU buffer flush when the * instruction is executed. */ -static __always_inline void mds_clear_cpu_buffers(void) +static __always_inline void x86_clear_cpu_buffers(void) { static const u16 ds = __KERNEL_DS; @@ -616,14 +622,15 @@ static __always_inline void mds_clear_cpu_buffers(void) } /** - * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability + * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS + * and TSA vulnerabilities.
* * Clear CPU buffers if the corresponding static key is enabled */ -static __always_inline void mds_idle_clear_cpu_buffers(void) +static __always_inline void x86_idle_clear_cpu_buffers(void) { - if (static_branch_likely(&mds_idle_clear)) - mds_clear_cpu_buffers(); + if (static_branch_likely(&cpu_buf_idle_clear)) + x86_clear_cpu_buffers(); } #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h index e770c4fc47f4c..8727c7e21dd1e 100644 --- a/arch/x86/include/asm/sighandling.h +++ b/arch/x86/include/asm/sighandling.h @@ -24,4 +24,26 @@ int ia32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs); int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs); int x32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs); +/* + * To prevent immediate repeat of single step trap on return from SIGTRAP + * handler if the trap flag (TF) is set without an external debugger attached, + * clear the software event flag in the augmented SS, ensuring no single-step + * trap is pending upon ERETU completion. + * + * Note, this function should be called in sigreturn() before the original + * state is restored to make sure the TF is read from the entry frame. + */ +static __always_inline void prevent_single_step_upon_eretu(struct pt_regs *regs) +{ + /* + * If the trap flag (TF) is set, i.e., the sigreturn() SYSCALL instruction + * is being single-stepped, do not clear the software event flag in the + * augmented SS, thus a debugger won't skip over the following instruction. + */ +#ifdef CONFIG_X86_FRED + if (!(regs->flags & X86_EFLAGS_TF)) + regs->fred_ss.swevent = 0; +#endif +} + #endif /* _ASM_X86_SIGHANDLING_H */ diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index b5b6332940613..2d13ef1f4b057 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -97,7 +97,7 @@ void tdx_init(void); typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args); -static inline u64 sc_retry(sc_func_t func, u64 fn, +static __always_inline u64 sc_retry(sc_func_t func, u64 fn, struct tdx_module_args *args) { int retry = RDRAND_RETRY_LOOPS; diff --git a/arch/x86/include/uapi/asm/debugreg.h b/arch/x86/include/uapi/asm/debugreg.h index 0007ba077c0c2..41da492dfb01f 100644 --- a/arch/x86/include/uapi/asm/debugreg.h +++ b/arch/x86/include/uapi/asm/debugreg.h @@ -15,7 +15,26 @@ which debugging register was responsible for the trap. The other bits are either reserved or not of interest to us. */ -/* Define reserved bits in DR6 which are always set to 1 */ +/* + * Define bits in DR6 which are set to 1 by default. + * + * This is also the DR6 architectural value following Power-up, Reset or INIT. + * + * Note, with the introduction of Bus Lock Detection (BLD) and Restricted + * Transactional Memory (RTM), the DR6 register has been modified: + * + * 1) BLD flag (bit 11) is no longer reserved to 1 if the CPU supports + * Bus Lock Detection. The assertion of a bus lock could clear it. + * + * 2) RTM flag (bit 16) is no longer reserved to 1 if the CPU supports + * restricted transactional memory. A #DB occurring inside an RTM region + * could clear it. + * + * Apparently, DR6.BLD and DR6.RTM are active-low bits. + * + * As a result, DR6_RESERVED is an incorrect name now, but it is kept for + * compatibility.
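+ * + * Illustrative example (not part of this header): since BLD and RTM are + * active-low, #DB handlers first flip DR6 to positive polarity so that a set + * bit uniformly means "this event triggered", as debug_read_reset_dr6() does + * later in this series: + * + * get_debugreg(dr6, 6); + * dr6 ^= DR6_RESERVED; // flip to positive polarity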
+ */ #define DR6_RESERVED (0xFFFF0FF0) #define DR_TRAP0 (0x1) /* db0 */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e432910859cb1..efd42ee9d1cc6 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -368,6 +368,64 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c) #endif } +static bool amd_check_tsa_microcode(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + union zen_patch_rev p; + u32 min_rev = 0; + + p.ext_fam = c->x86 - 0xf; + p.model = c->x86_model; + p.ext_model = c->x86_model >> 4; + p.stepping = c->x86_stepping; + + if (cpu_has(c, X86_FEATURE_ZEN3) || + cpu_has(c, X86_FEATURE_ZEN4)) { + switch (p.ucode_rev >> 8) { + case 0xa0011: min_rev = 0x0a0011d7; break; + case 0xa0012: min_rev = 0x0a00123b; break; + case 0xa0082: min_rev = 0x0a00820d; break; + case 0xa1011: min_rev = 0x0a10114c; break; + case 0xa1012: min_rev = 0x0a10124c; break; + case 0xa1081: min_rev = 0x0a108109; break; + case 0xa2010: min_rev = 0x0a20102e; break; + case 0xa2012: min_rev = 0x0a201211; break; + case 0xa4041: min_rev = 0x0a404108; break; + case 0xa5000: min_rev = 0x0a500012; break; + case 0xa6012: min_rev = 0x0a60120a; break; + case 0xa7041: min_rev = 0x0a704108; break; + case 0xa7052: min_rev = 0x0a705208; break; + case 0xa7080: min_rev = 0x0a708008; break; + case 0xa70c0: min_rev = 0x0a70c008; break; + case 0xaa002: min_rev = 0x0aa00216; break; + default: + pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n", + __func__, p.ucode_rev, c->microcode); + return false; + } + } + + if (!min_rev) + return false; + + return c->microcode >= min_rev; +} + +static void tsa_init(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return; + + if (cpu_has(c, X86_FEATURE_ZEN3) || + cpu_has(c, X86_FEATURE_ZEN4)) { + if (amd_check_tsa_microcode()) + setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR); + } else { + setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO); + setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO); + } +} + static void bsp_init_amd(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { @@ -475,6 +533,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) } bsp_determine_snp(c); + + tsa_init(c); + return; warn: @@ -916,6 +977,13 @@ static void init_amd_zen2(struct cpuinfo_x86 *c) init_spectral_chicken(c); fix_erratum_1386(c); zen2_zenbleed_check(c); + + /* Disable RDSEED on AMD Cyan Skillfish because of an error. 
*/ + if (c->x86_model == 0x47 && c->x86_stepping == 0x0) { + clear_cpu_cap(c, X86_FEATURE_RDSEED); + msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18); + pr_emerg("RDSEED is not reliable on this platform; disabling.\n"); + } } static void init_amd_zen3(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 0e9ab0b9a4942..c2c7b76d953f7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -50,6 +50,7 @@ static void __init l1d_flush_select_mitigation(void); static void __init srso_select_mitigation(void); static void __init gds_select_mitigation(void); static void __init its_select_mitigation(void); +static void __init tsa_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -122,9 +123,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); /* Control unconditional IBPB in switch_mm() */ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); -/* Control MDS CPU buffer clear before idling (halt, mwait) */ -DEFINE_STATIC_KEY_FALSE(mds_idle_clear); -EXPORT_SYMBOL_GPL(mds_idle_clear); +/* Control CPU buffer clear before idling (halt, mwait) */ +DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); +EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); /* * Controls whether l1d flush based mitigations are enabled, @@ -185,6 +186,7 @@ void __init cpu_select_mitigations(void) srso_select_mitigation(); gds_select_mitigation(); its_select_mitigation(); + tsa_select_mitigation(); } /* @@ -448,7 +450,7 @@ static void __init mmio_select_mitigation(void) * is required irrespective of SMT state. */ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) - static_branch_enable(&mds_idle_clear); + static_branch_enable(&cpu_buf_idle_clear); /* * Check if the system has the right microcode. @@ -2092,10 +2094,10 @@ static void update_mds_branch_idle(void) return; if (sched_smt_active()) { - static_branch_enable(&mds_idle_clear); + static_branch_enable(&cpu_buf_idle_clear); } else if (mmio_mitigation == MMIO_MITIGATION_OFF || (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { - static_branch_disable(&mds_idle_clear); + static_branch_disable(&cpu_buf_idle_clear); } } @@ -2103,6 +2105,94 @@ static void update_mds_branch_idle(void) #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" +#undef pr_fmt +#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt + +enum tsa_mitigations { + TSA_MITIGATION_NONE, + TSA_MITIGATION_UCODE_NEEDED, + TSA_MITIGATION_USER_KERNEL, + TSA_MITIGATION_VM, + TSA_MITIGATION_FULL, +}; + +static const char * const tsa_strings[] = { + [TSA_MITIGATION_NONE] = "Vulnerable", + [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", + [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", + [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", +}; + +static enum tsa_mitigations tsa_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_TSA) ? 
TSA_MITIGATION_FULL : TSA_MITIGATION_NONE; + +static int __init tsa_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + tsa_mitigation = TSA_MITIGATION_NONE; + else if (!strcmp(str, "on")) + tsa_mitigation = TSA_MITIGATION_FULL; + else if (!strcmp(str, "user")) + tsa_mitigation = TSA_MITIGATION_USER_KERNEL; + else if (!strcmp(str, "vm")) + tsa_mitigation = TSA_MITIGATION_VM; + else + pr_err("Ignoring unknown tsa=%s option.\n", str); + + return 0; +} +early_param("tsa", tsa_parse_cmdline); + +static void __init tsa_select_mitigation(void) +{ + if (tsa_mitigation == TSA_MITIGATION_NONE) + return; + + if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) { + tsa_mitigation = TSA_MITIGATION_NONE; + return; + } + + if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) + tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; + + switch (tsa_mitigation) { + case TSA_MITIGATION_USER_KERNEL: + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + break; + + case TSA_MITIGATION_VM: + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); + break; + + case TSA_MITIGATION_UCODE_NEEDED: + if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) + goto out; + + pr_notice("Forcing mitigation on in a VM\n"); + + /* + * On the off-chance that microcode has been updated + * on the host, enable the mitigation in the guest just + * in case. + */ + fallthrough; + case TSA_MITIGATION_FULL: + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); + break; + default: + break; + } + +out: + pr_info("%s\n", tsa_strings[tsa_mitigation]); +} + void cpu_bugs_smt_update(void) { mutex_lock(&spec_ctrl_mutex); @@ -2156,6 +2246,24 @@ void cpu_bugs_smt_update(void) break; } + switch (tsa_mitigation) { + case TSA_MITIGATION_USER_KERNEL: + case TSA_MITIGATION_VM: + case TSA_MITIGATION_FULL: + case TSA_MITIGATION_UCODE_NEEDED: + /* + * TSA-SQ can potentially lead to info leakage between + * SMT threads. + */ + if (sched_smt_active()) + static_branch_enable(&cpu_buf_idle_clear); + else + static_branch_disable(&cpu_buf_idle_clear); + break; + case TSA_MITIGATION_NONE: + break; + } + mutex_unlock(&spec_ctrl_mutex); } @@ -3084,6 +3192,11 @@ static ssize_t gds_show_state(char *buf) return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); } +static ssize_t tsa_show_state(char *buf) +{ + return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -3145,6 +3258,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_ITS: return its_show_state(buf); + case X86_BUG_TSA: + return tsa_show_state(buf); + default: break; } @@ -3229,6 +3345,11 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att { return cpu_show_common(dev, attr, buf, X86_BUG_ITS); } + +ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_TSA); +} #endif void __warn_thunk(void) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 39e9ec3dea985..ed072b126111c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1007,17 +1007,18 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_D_1_EAX] = eax; } - /* AMD-defined flags: level 0x80000001 */ + /* + * Check if extended CPUID leaves are implemented: Max extended + * CPUID leaf must be in the 0x80000001-0x8000ffff range. 
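+ * + * For example (editorial illustration): EAX == 0x80000021 passes the check + * below, whereas a CPU without extended leaves that returns a low stray value + * such as 0x00000005 fails it, and extended_cpuid_level is then forced to 0.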
+ */ eax = cpuid_eax(0x80000000); - c->extended_cpuid_level = eax; + c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0; - if ((eax & 0xffff0000) == 0x80000000) { - if (eax >= 0x80000001) { - cpuid(0x80000001, &eax, &ebx, &ecx, &edx); + if (c->extended_cpuid_level >= 0x80000001) { + cpuid(0x80000001, &eax, &ebx, &ecx, &edx); - c->x86_capability[CPUID_8000_0001_ECX] = ecx; - c->x86_capability[CPUID_8000_0001_EDX] = edx; - } + c->x86_capability[CPUID_8000_0001_ECX] = ecx; + c->x86_capability[CPUID_8000_0001_EDX] = edx; } if (c->extended_cpuid_level >= 0x80000007) { @@ -1232,6 +1233,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define ITS BIT(8) /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ #define ITS_NATIVE_ONLY BIT(9) +/* CPU is affected by Transient Scheduler Attacks */ +#define TSA BIT(10) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1279,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_AMD(0x16, RETBLEED), VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), - VULNBL_AMD(0x19, SRSO), + VULNBL_AMD(0x19, SRSO | TSA), {} }; @@ -1489,6 +1492,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); } + if (c->x86_vendor == X86_VENDOR_AMD) { + if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) || + !cpu_has(c, X86_FEATURE_TSA_L1_NO)) { + if (cpu_matches(cpu_vuln_blacklist, TSA) || + /* Enable bug on Zen guests to allow for live migration. */ + (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN))) + setup_force_cpu_bug(X86_BUG_TSA); + } + } + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; @@ -2144,20 +2157,16 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard); #endif /* CONFIG_X86_64 */ -/* - * Clear all 6 debug registers: - */ -static void clear_all_debug_regs(void) +static void initialize_debug_regs(void) { - int i; - - for (i = 0; i < 8; i++) { - /* Ignore db4, db5 */ - if ((i == 4) || (i == 5)) - continue; - - set_debugreg(0, i); - } + /* Control register first -- to make sure everything is disabled. */ + set_debugreg(0, 7); + set_debugreg(DR6_RESERVED, 6); + /* dr5 and dr4 don't exist */ + set_debugreg(0, 3); + set_debugreg(0, 2); + set_debugreg(0, 1); + set_debugreg(0, 0); } #ifdef CONFIG_KGDB @@ -2318,7 +2327,7 @@ void cpu_init(void) load_mm_ldt(&init_mm); - clear_all_debug_regs(); + initialize_debug_regs(); dbg_restore_debug_regs(); doublefault_init_cpu_tss(); diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 14bf8c232e457..dac4564e1d7ca 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -327,7 +327,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu) struct thresh_restart { struct threshold_block *b; - int reset; int set_lvt_off; int lvt_off; u16 old_limit; @@ -422,13 +421,13 @@ static void threshold_restart_bank(void *_tr) rdmsr(tr->b->address, lo, hi); - if (tr->b->threshold_limit < (hi & THRESHOLD_MAX)) - tr->reset = 1; /* limit cannot be lower than err count */ - - if (tr->reset) { /* reset err count and overflow bit */ - hi = - (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | - (THRESHOLD_MAX - tr->b->threshold_limit); + /* + * Reset error count and overflow bit. + * This is done during init or after handling an interrupt. 
+ */ + if (hi & MASK_OVERFLOW_HI || tr->set_lvt_off) { + hi &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI); + hi |= THRESHOLD_MAX - tr->b->threshold_limit; } else if (tr->old_limit) { /* change limit w/o reset */ int new_count = (hi & THRESHOLD_MAX) + (tr->old_limit - tr->b->threshold_limit); @@ -1099,13 +1098,20 @@ static const char *get_name(unsigned int cpu, unsigned int bank, struct threshol } bank_type = smca_get_bank_type(cpu, bank); - if (bank_type >= N_SMCA_BANK_TYPES) - return NULL; if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) { if (b->block < ARRAY_SIZE(smca_umc_block_names)) return smca_umc_block_names[b->block]; - return NULL; + } + + if (b && b->block) { + snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_block_%u", b->block); + return buf_mcatype; + } + + if (bank_type >= N_SMCA_BANK_TYPES) { + snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_bank_%u", bank); + return buf_mcatype; } if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 2a938f429c4d5..d8f3d9af8acf0 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1688,6 +1688,11 @@ static void mc_poll_banks_default(void) void (*mc_poll_banks)(void) = mc_poll_banks_default; +static bool should_enable_timer(unsigned long iv) +{ + return !mca_cfg.ignore_ce && iv; +} + static void mce_timer_fn(struct timer_list *t) { struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); @@ -1711,7 +1716,7 @@ static void mce_timer_fn(struct timer_list *t) if (mce_get_storm_mode()) { __start_timer(t, HZ); - } else { + } else if (should_enable_timer(iv)) { __this_cpu_write(mce_next_interval, iv); __start_timer(t, iv); } @@ -2111,11 +2116,10 @@ static void mce_start_timer(struct timer_list *t) { unsigned long iv = check_interval * HZ; - if (mca_cfg.ignore_ce || !iv) - return; - - this_cpu_write(mce_next_interval, iv); - __start_timer(t, iv); + if (should_enable_timer(iv)) { + this_cpu_write(mce_next_interval, iv); + __start_timer(t, iv); + } } static void __mcheck_cpu_setup_timer(void) @@ -2756,15 +2760,9 @@ static int mce_cpu_dead(unsigned int cpu) static int mce_cpu_online(unsigned int cpu) { struct timer_list *t = this_cpu_ptr(&mce_timer); - int ret; mce_device_create(cpu); - - ret = mce_threshold_create_device(cpu); - if (ret) { - mce_device_remove(cpu); - return ret; - } + mce_threshold_create_device(cpu); mce_reenable_cpu(); mce_start_timer(t); return 0; diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index f6103e6bf69a8..bb0a60b1ed637 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -477,6 +477,7 @@ void mce_intel_feature_init(struct cpuinfo_x86 *c) void mce_intel_feature_clear(struct cpuinfo_x86 *c) { intel_clear_lmce(); + cmci_clear(); } bool intel_filter_mce(struct mce *m) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 2f84164b20e01..765b4646648f7 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -94,18 +94,6 @@ static struct equiv_cpu_table { struct equiv_cpu_entry *entry; } equiv_table; -union zen_patch_rev { - struct { - __u32 rev : 8, - stepping : 4, - model : 4, - __reserved : 4, - ext_model : 4, - ext_fam : 8; - }; - __u32 ucode_rev; -}; - union cpuid_1_eax { struct { __u32 stepping : 4, diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c index 2a1655b1fdd88..1fd349cfc8024 100644 --- 
a/arch/x86/kernel/cpu/microcode/amd_shas.c +++ b/arch/x86/kernel/cpu/microcode/amd_shas.c @@ -231,6 +231,13 @@ static const struct patch_digest phashes[] = { 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, } }, + { 0xa0011d7, { + 0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b, + 0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12, + 0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2, + 0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36, + } + }, { 0xa001223, { 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, @@ -294,6 +301,13 @@ static const struct patch_digest phashes[] = { 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, } }, + { 0xa00123b, { + 0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2, + 0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3, + 0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28, + 0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72, + } + }, { 0xa00820c, { 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, @@ -301,6 +315,13 @@ static const struct patch_digest phashes[] = { 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, } }, + { 0xa00820d, { + 0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4, + 0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca, + 0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1, + 0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7, + } + }, { 0xa10113e, { 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, @@ -322,6 +343,13 @@ static const struct patch_digest phashes[] = { 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, } }, + { 0xa10114c, { + 0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64, + 0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74, + 0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a, + 0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0, + } + }, { 0xa10123e, { 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, @@ -343,6 +371,13 @@ static const struct patch_digest phashes[] = { 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, } }, + { 0xa10124c, { + 0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90, + 0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46, + 0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc, + 0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2, + } + }, { 0xa108108, { 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, @@ -350,6 +385,13 @@ static const struct patch_digest phashes[] = { 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, } }, + { 0xa108109, { + 0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa, + 0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b, + 0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35, + 0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59, + } + }, { 0xa20102d, { 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, @@ -357,6 +399,13 @@ static const struct patch_digest phashes[] = { 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, } }, + { 0xa20102e, { + 0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd, + 0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6, + 0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5, + 0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe, + } + }, { 0xa201210, { 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, @@ -364,6 +413,13 @@ static const struct patch_digest phashes[] = { 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, } }, + { 0xa201211, { + 0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95, + 0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27, + 0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff, + 0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27, + } + }, { 0xa404107, { 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, @@ -371,6 +427,13 @@ static const struct patch_digest phashes[] = { 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, } }, + { 0xa404108, { + 0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc, + 
0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb, + 0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19, + 0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00, + } + }, { 0xa500011, { 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, @@ -378,6 +441,13 @@ static const struct patch_digest phashes[] = { 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, } }, + { 0xa500012, { + 0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4, + 0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16, + 0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f, + 0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63, + } + }, { 0xa601209, { 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, @@ -385,6 +455,13 @@ static const struct patch_digest phashes[] = { 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, } }, + { 0xa60120a, { + 0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d, + 0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b, + 0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d, + 0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c, + } + }, { 0xa704107, { 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, @@ -392,6 +469,13 @@ static const struct patch_digest phashes[] = { 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, } }, + { 0xa704108, { + 0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93, + 0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98, + 0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49, + 0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a, + } + }, { 0xa705206, { 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, @@ -399,6 +483,13 @@ static const struct patch_digest phashes[] = { 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, } }, + { 0xa705208, { + 0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19, + 0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2, + 0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11, + 0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff, + } + }, { 0xa708007, { 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, @@ -406,6 +497,13 @@ static const struct patch_digest phashes[] = { 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, } }, + { 0xa708008, { + 0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46, + 0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab, + 0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16, + 0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b, + } + }, { 0xa70c005, { 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, @@ -413,6 +511,13 @@ static const struct patch_digest phashes[] = { 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, } }, + { 0xa70c008, { + 0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21, + 0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e, + 0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66, + 0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0, + } + }, { 0xaa00116, { 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, @@ -441,4 +546,11 @@ static const struct patch_digest phashes[] = { 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, } }, + { 0xaa00216, { + 0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5, + 0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08, + 0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2, + 0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1, + } + }, }; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 079f046ee26d1..e8021d3e58824 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -696,6 +696,8 @@ static int load_late_locked(void) return load_late_stop_cpus(true); case UCODE_NFOUND: return -ENOENT; + case UCODE_OK: + return 0; default: return -EBADFD; } diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 7b29ebda024f4..1ececfce7a46a 100644 --- 
a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -591,7 +591,7 @@ static void get_fixed_ranges(mtrr_type *frs) void mtrr_save_fixed_ranges(void *info) { - if (boot_cpu_has(X86_FEATURE_MTRR)) + if (mtrr_state.have_fixed) get_fixed_ranges(mtrr_state.fixed_ranges); } diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index c84c30188fdf2..bc4993aa41edf 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -49,6 +49,8 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, + { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 }, + { X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 }, { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 }, diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 9ace84486499b..147ea26dfdad6 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -719,6 +719,8 @@ int arch_memory_failure(unsigned long pfn, int flags) goto out; } + sgx_unmark_page_reclaimable(page); + /* * TBD: Add additional plumbing to enable pre-emptive * action for asynchronous poison notification. Until diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 8f62e0666dea5..8abe60919e2f9 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -119,7 +119,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, { struct xregs_state __user *x = buf; struct _fpx_sw_bytes sw_bytes = {}; - u32 xfeatures; int err; /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ @@ -132,12 +131,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + fpstate->user_size)); - /* - * Read the xfeatures which we copied (directly from the cpu or - * from the state in task struct) to the user buffers. - */ - err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures); - /* * For legacy compatible, we always set FP/SSE bits in the bit * vector while saving the state to the user context. This will @@ -149,9 +142,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, * header as well as change any contents in the memory layout. * xrestore as part of sigreturn will capture all the changes. */ - xfeatures |= XFEATURE_MASK_FPSSE; - - err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures); + err |= set_xfeature_in_sigframe(x, XFEATURE_MASK_FPSSE); return !err; } diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index aa16f1a1bbcf1..f7d8f3d73599e 100644 --- a/arch/x86/kernel/fpu/xstate.h +++ b/arch/x86/kernel/fpu/xstate.h @@ -69,21 +69,31 @@ static inline u64 xfeatures_mask_independent(void) return fpu_kernel_cfg.independent_features; } +static inline int set_xfeature_in_sigframe(struct xregs_state __user *xbuf, u64 mask) +{ + u64 xfeatures; + int err; + + /* Read the xfeatures value already saved in the user buffer */ + err = __get_user(xfeatures, &xbuf->header.xfeatures); + xfeatures |= mask; + err |= __put_user(xfeatures, &xbuf->header.xfeatures); + + return err; +} + /* * Update the value of PKRU register that was already pushed onto the signal frame. 
*/ -static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru) +static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru) { - u64 xstate_bv; int err; if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE))) return 0; /* Mark PKRU as in-use so that it is restored correctly. */ - xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU; - - err = __put_user(xstate_bv, &buf->header.xfeatures); + err = set_xfeature_in_sigframe(buf, XFEATURE_MASK_PKRU); if (err) return err; @@ -304,7 +314,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr clac(); if (!err) - err = update_pkru_in_sigframe(buf, mask, pkru); + err = update_pkru_in_sigframe(buf, pkru); return err; } diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index e2fab3ceb09fb..9a101150376db 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -33,8 +33,9 @@ void io_bitmap_share(struct task_struct *tsk) set_tsk_thread_flag(tsk, TIF_IO_BITMAP); } -static void task_update_io_bitmap(struct task_struct *tsk) +static void task_update_io_bitmap(void) { + struct task_struct *tsk = current; struct thread_struct *t = &tsk->thread; if (t->iopl_emul == 3 || t->io_bitmap) { @@ -54,7 +55,12 @@ void io_bitmap_exit(struct task_struct *tsk) struct io_bitmap *iobm = tsk->thread.io_bitmap; tsk->thread.io_bitmap = NULL; - task_update_io_bitmap(tsk); + /* + * Don't touch the TSS when invoked on a failed fork(). TSS + * reflects the state of @current and not the state of @tsk. + */ + if (tsk == current) + task_update_io_bitmap(); if (iobm && refcount_dec_and_test(&iobm->refcnt)) kfree(iobm); } @@ -192,8 +198,7 @@ SYSCALL_DEFINE1(iopl, unsigned int, level) } t->iopl_emul = level; - task_update_io_bitmap(current); - + task_update_io_bitmap(); return 0; } diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index feca4f20b06aa..85fa2db38dc42 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -414,7 +414,7 @@ static __always_inline bool handle_pending_pir(u64 *pir, struct pt_regs *regs) bool handled = false; for (i = 0; i < 4; i++) - pir_copy[i] = pir[i]; + pir_copy[i] = READ_ONCE(pir[i]); for (i = 0; i < 4; i++) { if (!pir_copy[i]) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c7ce3655b7078..4c9c98c5deabd 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -180,6 +180,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) frame->ret_addr = (unsigned long) ret_from_fork_asm; p->thread.sp = (unsigned long) fork_frame; p->thread.io_bitmap = NULL; + clear_tsk_thread_flag(p, TIF_IO_BITMAP); p->thread.iopl_warn = 0; memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); @@ -468,6 +469,11 @@ void native_tss_update_io_bitmap(void) } else { struct io_bitmap *iobm = t->io_bitmap; + if (WARN_ON_ONCE(!iobm)) { + clear_thread_flag(TIF_IO_BITMAP); + native_tss_invalidate_io_bitmap(); + } + /* * Only copy bitmap data when the sequence number differs. The * update time is accounted to the incoming task. 
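(Editorial aside before the next process.c hunk: a userspace sketch of the read-modify-write pattern that the set_xfeature_in_sigframe() helper above introduces, with direct pointer access standing in for __get_user()/__put_user(). The mask values follow the kernel's xstate component numbering; everything else is illustrative.)

	#include <stdint.h>
	#include <stdio.h>

	#define XFEATURE_MASK_FPSSE	0x3ULL		/* x87 + SSE, components 0-1 */
	#define XFEATURE_MASK_PKRU	(1ULL << 9)	/* PKRU is component 9 */

	/* OR extra bits into the xfeatures word already saved in the frame
	 * instead of overwriting the whole word with a fresh value. */
	static int set_xfeature_in_sigframe(uint64_t *xfeatures_in_frame, uint64_t mask)
	{
		*xfeatures_in_frame |= mask;
		return 0;
	}

	int main(void)
	{
		uint64_t xfeatures = XFEATURE_MASK_FPSSE;	/* as recorded by XSAVE */

		set_xfeature_in_sigframe(&xfeatures, XFEATURE_MASK_PKRU);
		printf("0x%llx\n", (unsigned long long)xfeatures);	/* prints 0x203 */
		return 0;
	}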
@@ -905,19 +911,24 @@ static __init bool prefer_mwait_c1_over_halt(void) */ static __cpuidle void mwait_idle(void) { + if (need_resched()) + return; + + x86_idle_clear_cpu_buffers(); + if (!current_set_polling_and_test()) { - if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { - mb(); /* quirk */ - clflush((void *)&current_thread_info()->flags); - mb(); /* quirk */ - } + const void *addr = &current_thread_info()->flags; - __monitor((void *)&current_thread_info()->flags, 0, 0); - if (!need_resched()) { - __sti_mwait(0, 0); - raw_local_irq_disable(); - } + alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); + __monitor(addr, 0, 0); + if (need_resched()) + goto out; + + __sti_mwait(0, 0); + raw_local_irq_disable(); } + +out: __current_clr_polling(); } diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 98123ff10506c..42bbc42bd3503 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -152,6 +152,8 @@ SYSCALL32_DEFINE0(sigreturn) struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); sigset_t set; + prevent_single_step_upon_eretu(regs); + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) @@ -175,6 +177,8 @@ SYSCALL32_DEFINE0(rt_sigreturn) struct rt_sigframe_ia32 __user *frame; sigset_t set; + prevent_single_step_upon_eretu(regs); + frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); if (!access_ok(frame, sizeof(*frame))) diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index ee9453891901b..d483b585c6c60 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -250,6 +250,8 @@ SYSCALL_DEFINE0(rt_sigreturn) sigset_t set; unsigned long uc_flags; + prevent_single_step_upon_eretu(regs); + frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); if (!access_ok(frame, sizeof(*frame))) goto badframe; @@ -366,6 +368,8 @@ COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn) sigset_t set; unsigned long uc_flags; + prevent_single_step_upon_eretu(regs); + frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); if (!access_ok(frame, sizeof(*frame))) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b18fc7539b8d7..243f3bc9b4dc5 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -977,24 +977,32 @@ static bool is_sysenter_singlestep(struct pt_regs *regs) #endif } -static __always_inline unsigned long debug_read_clear_dr6(void) +static __always_inline unsigned long debug_read_reset_dr6(void) { unsigned long dr6; + get_debugreg(dr6, 6); + dr6 ^= DR6_RESERVED; /* Flip to positive polarity */ + /* * The Intel SDM says: * - * Certain debug exceptions may clear bits 0-3. The remaining - * contents of the DR6 register are never cleared by the - * processor. To avoid confusion in identifying debug - * exceptions, debug handlers should clear the register before - * returning to the interrupted task. + * Certain debug exceptions may clear bits 0-3 of DR6. + * + * BLD induced #DB clears DR6.BLD and any other debug + * exception doesn't modify DR6.BLD. - * Keep it simple: clear DR6 immediately. + * RTM induced #DB clears DR6.RTM and any other debug + * exception sets DR6.RTM. + * + * To avoid confusion in identifying debug exceptions, + * debug handlers should set DR6.BLD and DR6.RTM, and + * clear other DR6 bits before returning. + * + * Keep it simple: write DR6 with its architectural reset + * value 0xFFFF0FF0, defined as DR6_RESERVED, immediately.
*/ - get_debugreg(dr6, 6); set_debugreg(DR6_RESERVED, 6); - dr6 ^= DR6_RESERVED; /* Flip to positive polarity */ return dr6; } @@ -1194,13 +1202,13 @@ static noinstr void exc_debug_user(struct pt_regs *regs, unsigned long dr6) /* IST stack entry */ DEFINE_IDTENTRY_DEBUG(exc_debug) { - exc_debug_kernel(regs, debug_read_clear_dr6()); + exc_debug_kernel(regs, debug_read_reset_dr6()); } /* User entry, runs on regular task stack */ DEFINE_IDTENTRY_DEBUG_USER(exc_debug) { - exc_debug_user(regs, debug_read_clear_dr6()); + exc_debug_user(regs, debug_read_reset_dr6()); } #ifdef CONFIG_X86_FRED @@ -1219,7 +1227,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug) { /* * FRED #DB stores DR6 on the stack in the format which - * debug_read_clear_dr6() returns for the IDT entry points. + * debug_read_reset_dr6() returns for the IDT entry points. */ unsigned long dr6 = fred_event_data(regs); @@ -1234,7 +1242,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug) /* 32 bit does not have separate entry points. */ DEFINE_IDTENTRY_RAW(exc_debug) { - unsigned long dr6 = debug_read_clear_dr6(); + unsigned long dr6 = debug_read_reset_dr6(); if (user_mode(regs)) exc_debug_user(regs, dr6); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c92e43f2d0c4e..8f587c5bb6bc4 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -814,6 +814,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_mask(CPUID_8000_0021_EAX, F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ | + F(VERW_CLEAR) | F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ | F(WRMSR_XX_BASE_NS) ); @@ -821,11 +822,19 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB); kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE); kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO); + kvm_cpu_cap_check_and_set(X86_FEATURE_VERW_CLEAR); kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX, F(PERFMON_V2) ); + kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX, + F(TSA_SQ_NO) | F(TSA_L1_NO) + ); + + kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_SQ_NO); + kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_L1_NO); + /* * Synthesize "LFENCE is serializing" into the AMD-defined entry in * KVM's supported CPUID if the feature is reported as supported by the @@ -1376,8 +1385,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; case 0x80000021: - entry->ebx = entry->ecx = entry->edx = 0; + entry->ebx = entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0021_EAX); + cpuid_entry_override(entry, CPUID_8000_0021_ECX); break; /* AMD Extended Performance Monitoring and Debug */ case 0x80000022: { diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 0d17d6b706396..0ea847b823541 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -18,6 +18,7 @@ enum kvm_only_cpuid_leafs { CPUID_8000_0022_EAX, CPUID_7_2_EDX, CPUID_24_0_EBX, + CPUID_8000_0021_ECX, NR_KVM_CPU_CAPS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, @@ -68,6 +69,10 @@ enum kvm_only_cpuid_leafs { /* CPUID level 0x80000022 (EAX) */ #define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0) +/* CPUID level 0x80000021 (ECX) */ +#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1) +#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2) + struct cpuid_reg { u32 function; u32 index; @@ -98,6 +103,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 7, 2, 
CPUID_EDX}, [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX}, + [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX}, }; /* @@ -137,6 +143,8 @@ static __always_inline u32 __feature_translate(int x86_feature) KVM_X86_TRANSLATE_FEATURE(PERFMON_V2); KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); KVM_X86_TRANSLATE_FEATURE(BHI_CTRL); + KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO); + KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO); default: return x86_feature; } diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6154cb450b448..c4ae73541fc56 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2058,6 +2058,10 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src) struct kvm_vcpu *src_vcpu; unsigned long i; + if (src->created_vcpus != atomic_read(&src->online_vcpus) || + dst->created_vcpus != atomic_read(&dst->online_vcpus)) + return -EBUSY; + if (!sev_es_guest(src)) return 0; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7cbacd0439211..1f42a71b15c02 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1483,7 +1483,7 @@ static void svm_clear_current_vmcb(struct vmcb *vmcb) { int i; - for_each_online_cpu(i) + for_each_possible_cpu(i) cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL); } diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 0c61153b275f6..235c4af6b692a 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run) #endif mov VCPU_RDI(%_ASM_DI), %_ASM_DI + /* Clobbers EFLAGS.ZF */ + VM_CLEAR_CPU_BUFFERS + /* Enter guest mode */ 3: vmrun %_ASM_AX 4: @@ -335,6 +338,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) mov SVM_current_vmcb(%rdi), %rax mov KVM_VMCB_pa(%rax), %rax + /* Clobbers EFLAGS.ZF */ + VM_CLEAR_CPU_BUFFERS + /* Enter guest mode */ 1: vmrun %rax 2: diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index a3d45b01dbadf..029fbf3791f17 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -770,8 +770,11 @@ void vmx_emergency_disable_virtualization_cpu(void) return; list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), - loaded_vmcss_on_cpu_link) + loaded_vmcss_on_cpu_link) { vmcs_clear(v->vmcs); + if (v->shadow_vmcs) + vmcs_clear(v->shadow_vmcs); + } kvm_cpu_vmxoff(); } @@ -7310,7 +7313,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vmx_l1d_flush(vcpu); else if (static_branch_unlikely(&mmio_stale_data_clear) && kvm_arch_has_assigned_device(vcpu->kvm)) - mds_clear_cpu_buffers(); + x86_clear_cpu_buffers(); vmx_disable_fb_clear(vmx); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 622fe24da9106..759cc3e9c0fac 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1916,8 +1916,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm, { struct kvm_vcpu *vcpu; - if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm)) - return -EINVAL; + /* + * Don't check for the port being within range of max_evtchn_port(). + * Userspace can configure whatever targets it likes; events just won't + * be delivered if/while the target is invalid, just like userspace can + * configure MSIs which target non-existent APICs. + * + * This allows, on Live Migration and Live Update, the IRQ routing table + * to be restored *independently* of other things like creating vCPUs, + * without imposing an ordering dependency on userspace. In this + * particular case, the problematic ordering would be with setting the + * Xen 'long mode' flag, which changes max_evtchn_port() to allow 4096 + * instead of 1024 event channels.
+ */ /* We only support 2 level event channels for now */ if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index f5dd84eb55dcd..cd3fd5155f6ec 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -35,7 +35,7 @@ # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) # - (66&F2): Both 0x66 and 0xF2 prefixes are specified. # -# REX2 Prefix +# REX2 Prefix Superscripts # - (!REX2): REX2 is not allowed # - (REX2): REX2 variant e.g. JMPABS @@ -286,10 +286,10 @@ df: ESC # Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix # in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation # to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD. -e0: LOOPNE/LOOPNZ Jb (f64) (!REX2) -e1: LOOPE/LOOPZ Jb (f64) (!REX2) -e2: LOOP Jb (f64) (!REX2) -e3: JrCXZ Jb (f64) (!REX2) +e0: LOOPNE/LOOPNZ Jb (f64),(!REX2) +e1: LOOPE/LOOPZ Jb (f64),(!REX2) +e2: LOOP Jb (f64),(!REX2) +e3: JrCXZ Jb (f64),(!REX2) e4: IN AL,Ib (!REX2) e5: IN eAX,Ib (!REX2) e6: OUT Ib,AL (!REX2) @@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2) # in "near" jumps and calls is 16-bit. For CALL, # push of return address is 16-bit wide, RSP is decremented by 2 # but is not truncated to 16 bits, unlike RIP. -e8: CALL Jz (f64) (!REX2) -e9: JMP-near Jz (f64) (!REX2) -ea: JMP-far Ap (i64) (!REX2) -eb: JMP-short Jb (f64) (!REX2) +e8: CALL Jz (f64),(!REX2) +e9: JMP-near Jz (f64),(!REX2) +ea: JMP-far Ap (i64),(!REX2) +eb: JMP-short Jb (f64),(!REX2) ec: IN AL,DX (!REX2) ed: IN eAX,DX (!REX2) ee: OUT DX,AL (!REX2) @@ -478,22 +478,22 @@ AVXcode: 1 7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev) # 0x0f 0x80-0x8f # Note: "forced64" is Intel CPU behavior (see comment about CALL insn). 
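# (Editorial note, illustrative: the awk script that generates the inat tables from this file applies only one of several space-separated superscripts, so "(f64) (!REX2)" drops an attribute; joining them as "(f64),(!REX2)" keeps both on the entry, as in the rows below.)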
-80: JO Jz (f64) (!REX2) -81: JNO Jz (f64) (!REX2) -82: JB/JC/JNAE Jz (f64) (!REX2) -83: JAE/JNB/JNC Jz (f64) (!REX2) -84: JE/JZ Jz (f64) (!REX2) -85: JNE/JNZ Jz (f64) (!REX2) -86: JBE/JNA Jz (f64) (!REX2) -87: JA/JNBE Jz (f64) (!REX2) -88: JS Jz (f64) (!REX2) -89: JNS Jz (f64) (!REX2) -8a: JP/JPE Jz (f64) (!REX2) -8b: JNP/JPO Jz (f64) (!REX2) -8c: JL/JNGE Jz (f64) (!REX2) -8d: JNL/JGE Jz (f64) (!REX2) -8e: JLE/JNG Jz (f64) (!REX2) -8f: JNLE/JG Jz (f64) (!REX2) +80: JO Jz (f64),(!REX2) +81: JNO Jz (f64),(!REX2) +82: JB/JC/JNAE Jz (f64),(!REX2) +83: JAE/JNB/JNC Jz (f64),(!REX2) +84: JE/JZ Jz (f64),(!REX2) +85: JNE/JNZ Jz (f64),(!REX2) +86: JBE/JNA Jz (f64),(!REX2) +87: JA/JNBE Jz (f64),(!REX2) +88: JS Jz (f64),(!REX2) +89: JNS Jz (f64),(!REX2) +8a: JP/JPE Jz (f64),(!REX2) +8b: JNP/JPO Jz (f64),(!REX2) +8c: JL/JNGE Jz (f64),(!REX2) +8d: JNL/JGE Jz (f64),(!REX2) +8e: JLE/JNG Jz (f64),(!REX2) +8f: JNLE/JG Jz (f64),(!REX2) # 0x0f 0x90-0x9f 90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66) 91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66) diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c index 472540aeabc23..08cd913cbd4e9 100644 --- a/arch/x86/tools/insn_decoder_test.c +++ b/arch/x86/tools/insn_decoder_test.c @@ -10,8 +10,7 @@ #include #include #include - -#define unlikely(cond) (cond) +#include #include #include @@ -106,7 +105,7 @@ static void parse_args(int argc, char **argv) } } -#define BUFSIZE 256 +#define BUFSIZE (256 + KSYM_NAME_LEN) int main(int argc, char **argv) { diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h index b07824500363f..ddc144657efad 100644 --- a/arch/x86/um/asm/checksum.h +++ b/arch/x86/um/asm/checksum.h @@ -20,6 +20,9 @@ */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); +/* Do not call this directly. Declared for export type visibility. */ +extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len); + /** * csum_fold - Fold and invert a 32bit checksum. 
* sum: 32bit unfolded sum diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index 4e2b2e2ac9f90..eb91bc5448de2 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -69,8 +69,9 @@ static inline void seamcall_err_ret(u64 fn, u64 err, args->r9, args->r10, args->r11); } -static inline int sc_retry_prerr(sc_func_t func, sc_err_func_t err_func, - u64 fn, struct tdx_module_args *args) +static __always_inline int sc_retry_prerr(sc_func_t func, + sc_err_func_t err_func, + u64 fn, struct tdx_module_args *args) { u64 sret = sc_retry(func, fn, args); diff --git a/block/bio.c b/block/bio.c index 20c74696bf23b..094a5adf79d23 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1156,9 +1156,10 @@ EXPORT_SYMBOL(bio_add_page); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, size_t off) { + unsigned long nr = off / PAGE_SIZE; + WARN_ON_ONCE(len > UINT_MAX); - WARN_ON_ONCE(off > UINT_MAX); - __bio_add_page(bio, &folio->page, len, off); + __bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE); } EXPORT_SYMBOL_GPL(bio_add_folio_nofail); @@ -1179,9 +1180,11 @@ EXPORT_SYMBOL_GPL(bio_add_folio_nofail); bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off) { - if (len > UINT_MAX || off > UINT_MAX) + unsigned long nr = off / PAGE_SIZE; + + if (len > UINT_MAX) return false; - return bio_add_page(bio, &folio->page, len, off) > 0; + return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0; } EXPORT_SYMBOL(bio_add_folio); diff --git a/block/blk-merge.c b/block/blk-merge.c index f575cc1705b3f..7ddd7dd23dda8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1180,20 +1180,20 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, if (!plug || rq_list_empty(&plug->mq_list)) return false; - rq_list_for_each(&plug->mq_list, rq) { - if (rq->q == q) { - if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == - BIO_MERGE_OK) - return true; - break; - } + rq = plug->mq_list.tail; + if (rq->q == q) + return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == + BIO_MERGE_OK; + else if (!plug->multiple_queues) + return false; - /* - * Only keep iterating plug list for merges if we have multiple - * queues - */ - if (!plug->multiple_queues) - break; + rq_list_for_each(&plug->mq_list, rq) { + if (rq->q != q) + continue; + if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == + BIO_MERGE_OK) + return true; + break; } return false; } diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 414118435240a..d84946eb2f21e 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -1240,6 +1240,7 @@ void blk_zone_write_plug_bio_endio(struct bio *bio) if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) { bio->bi_opf &= ~REQ_OP_MASK; bio->bi_opf |= REQ_OP_ZONE_APPEND; + bio_clear_flag(bio, BIO_EMULATES_ZONE_APPEND); } /* @@ -1321,7 +1322,6 @@ static void blk_zone_wplug_bio_work(struct work_struct *work) spin_unlock_irqrestore(&zwplug->lock, flags); bdev = bio->bi_bdev; - submit_bio_noacct_nocheck(bio); /* * blk-mq devices will reuse the extra reference on the request queue @@ -1329,8 +1329,12 @@ static void blk_zone_wplug_bio_work(struct work_struct *work) * path for BIO-based devices will not do that. So drop this extra * reference here. 
*/ - if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) + if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) { + bdev->bd_disk->fops->submit_bio(bio); blk_queue_exit(bdev->bd_disk->queue); + } else { + blk_mq_submit_bio(bio); + } put_zwplug: /* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */ diff --git a/block/elevator.c b/block/elevator.c index 43ba4ab1ada7f..1f76e9efd7717 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -752,7 +752,6 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf, ssize_t elv_iosched_show(struct gendisk *disk, char *name) { struct request_queue *q = disk->queue; - struct elevator_queue *eq = q->elevator; struct elevator_type *cur = NULL, *e; int len = 0; @@ -763,7 +762,7 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name) len += sprintf(name+len, "[none] "); } else { len += sprintf(name+len, "none "); - cur = eq->type; + cur = q->elevator->type; } spin_lock(&elv_list_lock); diff --git a/crypto/api.c b/crypto/api.c index c2c4eb14ef955..5ce54328fef11 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -220,10 +220,19 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, if (crypto_is_test_larval(larval)) crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); - } else if (!alg) { + } else if (!alg || PTR_ERR(alg) == -EEXIST) { + int err = alg ? -EEXIST : -EAGAIN; + + /* + * EEXIST is expected because two probes can be scheduled + * at the same time with one using alg_name and the other + * using driver_name. Do a re-lookup but do not retry in + * case we hit a quirk like gcm_base(ctr(aes),...) which + * will never match. + */ alg = &larval->alg; alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: - ERR_PTR(-EAGAIN); + ERR_PTR(err); } else if (IS_ERR(alg)) ; else if (crypto_is_test_larval(larval) && diff --git a/crypto/ecc.c b/crypto/ecc.c index 50ad2d4ed672c..6cf9a945fc6c2 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(ecc_get_curve); void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, u64 *out, unsigned int ndigits) { - int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64)); + int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64)); unsigned int o = nbytes & 7; __be64 msd = 0; diff --git a/crypto/lrw.c b/crypto/lrw.c index e216fbf2b7866..4bede0031c63c 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -356,7 +356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. 
*/ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); diff --git a/crypto/xts.c b/crypto/xts.c index 672e1a3f0b0c9..91e391a6ba270 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(name, cipher_name + 4, sizeof(name)); diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index 05a0d99ce95c4..1edf6e5644026 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -423,6 +423,88 @@ static int dct_active_set(void *data, u64 active_percent) DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n"); +static int priority_bands_show(struct seq_file *s, void *v) +{ + struct ivpu_device *vdev = s->private; + struct ivpu_hw_info *hw = vdev->hw; + + for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE; + band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) { + switch (band) { + case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE: + seq_puts(s, "Idle: "); + break; + + case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL: + seq_puts(s, "Normal: "); + break; + + case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS: + seq_puts(s, "Focus: "); + break; + + case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME: + seq_puts(s, "Realtime: "); + break; + } + + seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n", + hw->hws.grace_period[band], hw->hws.process_grace_period[band], + hw->hws.process_quantum[band]); + } + + return 0; +} + +static int priority_bands_fops_open(struct inode *inode, struct file *file) +{ + return single_open(file, priority_bands_show, inode->i_private); +} + +static ssize_t +priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct ivpu_device *vdev = s->private; + char buf[64]; + u32 grace_period; + u32 process_grace_period; + u32 process_quantum; + u32 band; + int ret; + + if (size >= sizeof(buf)) + return -EINVAL; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size); + if (ret < 0) + return ret; + + buf[size] = '\0'; + ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period, + &process_quantum); + if (ret != 4) + return -EINVAL; + + if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT) + return -EINVAL; + + vdev->hw->hws.grace_period[band] = grace_period; + vdev->hw->hws.process_grace_period[band] = process_grace_period; + vdev->hw->hws.process_quantum[band] = process_quantum; + + return size; +} + +static const struct file_operations ivpu_hws_priority_bands_fops = { + .owner = THIS_MODULE, + .open = priority_bands_fops_open, + .write = priority_bands_fops_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + void ivpu_debugfs_init(struct ivpu_device *vdev) { struct dentry *debugfs_root = 
vdev->drm.debugfs_root; @@ -445,6 +527,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev) &fw_trace_hw_comp_mask_fops); debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev, &fw_trace_level_fops); + debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev, + &ivpu_hws_priority_bands_fops); debugfs_create_file("reset_engine", 0200, debugfs_root, vdev, &ivpu_reset_engine_fops); diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 168d03d5aa1d0..00208c4a65807 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -102,6 +102,8 @@ static void file_priv_release(struct kref *ref) pm_runtime_get_sync(vdev->drm.dev); mutex_lock(&vdev->context_list_lock); file_priv_unbind(vdev, file_priv); + drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa)); + xa_destroy(&file_priv->cmdq_xa); mutex_unlock(&vdev->context_list_lock); pm_runtime_put_autosuspend(vdev->drm.dev); @@ -261,6 +263,10 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file) file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1)); file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK; + xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1); + file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID; + file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID; + mutex_unlock(&vdev->context_list_lock); drm_dev_exit(idx); @@ -709,6 +715,7 @@ static struct pci_device_id ivpu_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) }, { } }; MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index a5707a85e7255..f2ba3ed8b3fc5 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -23,9 +23,10 @@ #define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)" #define DRIVER_DATE "20230117" -#define PCI_DEVICE_ID_MTL 0x7d1d -#define PCI_DEVICE_ID_ARL 0xad1d -#define PCI_DEVICE_ID_LNL 0x643e +#define PCI_DEVICE_ID_MTL 0x7d1d +#define PCI_DEVICE_ID_ARL 0xad1d +#define PCI_DEVICE_ID_LNL 0x643e +#define PCI_DEVICE_ID_PTL_P 0xb03e #define IVPU_HW_IP_37XX 37 #define IVPU_HW_IP_40XX 40 @@ -49,11 +50,11 @@ #define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0) #define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8) -#define IVPU_NUM_ENGINES 2 #define IVPU_NUM_PRIORITIES 4 -#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES) +#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES) -#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority)) +#define IVPU_CMDQ_MIN_ID 1 +#define IVPU_CMDQ_MAX_ID 255 #define IVPU_PLATFORM_SILICON 0 #define IVPU_PLATFORM_SIMICS 2 @@ -173,13 +174,15 @@ struct ivpu_file_priv { struct kref ref; struct ivpu_device *vdev; struct mutex lock; /* Protects cmdq */ - struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX]; + struct xarray cmdq_xa; struct ivpu_mmu_context ctx; struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */ struct list_head ms_instance_list; struct ivpu_bo *ms_info_bo; struct xa_limit job_limit; u32 job_id_next; + struct xa_limit cmdq_limit; + u32 cmdq_id_next; bool has_mmu_faults; bool bound; bool aborted; @@ -227,6 +230,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev) return IVPU_HW_IP_37XX; case PCI_DEVICE_ID_LNL: return IVPU_HW_IP_40XX; + case PCI_DEVICE_ID_PTL_P: + return 
IVPU_HW_IP_50XX; default: dump_stack(); ivpu_err(vdev, "Unknown NPU IP generation\n"); @@ -241,6 +246,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev) case PCI_DEVICE_ID_ARL: return IVPU_HW_BTRS_MTL; case PCI_DEVICE_ID_LNL: + case PCI_DEVICE_ID_PTL_P: return IVPU_HW_BTRS_LNL; default: dump_stack(); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index d12188730ac7f..cd40446a22a57 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -53,15 +53,18 @@ static struct { int gen; const char *name; } fw_names[] = { - { IVPU_HW_IP_37XX, "vpu_37xx.bin" }, + { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" }, { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" }, - { IVPU_HW_IP_40XX, "vpu_40xx.bin" }, + { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" }, { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, + { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" }, + { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" }, }; /* Production fw_names from the table above */ -MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin"); -MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin"); +MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin"); +MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin"); +MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin"); static int ivpu_fw_request(struct ivpu_device *vdev) { diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index c8daffd90f300..6b1bda7e130d0 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -26,11 +26,21 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con { ivpu_dbg(vdev, BO, "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n", - action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0, + action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id, (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc, (bool)bo->base.base.import_attach); } +static inline int ivpu_bo_lock(struct ivpu_bo *bo) +{ + return dma_resv_lock(bo->base.base.resv, NULL); +} + +static inline void ivpu_bo_unlock(struct ivpu_bo *bo) +{ + dma_resv_unlock(bo->base.base.resv); +} + /* * ivpu_bo_pin() - pin the backing physical pages and map them to VPU. 
* @@ -41,22 +51,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con int __must_check ivpu_bo_pin(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); + struct sg_table *sgt; int ret = 0; - mutex_lock(&bo->lock); - ivpu_dbg_bo(vdev, bo, "pin"); - drm_WARN_ON(&vdev->drm, !bo->ctx); - if (!bo->mmu_mapped) { - struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); + return ret; + } - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); - goto unlock; - } + ivpu_bo_lock(bo); + if (!bo->mmu_mapped) { + drm_WARN_ON(&vdev->drm, !bo->ctx); ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt, ivpu_bo_is_snooped(bo)); if (ret) { @@ -67,7 +77,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo) } unlock: - mutex_unlock(&bo->lock); + ivpu_bo_unlock(bo); return ret; } @@ -82,7 +92,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, if (!drm_dev_enter(&vdev->drm, &idx)) return -ENODEV; - mutex_lock(&bo->lock); + ivpu_bo_lock(bo); ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node); if (!ret) { @@ -92,9 +102,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); } - ivpu_dbg_bo(vdev, bo, "alloc"); - - mutex_unlock(&bo->lock); + ivpu_bo_unlock(bo); drm_dev_exit(idx); @@ -105,7 +113,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount)); + lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount)); if (bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); @@ -123,14 +131,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) if (bo->base.base.import_attach) return; - dma_resv_lock(bo->base.base.resv, NULL); if (bo->base.sgt) { dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(bo->base.sgt); kfree(bo->base.sgt); bo->base.sgt = NULL; } - dma_resv_unlock(bo->base.base.resv); } void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) @@ -142,12 +148,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m mutex_lock(&vdev->bo_list_lock); list_for_each_entry(bo, &vdev->bo_list, bo_list_node) { - mutex_lock(&bo->lock); + ivpu_bo_lock(bo); if (bo->ctx == ctx) { ivpu_dbg_bo(vdev, bo, "unbind"); ivpu_bo_unbind_locked(bo); } - mutex_unlock(&bo->lock); + ivpu_bo_unlock(bo); } mutex_unlock(&vdev->bo_list_lock); } @@ -167,12 +173,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */ INIT_LIST_HEAD(&bo->bo_list_node); - mutex_init(&bo->lock); return &bo->base.base; } -static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags) +static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id) { struct drm_gem_shmem_object *shmem; struct ivpu_bo *bo; @@ -190,6 +195,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla return ERR_CAST(shmem); bo = to_ivpu_bo(&shmem->base); + bo->ctx_id = ctx_id; bo->base.map_wc = flags & DRM_IVPU_BO_WC; bo->flags = flags; 
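/*
 * Illustrative sketch, not part of the change: with the per-BO mutex removed,
 * the fields it guarded (vpu_addr, ctx, mmu_mapped) are protected by the GEM
 * object's own dma_resv lock, which is exactly what the new ivpu_bo_lock()/
 * ivpu_bo_unlock() helpers above wrap. A field update would then follow this
 * pattern (example_set_vpu_addr is a hypothetical name, shown only to
 * demonstrate the locking scheme):
 */
static void example_set_vpu_addr(struct ivpu_bo *bo, u64 vpu_addr)
{
	dma_resv_lock(bo->base.base.resv, NULL);	/* the reservation lock every GEM BO already carries */
	bo->vpu_addr = vpu_addr;			/* formerly guarded by the dedicated bo->lock mutex */
	dma_resv_unlock(bo->base.base.resv);
}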
@@ -197,6 +203,8 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla list_add_tail(&bo->bo_list_node, &vdev->bo_list); mutex_unlock(&vdev->bo_list_lock); + ivpu_dbg_bo(vdev, bo, "alloc"); + return bo; } @@ -234,10 +242,14 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj) list_del(&bo->bo_list_node); mutex_unlock(&vdev->bo_list_lock); - drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); + drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) && + !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); + drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0); + drm_WARN_ON(&vdev->drm, bo->base.vaddr); ivpu_bo_unbind_locked(bo); - mutex_destroy(&bo->lock); + drm_WARN_ON(&vdev->drm, bo->mmu_mapped); + drm_WARN_ON(&vdev->drm, bo->ctx); drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1); drm_gem_shmem_free(&bo->base); @@ -271,7 +283,7 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi if (size == 0) return -EINVAL; - bo = ivpu_bo_alloc(vdev, size, args->flags); + bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id); if (IS_ERR(bo)) { ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)", bo, file_priv->ctx.id, args->size, args->flags); @@ -279,7 +291,10 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi } ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); - if (!ret) + if (ret) + ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)", + bo, file_priv->ctx.id, args->size, args->flags); + else args->vpu_addr = bo->vpu_addr; drm_gem_object_put(&bo->base.base); @@ -302,7 +317,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end)); drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size)); - bo = ivpu_bo_alloc(vdev, size, flags); + bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID); if (IS_ERR(bo)) { ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", bo, range->start, size, flags); @@ -318,9 +333,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, goto err_put; if (flags & DRM_IVPU_BO_MAPPABLE) { - dma_resv_lock(bo->base.base.resv, NULL); + ivpu_bo_lock(bo); ret = drm_gem_shmem_vmap(&bo->base, &map); - dma_resv_unlock(bo->base.base.resv); + ivpu_bo_unlock(bo); if (ret) goto err_put; @@ -343,9 +358,9 @@ void ivpu_bo_free(struct ivpu_bo *bo) struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr); if (bo->flags & DRM_IVPU_BO_MAPPABLE) { - dma_resv_lock(bo->base.base.resv, NULL); + ivpu_bo_lock(bo); drm_gem_shmem_vunmap(&bo->base, &map); - dma_resv_unlock(bo->base.base.resv); + ivpu_bo_unlock(bo); } drm_gem_object_put(&bo->base.base); @@ -364,12 +379,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file bo = to_ivpu_bo(obj); - mutex_lock(&bo->lock); + ivpu_bo_lock(bo); args->flags = bo->flags; args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node); args->vpu_addr = bo->vpu_addr; args->size = obj->size; - mutex_unlock(&bo->lock); + ivpu_bo_unlock(bo); drm_gem_object_put(obj); return ret; @@ -403,10 +418,10 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p) { - mutex_lock(&bo->lock); + ivpu_bo_lock(bo); drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u", - bo, bo->ctx ? 
bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size, + bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size, bo->flags, kref_read(&bo->base.base.refcount)); if (bo->base.pages) @@ -420,7 +435,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p) drm_printf(p, "\n"); - mutex_unlock(&bo->lock); + ivpu_bo_unlock(bo); } void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p) diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index d975000abd785..07bffe98c9635 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -17,10 +17,10 @@ struct ivpu_bo { struct list_head bo_list_node; struct drm_mm_node mm_node; - struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */ u64 vpu_addr; u32 flags; u32 job_status; /* Valid only for command buffer */ + u32 ctx_id; bool mmu_mapped; }; diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index 1214f155afa11..37ef8ce642109 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -110,6 +110,26 @@ static void timeouts_init(struct ivpu_device *vdev) } } +static void priority_bands_init(struct ivpu_device *vdev) +{ + /* Idle */ + vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0; + vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000; + vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000; + /* Normal */ + vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000; + vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000; + vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000; + /* Focus */ + vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000; + vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000; + vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000; + /* Realtime */ + vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0; + vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000; + vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000; +} + static void memory_ranges_init(struct ivpu_device *vdev) { if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { @@ -248,6 +268,7 @@ int ivpu_hw_init(struct ivpu_device *vdev) { ivpu_hw_btrs_info_init(vdev); ivpu_hw_btrs_freq_ratios_init(vdev); + priority_bands_init(vdev); memory_ranges_init(vdev); platform_init(vdev); wa_init(vdev); diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index 1e85306bcd065..1c016b99a0fdd 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -45,6 +45,11 @@ struct ivpu_hw_info { u8 pn_ratio; u32 profiling_freq; } pll; + struct { + u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS]; + u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS]; + u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS]; + } hws; u32 tile_fuse; u32 sku; u16 config; diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h index d0b795b344c7f..fc0ee8d637f96 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h +++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h @@ -115,6 +115,8 @@ #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY 0x00030068u #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK GENMASK(7, 0) +#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST1_DLY_MASK GENMASK(15, 8) +#define 
VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST2_DLY_MASK GENMASK(23, 16) #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0) diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c index 60b33fc59d96e..bd2582a8c80f3 100644 --- a/drivers/accel/ivpu/ivpu_hw_ip.c +++ b/drivers/accel/ivpu/ivpu_hw_ip.c @@ -8,15 +8,12 @@ #include "ivpu_hw.h" #include "ivpu_hw_37xx_reg.h" #include "ivpu_hw_40xx_reg.h" +#include "ivpu_hw_btrs.h" #include "ivpu_hw_ip.h" #include "ivpu_hw_reg_io.h" #include "ivpu_mmu.h" #include "ivpu_pm.h" -#define PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT 0 -#define PWR_ISLAND_EN_POST_DLY_FREQ_HIGH 18 -#define PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT 3 -#define PWR_ISLAND_STATUS_DLY_FREQ_HIGH 46 #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) #define TIM_SAFE_ENABLE 0xf1d0dead @@ -268,20 +265,15 @@ void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev) idle_gen_drive_40xx(vdev, false); } -static void pwr_island_delay_set_50xx(struct ivpu_device *vdev) +static void +pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status) { - u32 val, post, status; - - if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) { - post = PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT; - status = PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT; - } else { - post = PWR_ISLAND_EN_POST_DLY_FREQ_HIGH; - status = PWR_ISLAND_STATUS_DLY_FREQ_HIGH; - } + u32 val; val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY); val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val); + val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val); + val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val); REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val); val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY); @@ -686,13 +678,36 @@ static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable) REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val); } +static void pwr_island_delay_set(struct ivpu_device *vdev) +{ + bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH; + u32 post, post1, post2, status; + + if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX) + return; + + switch (ivpu_device_id(vdev)) { + case PCI_DEVICE_ID_PTL_P: + post = high ? 18 : 0; + post1 = 0; + post2 = 0; + status = high ? 
46 : 3; + break; + + default: + dump_stack(); + ivpu_err(vdev, "Unknown device ID\n"); + return; + } + + pwr_island_delay_set_50xx(vdev, post, post1, post2, status); +} + int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev) { int ret; - if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_50XX) - pwr_island_delay_set_50xx(vdev); - + pwr_island_delay_set(vdev); pwr_island_enable(vdev); ret = wait_for_pwr_island_status(vdev, 0x1); diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 27121c66e48f8..e631098718b15 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -60,6 +60,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, err_free_primary: ivpu_bo_free(cmdq->primary_preempt_buf); + cmdq->primary_preempt_buf = NULL; return -ENOMEM; } @@ -69,10 +70,10 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) return; - drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf); - drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf); - ivpu_bo_free(cmdq->primary_preempt_buf); - ivpu_bo_free(cmdq->secondary_preempt_buf); + if (cmdq->primary_preempt_buf) + ivpu_bo_free(cmdq->primary_preempt_buf); + if (cmdq->secondary_preempt_buf) + ivpu_bo_free(cmdq->secondary_preempt_buf); } static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) @@ -85,27 +86,16 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) if (!cmdq) return NULL; - ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next, - GFP_KERNEL); - if (ret < 0) { - ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret); - goto err_free_cmdq; - } - cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); if (!cmdq->mem) - goto err_erase_xa; + goto err_free_cmdq; ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); if (ret) - goto err_free_cmdq_mem; + ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n"); return cmdq; -err_free_cmdq_mem: - ivpu_bo_free(cmdq->mem); -err_erase_xa: - xa_erase(&vdev->db_xa, cmdq->db_id); err_free_cmdq: kfree(cmdq); return NULL; @@ -128,13 +118,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq struct ivpu_device *vdev = file_priv->vdev; int ret; - ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id, + ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id, task_pid_nr(current), engine, cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); if (ret) return ret; - ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id, + ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id, priority); if (ret) return ret; @@ -148,20 +138,21 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq * int ret; if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) - ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id, + ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id, cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); else ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); if (!ret) - ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id); + ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n", + cmdq->db_id, cmdq->id, file_priv->ctx.id); return ret; } 
static int -ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority) +ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority) { struct ivpu_device *vdev = file_priv->vdev; struct vpu_job_queue_header *jobq_header; @@ -177,13 +168,13 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem); jobq_header = &cmdq->jobq->header; - jobq_header->engine_idx = engine; + jobq_header->engine_idx = VPU_ENGINE_COMPUTE; jobq_header->head = 0; jobq_header->tail = 0; wmb(); /* Flush WC buffer for jobq->header */ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { - ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority); + ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority); if (ret) return ret; } @@ -210,9 +201,9 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm cmdq->db_registered = false; if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { - ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id); + ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id); if (!ret) - ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id); + ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id); } ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id); @@ -222,55 +213,104 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm return 0; } -static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine, - u8 priority) +static int ivpu_db_id_alloc(struct ivpu_device *vdev, u32 *db_id) +{ + int ret; + u32 id; + + ret = xa_alloc_cyclic(&vdev->db_xa, &id, NULL, vdev->db_limit, &vdev->db_next, GFP_KERNEL); + if (ret < 0) + return ret; + + *db_id = id; + return 0; +} + +static int ivpu_cmdq_id_alloc(struct ivpu_file_priv *file_priv, u32 *cmdq_id) { - int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority); - struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx]; + int ret; + u32 id; + + ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL, file_priv->cmdq_limit, + &file_priv->cmdq_id_next, GFP_KERNEL); + if (ret < 0) + return ret; + + *cmdq_id = id; + return 0; +} + +static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority) +{ + struct ivpu_device *vdev = file_priv->vdev; + struct ivpu_cmdq *cmdq; + unsigned long id; int ret; lockdep_assert_held(&file_priv->lock); + xa_for_each(&file_priv->cmdq_xa, id, cmdq) + if (cmdq->priority == priority) + break; + if (!cmdq) { cmdq = ivpu_cmdq_alloc(file_priv); - if (!cmdq) + if (!cmdq) { + ivpu_err(vdev, "Failed to allocate command queue\n"); return NULL; - file_priv->cmdq[cmdq_idx] = cmdq; + } + + ret = ivpu_db_id_alloc(vdev, &cmdq->db_id); + if (ret) { + ivpu_err(file_priv->vdev, "Failed to allocate doorbell ID: %d\n", ret); + goto err_free_cmdq; + } + + ret = ivpu_cmdq_id_alloc(file_priv, &cmdq->id); + if (ret) { + ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret); + goto err_erase_db_id; + } + + cmdq->priority = priority; + ret = xa_err(xa_store(&file_priv->cmdq_xa, cmdq->id, cmdq, GFP_KERNEL)); + if (ret) { + ivpu_err(vdev, "Failed to store command queue in cmdq_xa: %d\n", ret); + goto err_erase_cmdq_id; + } } - ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority); - if (ret) - return NULL; + ret = ivpu_cmdq_init(file_priv, cmdq, priority); + if (ret) { + ivpu_err(vdev, "Failed to initialize command queue: %d\n", ret); + goto 
err_free_cmdq; + } return cmdq; + +err_erase_cmdq_id: + xa_erase(&file_priv->cmdq_xa, cmdq->id); +err_erase_db_id: + xa_erase(&vdev->db_xa, cmdq->db_id); +err_free_cmdq: + ivpu_cmdq_free(file_priv, cmdq); + return NULL; } -static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority) +void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv) { - int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority); - struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx]; + struct ivpu_cmdq *cmdq; + unsigned long cmdq_id; lockdep_assert_held(&file_priv->lock); - if (cmdq) { - file_priv->cmdq[cmdq_idx] = NULL; + xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) { + xa_erase(&file_priv->cmdq_xa, cmdq_id); ivpu_cmdq_fini(file_priv, cmdq); ivpu_cmdq_free(file_priv, cmdq); } } -void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv) -{ - u16 engine; - u8 priority; - - lockdep_assert_held(&file_priv->lock); - - for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) - for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) - ivpu_cmdq_release_locked(file_priv, engine, priority); -} - /* * Mark the doorbell as unregistered * This function needs to be called when the VPU hardware is restarted @@ -279,20 +319,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv) */ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv) { - u16 engine; - u8 priority; + struct ivpu_cmdq *cmdq; + unsigned long cmdq_id; mutex_lock(&file_priv->lock); - for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) { - for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) { - int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority); - struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx]; - - if (cmdq) - cmdq->db_registered = false; - } - } + xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) + cmdq->db_registered = false; mutex_unlock(&file_priv->lock); } @@ -312,17 +345,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev) static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv) { - u16 engine; - u8 priority; - - for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) { - for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) { - int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority); + struct ivpu_cmdq *cmdq; + unsigned long cmdq_id; - if (file_priv->cmdq[cmdq_idx]) - ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]); - } - } + xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) + ivpu_cmdq_fini(file_priv, cmdq); } void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv) @@ -349,8 +376,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) /* Check if there is space left in job queue */ if (next_entry == header->head) { - ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n", - job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail); + ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n", + job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail); return -EBUSY; } @@ -363,10 +390,16 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW && (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) { - entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; - entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); - entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; - entry->secondary_preempt_buf_size = 
ivpu_bo_size(cmdq->secondary_preempt_buf); + if (cmdq->primary_preempt_buf) { + entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; + entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); + } + + if (cmdq->secondary_preempt_buf) { + entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; + entry->secondary_preempt_buf_size = + ivpu_bo_size(cmdq->secondary_preempt_buf); + } } wmb(); /* Ensure that tail is updated after filling entry */ @@ -558,7 +591,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority) mutex_lock(&vdev->submitted_jobs_lock); mutex_lock(&file_priv->lock); - cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority); + cmdq = ivpu_cmdq_acquire(file_priv, priority); if (!cmdq) { ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n", file_priv->ctx.id, job->engine_idx, priority); @@ -698,7 +731,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) int idx, ret; u8 priority; - if (params->engine > DRM_IVPU_ENGINE_COPY) + if (params->engine != DRM_IVPU_ENGINE_COMPUTE) return -EINVAL; if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) @@ -816,7 +849,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work) unsigned long id; if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) - ivpu_jsm_reset_engine(vdev, 0); + if (ivpu_jsm_reset_engine(vdev, 0)) + return; mutex_lock(&vdev->context_list_lock); xa_for_each(&vdev->context_xa, ctx_id, file_priv) { @@ -832,7 +866,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work) if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) return; - ivpu_jsm_hws_resume_engine(vdev, 0); + if (ivpu_jsm_hws_resume_engine(vdev, 0)) + return; /* * In hardware scheduling mode NPU already has stopped processing jobs * and won't send us any further notifications, thus we have to free job related resources diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index 0ae77f0638fad..af1ed039569cd 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -28,8 +28,10 @@ struct ivpu_cmdq { struct ivpu_bo *secondary_preempt_buf; struct ivpu_bo *mem; u32 entry_count; + u32 id; u32 db_id; bool db_registered; + u8 priority; }; /** diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c index ae91ad24d10d8..7c08308d5725d 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.c +++ b/drivers/accel/ivpu/ivpu_jsm_msg.c @@ -7,6 +7,8 @@ #include "ivpu_hw.h" #include "ivpu_ipc.h" #include "ivpu_jsm_msg.h" +#include "ivpu_pm.h" +#include "vpu_jsm_api.h" const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type) { @@ -132,7 +134,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat) struct vpu_jsm_msg resp; int ret; - if (engine > VPU_ENGINE_COPY) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.query_engine_hb.engine_idx = engine; @@ -155,15 +157,17 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine) struct vpu_jsm_msg resp; int ret; - if (engine > VPU_ENGINE_COPY) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.engine_reset.engine_idx = engine; ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); - if (ret) + if (ret) { ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret); + ivpu_pm_trigger_recovery(vdev, "Engine reset failed"); + } return ret; } @@ -174,7 +178,7 @@ int 
ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id struct vpu_jsm_msg resp; int ret; - if (engine > VPU_ENGINE_COPY) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.engine_preempt.engine_idx = engine; @@ -346,15 +350,17 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine) struct vpu_jsm_msg resp; int ret; - if (engine >= VPU_ENGINE_NB) + if (engine != VPU_ENGINE_COMPUTE) return -EINVAL; req.payload.hws_resume_engine.engine_idx = engine; ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); - if (ret) + if (ret) { ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret); + ivpu_pm_trigger_recovery(vdev, "Engine resume failed"); + } return ret; } @@ -409,26 +415,18 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev) { struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP }; struct vpu_jsm_msg resp; + struct ivpu_hw_info *hw = vdev->hw; + struct vpu_ipc_msg_payload_hws_priority_band_setup *setup = + &req.payload.hws_priority_band_setup; int ret; - /* Idle */ - req.payload.hws_priority_band_setup.grace_period[0] = 0; - req.payload.hws_priority_band_setup.process_grace_period[0] = 50000; - req.payload.hws_priority_band_setup.process_quantum[0] = 160000; - /* Normal */ - req.payload.hws_priority_band_setup.grace_period[1] = 50000; - req.payload.hws_priority_band_setup.process_grace_period[1] = 50000; - req.payload.hws_priority_band_setup.process_quantum[1] = 300000; - /* Focus */ - req.payload.hws_priority_band_setup.grace_period[2] = 50000; - req.payload.hws_priority_band_setup.process_grace_period[2] = 50000; - req.payload.hws_priority_band_setup.process_quantum[2] = 200000; - /* Realtime */ - req.payload.hws_priority_band_setup.grace_period[3] = 0; - req.payload.hws_priority_band_setup.process_grace_period[3] = 50000; - req.payload.hws_priority_band_setup.process_quantum[3] = 200000; - - req.payload.hws_priority_band_setup.normal_band_percentage = 10; + for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE; + band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) { + setup->grace_period[band] = hw->hws.grace_period[band]; + setup->process_grace_period[band] = hw->hws.process_grace_period[band]; + setup->process_quantum[band] = hw->hws.process_quantum[band]; + } + setup->normal_band_percentage = 10; ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP, &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 4e88f9fc2a289..b6588b7fa8986 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -504,10 +504,6 @@ struct aml_resource_pin_group_config { #define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION 1 /* ACPI 6.2 */ -/* restore default alignment */ - -#pragma pack() - /* Union of all resource descriptors, so we can allocate the worst case */ union aml_resource { @@ -562,6 +558,10 @@ union aml_resource { u8 byte_item; }; +/* restore default alignment */ + +#pragma pack() + /* Interfaces used by both the disassembler and compiler */ void diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index e809c2aed78ae..a232746d150a7 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(AE_NULL_OBJECT); } + if (this_walk_state->num_operands 
< obj_desc->method.param_count) { + ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]", + acpi_ut_get_node_name(method_node))); + + return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG); + } + /* Init for new method, possibly wait on method mutex */ status = diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index fb9ed5e1da89d..2bdae8a25e084 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS]; u32 arg_count = 0; u32 index = walk_state->num_operands; + u32 prev_num_operands = walk_state->num_operands; + u32 new_num_operands; u32 i; ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg); @@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, /* Create the interpreter arguments, in reverse order */ + new_num_operands = index; index--; for (i = 0; i < arg_count; i++) { arg = arguments[index]; @@ -720,7 +723,11 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, * pop everything off of the operand stack and delete those * objects */ - acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state); + walk_state->num_operands = i; + acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state); + + /* Restore operand count */ + walk_state->num_operands = prev_num_operands; ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index)); return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c index 5241f4c01c765..89a4ac447a2be 100644 --- a/drivers/acpi/acpica/exserial.c +++ b/drivers/acpi/acpica/exserial.c @@ -201,6 +201,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc, function = ACPI_READ; break; + case ACPI_ADR_SPACE_FIXED_HARDWARE: + + buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE; + function = ACPI_READ; + break; + default: return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); } diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index 54471083ba545..0bce1baaa62b3 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -636,7 +636,8 @@ acpi_status acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, union acpi_parse_object *op, acpi_status status) { - acpi_status status2; + acpi_status return_status = status; + u8 ascending = TRUE; ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state); @@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, op)); do { if (op) { - if (walk_state->ascending_callback != NULL) { + if (ascending && walk_state->ascending_callback != NULL) { walk_state->op = op; walk_state->op_info = acpi_ps_get_opcode_info(op->common. 
@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, } if (status == AE_CTRL_TERMINATE) { - status = AE_OK; - - /* Clean up */ - do { - if (op) { - status2 = - acpi_ps_complete_this_op - (walk_state, op); - if (ACPI_FAILURE - (status2)) { - return_ACPI_STATUS - (status2); - } - } - - acpi_ps_pop_scope(& - (walk_state-> - parser_state), - &op, - &walk_state-> - arg_types, - &walk_state-> - arg_count); - - } while (op); - - return_ACPI_STATUS(status); + ascending = FALSE; + return_status = AE_CTRL_TERMINATE; } else if (ACPI_FAILURE(status)) { /* First error is most important */ - (void) - acpi_ps_complete_this_op(walk_state, - op); - return_ACPI_STATUS(status); + ascending = FALSE; + return_status = status; } } - status2 = acpi_ps_complete_this_op(walk_state, op); - if (ACPI_FAILURE(status2)) { - return_ACPI_STATUS(status2); + status = acpi_ps_complete_this_op(walk_state, op); + if (ACPI_FAILURE(status)) { + ascending = FALSE; + if (ACPI_SUCCESS(return_status) || + return_status == AE_CTRL_TERMINATE) { + return_status = status; + } } } @@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, } while (op); - return_ACPI_STATUS(status); + return_ACPI_STATUS(return_status); } diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index 27384ee245f09..f92010e667cda 100644 --- a/drivers/acpi/acpica/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c @@ -272,18 +272,13 @@ u8 acpi_rs_get_address_common(struct acpi_resource *resource, union aml_resource *aml) { - struct aml_resource_address address; - ACPI_FUNCTION_ENTRY(); - /* Avoid undefined behavior: member access within misaligned address */ - - memcpy(&address, aml, sizeof(address)); - /* Validate the Resource Type */ - if ((address.resource_type > 2) && - (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) { + if ((aml->address.resource_type > 2) && + (aml->address.resource_type < 0xC0) && + (aml->address.resource_type != 0x0A)) { return (FALSE); } @@ -304,7 +299,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource, /* Generic resource type, just grab the type_specific byte */ resource->data.address.info.type_specific = - address.specific_flags; + aml->address.specific_flags; } return (TRUE); diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index 6e7a152d64595..242daf45e20ef 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c @@ -608,18 +608,12 @@ acpi_rs_get_list_length(u8 *aml_buffer, case ACPI_RESOURCE_NAME_SERIAL_BUS:{ - /* Avoid undefined behavior: member access within misaligned address */ - - struct aml_resource_common_serialbus - common_serial_bus; - memcpy(&common_serial_bus, aml_resource, - sizeof(common_serial_bus)); - minimum_aml_resource_length = acpi_gbl_resource_aml_serial_bus_sizes - [common_serial_bus.type]; + [aml_resource->common_serial_bus.type]; extra_struct_bytes += - common_serial_bus.resource_length - + aml_resource->common_serial_bus. 
+ resource_length - minimum_aml_resource_length; break; } @@ -688,16 +682,10 @@ acpi_rs_get_list_length(u8 *aml_buffer, */ if (acpi_ut_get_resource_type(aml_buffer) == ACPI_RESOURCE_NAME_SERIAL_BUS) { - - /* Avoid undefined behavior: member access within misaligned address */ - - struct aml_resource_common_serialbus common_serial_bus; - memcpy(&common_serial_bus, aml_resource, - sizeof(common_serial_bus)); - buffer_size = acpi_gbl_resource_struct_serial_bus_sizes - [common_serial_bus.type] + extra_struct_bytes; + [aml_resource->common_serial_bus.type] + + extra_struct_bytes; } else { buffer_size = acpi_gbl_resource_struct_sizes[resource_index] + diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 164c96e063c6e..e46efaa889cdd 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c @@ -55,21 +55,15 @@ acpi_rs_convert_aml_to_resources(u8 * aml, aml_resource = ACPI_CAST_PTR(union aml_resource, aml); if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) { - - /* Avoid undefined behavior: member access within misaligned address */ - - struct aml_resource_common_serialbus common_serial_bus; - memcpy(&common_serial_bus, aml_resource, - sizeof(common_serial_bus)); - - if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) { + if (aml_resource->common_serial_bus.type > + AML_RESOURCE_MAX_SERIALBUSTYPE) { conversion_table = NULL; } else { /* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */ conversion_table = acpi_gbl_convert_resource_serial_bus_dispatch - [common_serial_bus.type]; + [aml_resource->common_serial_bus.type]; } } else { conversion_table = diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index 42b30b9f93128..7fad03c5252c3 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args) pos = string; - if (size != ACPI_UINT32_MAX) { - end = string + size; - } else { - end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX); - } + size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string)); + end = string + size; for (; *format; ++format) { if (*format != '%') { diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index cff7901f7866e..e1cc3d3487508 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -361,20 +361,16 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state, aml_resource = ACPI_CAST_PTR(union aml_resource, aml); if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) { - /* Avoid undefined behavior: member access within misaligned address */ - - struct aml_resource_common_serialbus common_serial_bus; - memcpy(&common_serial_bus, aml_resource, - sizeof(common_serial_bus)); - /* Validate the bus_type field */ - if ((common_serial_bus.type == 0) || - (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) { + if ((aml_resource->common_serial_bus.type == 0) || + (aml_resource->common_serial_bus.type > + AML_RESOURCE_MAX_SERIALBUSTYPE)) { if (walk_state) { ACPI_ERROR((AE_INFO, "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X", - common_serial_bus.type)); + aml_resource->common_serial_bus. 
+ type)); } return (AE_AML_INVALID_RESOURCE_TYPE); } diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 3cfe7e7475f2f..070c07d68dfb2 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -23,6 +23,7 @@ config ACPI_APEI_GHES select ACPI_HED select IRQ_WORK select GENERIC_ALLOCATOR + select ARM_SDE_INTERFACE if ARM64 help Generic Hardware Error Source provides a way to report platform hardware errors (such as that from chipset). It diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index cff6685fa6cc6..6cf40e8ac321e 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1612,7 +1612,7 @@ void __init acpi_ghes_init(void) { int rc; - sdei_init(); + acpi_sdei_init(); if (acpi_disabled) return; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 16917dc3ad604..6234055d25984 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1444,8 +1444,10 @@ static int __init acpi_init(void) } acpi_kobj = kobject_create_and_add("acpi", firmware_kobj); - if (!acpi_kobj) - pr_debug("%s: kset create error\n", __func__); + if (!acpi_kobj) { + pr_err("Failed to register kobject\n"); + return -ENOMEM; + } init_prmt(); acpi_init_pcc(); diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index e78e3754d99e1..dab941dc984a9 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -463,7 +463,7 @@ bool cppc_allow_fast_switch(void) struct cpc_desc *cpc_ptr; int cpu; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { cpc_ptr = per_cpu(cpc_desc_ptr, cpu); desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF]; if (!CPC_IN_SYSTEM_MEMORY(desired_reg) && diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index df9328c850bd3..f2c943b934be0 100644 --- a/drivers/acpi/osi.c +++ b/drivers/acpi/osi.c @@ -42,7 +42,6 @@ static struct acpi_osi_entry osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = { {"Module Device", true}, {"Processor Device", true}, - {"3.0 _SCP Extensions", true}, {"Processor Aggregator Device", true}, }; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 14c7bac4100b4..7d59c6c9185fc 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -534,7 +534,7 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { */ static const struct dmi_system_id irq1_edge_low_force_override[] = { { - /* MECHREV Jiaolong17KS Series GM7XG0M */ + /* MECHREVO Jiaolong17KS Series GM7XG0M */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"), }, diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 78db38c7076e4..125d7df8f30ae 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -803,7 +803,13 @@ static int acpi_thermal_add(struct acpi_device *device) acpi_thermal_aml_dependency_fix(tz); - /* Get trip points [_CRT, _PSV, etc.] (required). */ + /* + * Set the cooling mode [_SCP] to active cooling. This needs to happen before + * we retrieve the trip point values. + */ + acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE); + + /* Get trip points [_ACi, _PSV, etc.] (required). */ acpi_thermal_get_trip_points(tz); crit_temp = acpi_thermal_get_critical_trip(tz); @@ -814,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device) if (result) goto free_memory; - /* Set the cooling mode [_SCP] to active cooling. */ - acpi_execute_simple_method(tz->device->handle, "_SCP", - ACPI_THERMAL_MODE_ACTIVE); - /* Determine the default polling frequency [_TZP]. 
*/ if (tzp) tz->polling_frequency = tzp; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 650122deb480d..a6a66d7947638 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1410,8 +1410,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) static bool ahci_broken_lpm(struct pci_dev *pdev) { + /* + * Platforms with LPM problems. + * If driver_data is NULL, there is no existing BIOS version with + * functioning LPM. + * If driver_data is non-NULL, then driver_data contains the DMI BIOS + * build date of the first BIOS version with functioning LPM (i.e. older + * BIOS versions have broken LPM). + */ static const struct dmi_system_id sysids[] = { - /* Various Lenovo 50 series have LPM issues with older BIOSen */ { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), @@ -1446,6 +1453,29 @@ static bool ahci_broken_lpm(struct pci_dev *pdev) */ .driver_data = "20180310", /* 2.35 */ }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"), + }, + /* 320 is broken, there is no known good version. */ + }, + { + /* + * AMD 500 Series Chipset SATA Controller [1022:43eb] + * on this motherboard timeouts on ports 5 and 6 when + * LPM is enabled, at least with WDC WD20EFAX-68FB5N0 + * hard drives. LPM with the same drive works fine on + * all other ports on the same controller. + */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, + "ROG STRIX B550-F GAMING (WI-FI)"), + }, + /* 3621 is broken, there is no known good version. */ + }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); @@ -1455,6 +1485,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev) if (!dmi) return false; + if (!dmi->driver_data) + return true; + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index d36e71f475abd..39a350755a1ba 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask); /** - * ata_acpi_cbl_80wire - Check for 80 wire cable + * ata_acpi_cbl_pata_type - Return PATA cable type * @ap: Port to check - * @gtm: GTM data to use * - * Return 1 if the @gtm indicates the BIOS selected an 80wire mode. 
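The ata_acpi_cbl_80wire() to ata_acpi_cbl_pata_type() rewrite here replaces a boolean 80-wire check with a function that returns the cable type directly. The underlying inference rule is compact enough to state as a hypothetical helper (an illustration, not part of the patch), using the standard libata constants:

	static int cable_from_udma_mask(unsigned int udma_mask)
	{
		if (!udma_mask)
			return ATA_CBL_PATA_UNK;	/* no UDMA mode programmed */
		if (udma_mask & ~ATA_UDMA_MASK_40C)
			return ATA_CBL_PATA80;		/* above UDMA/33 needs 80 wires */
		return ATA_CBL_PATA40;
	}

The new function applies this per enabled device on the link and additionally falls back to ATA_CBL_PATA40 when no _GTM data is available.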
+ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS */ -int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) +int ata_acpi_cbl_pata_type(struct ata_port *ap) { struct ata_device *dev; + int ret = ATA_CBL_PATA_UNK; + const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); + + if (!gtm) + return ATA_CBL_PATA40; ata_for_each_dev(dev, &ap->link, ENABLED) { unsigned int xfer_mask, udma_mask; @@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) xfer_mask = ata_acpi_gtm_xfermask(dev, gtm); ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask); - if (udma_mask & ~ATA_UDMA_MASK_40C) - return 1; + ret = ATA_CBL_PATA40; + + if (udma_mask & ~ATA_UDMA_MASK_40C) { + ret = ATA_CBL_PATA80; + break; + } } - return 0; + return ret; } -EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire); +EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type); static void ata_acpi_gtf_to_tf(struct ata_device *dev, const struct ata_acpi_gtf *gtf, diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index b811efd2cc346..73e81e160c91f 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -27,7 +27,7 @@ #include #include -#ifdef CONFIG_X86_32 +#if defined(CONFIG_X86) && defined(CONFIG_X86_32) #include static int use_msr; module_param_named(msr, use_msr, int, 0644); diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 696b99720dcbd..bb80e7800dcbe 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) { two drives */ if (ata66 & (0x10100000 >> (16 * ap->port_no))) return ATA_CBL_PATA80; + /* Check with ACPI so we can spot BIOS reported SATA bridges */ - if (ata_acpi_init_gtm(ap) && - ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap))) - return ATA_CBL_PATA80; - return ATA_CBL_PATA40; + return ata_acpi_cbl_pata_type(ap); } static int via_pre_reset(struct ata_link *link, unsigned long deadline) @@ -368,7 +366,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask) } if (dev->class == ATA_DEV_ATAPI && - dmi_check_system(no_atapi_dma_dmi_table)) { + (dmi_check_system(no_atapi_dma_dmi_table) || + config->id == PCI_DEVICE_ID_VIA_6415)) { ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n"); mask &= ATA_MASK_PIO; } diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index d4aa0f353b6c8..eeae160c898d3 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -288,7 +288,9 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) struct sk_buff *new_skb; int result = 0; - if (!skb->len) return 0; + if (skb->len < sizeof(struct atmtcp_hdr)) + goto done; + dev = vcc->dev_data; hdr = (struct atmtcp_hdr *) skb->data; if (hdr->length == ATMTCP_HDR_MAGIC) { diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index a876024d8a05f..63d41320cd5cf 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb))) + return -ENOMEM; error = -EINVAL; @@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue, paddr = dma_map_single(&card->pcidev->dev, skb->data, skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE); + if (dma_mapping_error(&card->pcidev->dev, paddr)) + goto outpoolrm; IDT77252_PRV_PADDR(skb) = paddr; if (push_rx_skb(card, skb, 
queue)) { @@ -1871,6 +1875,7 @@ add_rx_skb(struct idt77252_dev *card, int queue, dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb), skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE); +outpoolrm: handle = IDT77252_PRV_POOL(skb); card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index d88f721cf68cd..02870e70ed595 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); CPU_SHOW_VULN_FALLBACK(gds); CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); CPU_SHOW_VULN_FALLBACK(indirect_target_selection); +CPU_SHOW_VULN_FALLBACK(tsa); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -616,6 +617,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); +static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -633,6 +635,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_gather_data_sampling.attr, &dev_attr_reg_file_data_sampling.attr, &dev_attr_indirect_target_selection.attr, + &dev_attr_tsa.attr, NULL }; diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 0e60dd650b5e0..70db08f3ac6fa 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs); void platform_device_msi_free_irqs_all(struct device *dev) { msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); + msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN); } EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 1abe61f11525d..faf4cdec23f04 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -916,6 +916,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) if (!dev->power.is_suspended) goto Complete; + dev->power.is_suspended = false; + if (dev->power.direct_complete) { /* Match the pm_runtime_disable() in __device_suspend(). */ pm_runtime_enable(dev); @@ -971,7 +973,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) End: error = dpm_run_callback(callback, dev, state, info); - dev->power.is_suspended = false; device_unlock(dev); dpm_watchdog_clear(&wd); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 04113adb092b5..99f25d6b2027a 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1003,7 +1003,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) * If 'expires' is after the current time, we've been called * too early. */ - if (expires > 0 && expires < ktime_get_mono_fast_ns()) { + if (expires > 0 && expires <= ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? 
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index eb6eb25b343ba..53b3f0061ad12 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, if (prop->is_inline) return -EINVAL; - if (index * sizeof(*ref) >= prop->length) + if ((index + 1) * sizeof(*ref) > prop->length) return -ENOENT; ref_array = prop->pointer; diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 749ae1246f4cf..d35caa3c69e15 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -80,6 +80,7 @@ enum { DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */ DEVFL_FREED = (1<<8), /* device has been cleaned up */ + DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */ }; enum { diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 92b06d1de4cc7..6c94cfd1c480e 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer) utgts = count_targets(d, NULL); - if (d->flags & DEVFL_TKILL) { + if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) { spin_unlock_irqrestore(&d->lock, flags); return; } @@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer) * to clean up. */ list_splice(&flist, &d->factive[0]); - aoedev_downdev(d); + d->flags |= DEVFL_DEAD; + queue_work(aoe_wq, &d->work); goto out; } @@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work) { struct aoedev *d = container_of(work, struct aoedev, work); + if (d->flags & DEVFL_DEAD) + aoedev_downdev(d); + if (d->flags & DEVFL_GDALLOC) aoeblk_gdalloc(d); diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 3523dd82d7a00..4240e11adfb76 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -198,9 +198,13 @@ aoedev_downdev(struct aoedev *d) { struct aoetgt *t, **tt, **te; struct list_head *head, *pos, *nx; + struct request *rq, *rqnext; int i; + unsigned long flags; - d->flags &= ~DEVFL_UP; + spin_lock_irqsave(&d->lock, flags); + d->flags &= ~(DEVFL_UP | DEVFL_DEAD); + spin_unlock_irqrestore(&d->lock, flags); /* clean out active and to-be-retransmitted buffers */ for (i = 0; i < NFACTIVE; i++) { @@ -223,6 +227,13 @@ aoedev_downdev(struct aoedev *d) /* clean out the in-process request (if any) */ aoe_failip(d); + /* clean out any queued block requests */ + list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) { + list_del_init(&rq->queuelist); + blk_mq_start_request(rq); + blk_mq_end_request(rq, BLK_STS_IOERR); + } + /* fast fail all pending I/O */ if (d->blkq) { /* UP is cleared, freeze+quiesce to insure all are errored */ diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 292f127cae0ab..02fa8106ef549 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -224,19 +224,22 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) { - sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS; + sector_t aligned_sector = round_up(sector, PAGE_SECTORS); + sector_t aligned_end = round_down( + sector + (size >> SECTOR_SHIFT), PAGE_SECTORS); struct page *page; - size -= (aligned_sector - sector) * SECTOR_SIZE; + if (aligned_end <= aligned_sector) + return; + xa_lock(&brd->brd_pages); - while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) { + while (aligned_sector < aligned_end 
&& aligned_sector < rd_size * 2) { page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT); if (page) { __free_page(page); brd->brd_nr_pages--; } aligned_sector += PAGE_SECTORS; - size -= PAGE_SIZE; } xa_unlock(&brd->brd_pages); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 0843d229b0f76..e9a197474b9d8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -323,11 +323,14 @@ static void lo_complete_rq(struct request *rq) static void lo_rw_aio_do_completion(struct loop_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); + struct loop_device *lo = rq->q->queuedata; if (!atomic_dec_and_test(&cmd->ref)) return; kfree(cmd->bvec); cmd->bvec = NULL; + if (req_op(rq) == REQ_OP_WRITE) + file_end_write(lo->lo_backing_file); if (likely(!blk_should_fake_timeout(rq->q))) blk_mq_complete_request(rq); } @@ -402,9 +405,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_flags = 0; } - if (rw == ITER_SOURCE) + if (rw == ITER_SOURCE) { + file_start_write(lo->lo_backing_file); ret = file->f_op->write_iter(&cmd->iocb, &iter); - else + } else ret = file->f_op->read_iter(&cmd->iocb, &iter); lo_rw_aio_do_completion(cmd); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 450458267e6e6..c705acc4d6f4b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2136,9 +2136,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) goto out; } } - ret = nbd_start_device(nbd); - if (ret) - goto out; + if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], GFP_KERNEL); @@ -2154,6 +2152,8 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) goto out; } set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags); + + ret = nbd_start_device(nbd); out: mutex_unlock(&nbd->config_lock); if (!ret) { diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index a01a547c562f3..3b1a5cdd63116 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2457,6 +2457,10 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) if (copy_from_user(&info, argp, sizeof(info))) return -EFAULT; + if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth || + info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues) + return -EINVAL; + if (capable(CAP_SYS_ADMIN)) info.flags &= ~UBLK_F_UNPRIVILEGED_DEV; else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV)) diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 645047fb92fd2..51d6d91ed4041 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2705,7 +2705,7 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var) } __packed data; efi_status_t status; - unsigned long data_size = 0; + unsigned long data_size = sizeof(data); efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03, 0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31); @@ -2715,16 +2715,10 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var) if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return -EOPNOTSUPP; - status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size, - NULL); - - if (status != EFI_BUFFER_TOO_SMALL || !data_size) - return -EIO; - status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size, &data); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS || data_size != sizeof(data)) return -ENXIO; *dsbr_var = data.dsbr; diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index 
d225f0a37f985..34812bf7587d6 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -231,8 +231,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data) static int btintel_pcie_start_rx(struct btintel_pcie_data *data) { int i, ret; + struct rxq *rxq = &data->rxq; + + /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to overcome the + * hardware issues leading to a race condition in the firmware. + */ - for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) { + for (i = 0; i < rxq->count - 3; i++) { ret = btintel_pcie_submit_rx(data); if (ret) return ret; @@ -1147,8 +1152,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data) * + size of index * Number of queues(2) * type of index array(4) * + size of context information */ - total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd) - + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT; + total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT; + total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT; /* Add the sum of size of index array and size of ci struct */ total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info); @@ -1173,36 +1178,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data) data->dma_v_addr = v_addr; /* Setup descriptor count */ - data->txq.count = BTINTEL_DESCS_COUNT; - data->rxq.count = BTINTEL_DESCS_COUNT; + data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT; + data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT; /* Setup tfds */ data->txq.tfds_p_addr = p_addr; data->txq.tfds = v_addr; - p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT); - v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT); + p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT); + v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT); /* Setup urbd0 */ data->txq.urbd0s_p_addr = p_addr; data->txq.urbd0s = v_addr; - p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT); - v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT); + p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT); + v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT); /* Setup FRBD*/ data->rxq.frbds_p_addr = p_addr; data->rxq.frbds = v_addr; - p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT); - v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT); + p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT); + v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT); /* Setup urbd1 */ data->rxq.urbd1s_p_addr = p_addr; data->rxq.urbd1s = v_addr; - p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT); - v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT); + p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT); + v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT); /* Setup data buffers for txq */ err = btintel_pcie_setup_txq_bufs(data, &data->txq); diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h index 8b7824ad005a2..ee0eec0237afd 100644 --- a/drivers/bluetooth/btintel_pcie.h +++ b/drivers/bluetooth/btintel_pcie.h @@ -81,8 +81,11 @@ enum { /* Default interrupt timeout in msec */ #define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000 -/* The number of descriptors in TX/RX queues */ -#define BTINTEL_DESCS_COUNT 16 +/* The number of descriptors in TX queues */ +#define BTINTEL_PCIE_TX_DESCS_COUNT 32 + +/* The number of descriptors in RX queues */ +#define BTINTEL_PCIE_RX_DESCS_COUNT 64 /* Number of Queue for TX and RX * It indicates the index of the
IA(Index Array) @@ -104,9 +107,6 @@ enum { /* Doorbell vector for TFD */ #define BTINTEL_PCIE_TX_DB_VEC 0 -/* Number of pending RX requests for downlink */ -#define BTINTEL_PCIE_RX_MAX_QUEUE 6 - /* Doorbell vector for FRBD */ #define BTINTEL_PCIE_RX_DB_VEC 513 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index af2be0271806f..aa63852060500 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -672,6 +672,8 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK | @@ -712,6 +714,8 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, /* Additional Realtek 8723AE Bluetooth devices */ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 1837622ea625a..e6ad01d5e1d5d 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2363,10 +2363,17 @@ static int qca_serdev_probe(struct serdev_device *serdev) */ qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, "bluetooth"); - if (IS_ERR(qcadev->bt_power->pwrseq)) - return PTR_ERR(qcadev->bt_power->pwrseq); - break; + /* + * Some modules have BT_EN enabled via a hardware pull-up, + * meaning it is not defined in the DTS and is not controlled + * through the power sequence. In such cases, fall through + * to follow the legacy flow. 
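A minimal sketch of the optional-provider pattern the hci_qca comment above describes, assuming (as that comment states) that the sequencer may be legitimately absent when BT_EN is strapped high; legacy_power_on() is a hypothetical stand-in for the GPIO path the probe falls through to:

	pwrseq = devm_pwrseq_get(&serdev->dev, "bluetooth");
	if (IS_ERR(pwrseq))
		pwrseq = NULL;			/* treat lookup failure as "absent" */

	if (pwrseq)
		pwrseq_power_on(pwrseq);	/* dedicated power sequencer */
	else
		legacy_power_on();		/* hypothetical GPIO fallback */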
+ */ + if (IS_ERR(qcadev->bt_power->pwrseq)) + qcadev->bt_power->pwrseq = NULL; + else + break; } fallthrough; case QCA_WCN3988: @@ -2385,14 +2392,14 @@ static int qca_serdev_probe(struct serdev_device *serdev) qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(qcadev->bt_en) && - (data->soc_type == QCA_WCN6750 || - data->soc_type == QCA_WCN6855)) { - dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n"); - return PTR_ERR(qcadev->bt_en); - } + if (IS_ERR(qcadev->bt_en)) + return dev_err_probe(&serdev->dev, + PTR_ERR(qcadev->bt_en), + "failed to acquire BT_EN gpio\n"); - if (!qcadev->bt_en) + if (!qcadev->bt_en && + (data->soc_type == QCA_WCN6750 || + data->soc_type == QCA_WCN6855)) power_ctrl_enabled = false; qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl", diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 930d8a3ba722b..58d16ff166c2d 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -905,8 +905,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc, error_cleanup_dev: kfree(mc_dev->regions); - kfree(mc_bus); - kfree(mc_dev); + if (mc_bus) + kfree(mc_bus); + else + kfree(mc_dev); return error; } diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c index 9c4c1395fcdbf..a376ec6616534 100644 --- a/drivers/bus/fsl-mc/fsl-mc-uapi.c +++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c @@ -275,13 +275,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = { .size = 8, }, [DPSW_GET_TAILDROP] = { - .cmdid_value = 0x0A80, + .cmdid_value = 0x0A90, .cmdid_mask = 0xFFF0, .token = true, .size = 14, }, [DPSW_SET_TAILDROP] = { - .cmdid_value = 0x0A90, + .cmdid_value = 0x0A80, .cmdid_mask = 0xFFF0, .token = true, .size = 24, diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c index 95b10a6cf3073..8b7a34f4db94b 100644 --- a/drivers/bus/fsl-mc/mc-io.c +++ b/drivers/bus/fsl-mc/mc-io.c @@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, if (error < 0) goto error_cleanup_resource; - dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev, - &dpmcp_dev->dev, - DL_FLAG_AUTOREMOVE_CONSUMER); - if (!dpmcp_dev->consumer_link) { - error = -EINVAL; - goto error_cleanup_mc_io; + /* If the DPRC device itself tries to allocate a portal (usually for + * UAPI interaction), don't add a device link between them since the + * DPMCP device is an actual child device of the DPRC and a reverse + * dependency is not allowed. 
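The mc-io.c comment above describes a general rule about device links, of which the mc_dev != mc_bus_dev guard that follows is one instance: the consumer must not be an ancestor of the supplier. A hypothetical wrapper making the rule explicit (not part of the patch):

	static struct device_link *link_unless_parent(struct device *consumer,
						      struct device *supplier)
	{
		/* A parent depending on its own child would be a reverse
		 * dependency, which the driver core does not allow. */
		if (supplier->parent == consumer)
			return NULL;
		return device_link_add(consumer, supplier,
				       DL_FLAG_AUTOREMOVE_CONSUMER);
	}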
+ */ + if (mc_dev != mc_bus_dev) { + dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev, + &dpmcp_dev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!dpmcp_dev->consumer_link) { + error = -EINVAL; + goto error_cleanup_mc_io; + } } *new_mc_io = mc_io; diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c index f2052cd0a0517..b22c59d57c8f0 100644 --- a/drivers/bus/fsl-mc/mc-sys.c +++ b/drivers/bus/fsl-mc/mc-sys.c @@ -19,7 +19,7 @@ /* * Timeout in milliseconds to wait for the completion of an MC command */ -#define MC_CMD_COMPLETION_TIMEOUT_MS 500 +#define MC_CMD_COMPLETION_TIMEOUT_MS 15000 /* * usleep_range() min and max values used to throttle down polling diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c index aeb53b2c34a8c..26357ee68dee9 100644 --- a/drivers/bus/mhi/ep/ring.c +++ b/drivers/bus/mhi/ep/ring.c @@ -131,19 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e } old_offset = ring->rd_offset; - mhi_ep_ring_inc_index(ring); dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset); + buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el)); + buf_info.dev_addr = el; + buf_info.size = sizeof(*el); + + ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info); + if (ret) + return ret; + + mhi_ep_ring_inc_index(ring); /* Update rp in ring context */ rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); - buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el)); - buf_info.dev_addr = el; - buf_info.size = sizeof(*el); - - return mhi_cntrl->write_sync(mhi_cntrl, &buf_info); + return ret; } void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 11c0e751f2239..0ccbcb717955a 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -602,6 +602,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) struct mhi_cmd *mhi_cmd; struct mhi_event_ctxt *er_ctxt; struct device *dev = &mhi_cntrl->mhi_dev->dev; + bool reset_device = false; int ret, i; dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", @@ -630,8 +631,23 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) /* Wake up threads waiting for state transition */ wake_up_all(&mhi_cntrl->state_event); - /* Trigger MHI RESET so that the device will not access host memory */ if (MHI_REG_ACCESS_VALID(prev_state)) { + /* + * If the device is in PBL or SBL, it will only respond to + * RESET if the device is in SYSERR state. SYSERR might + * already be cleared at this point. 
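The if/else-if pair that follows reduces to a single predicate; writing it out makes the PBL/SBL exception easier to see (a condensation of the hunk, not new behavior):

	/* Reset only when the device can act on it: either it is still in
	 * SYSERR, or its execution environment is already past PBL/SBL. */
	reset_device = (cur_state == MHI_STATE_SYS_ERR) ||
		       (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL);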
+ */ + enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl); + enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl); + + if (cur_state == MHI_STATE_SYS_ERR) + reset_device = true; + else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL) + reset_device = true; + } + + /* Trigger MHI RESET so that the device will not access host memory */ + if (reset_device) { u32 in_reset = -1; unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 270a94a06e05c..f715c8d281293 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata) return 0; } -/* Interconnect instances to probe before l4_per instances */ -static struct resource early_bus_ranges[] = { - /* am3/4 l4_wkup */ - { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, }, - /* omap4/5 and dra7 l4_cfg */ - { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, }, - /* omap4 l4_wkup */ - { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, }, - /* omap5 and dra7 l4_wkup without dra7 dcan segment */ - { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, }, -}; - -static atomic_t sysc_defer = ATOMIC_INIT(10); - -/** - * sysc_defer_non_critical - defer non_critical interconnect probing - * @ddata: device driver data - * - * We want to probe l4_cfg and l4_wkup interconnect instances before any - * l4_per instances as l4_per instances depend on resources on l4_cfg and - * l4_wkup interconnects. - */ -static int sysc_defer_non_critical(struct sysc *ddata) -{ - struct resource *res; - int i; - - if (!atomic_read(&sysc_defer)) - return 0; - - for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) { - res = &early_bus_ranges[i]; - if (ddata->module_pa >= res->start && - ddata->module_pa <= res->end) { - atomic_set(&sysc_defer, 0); - - return 0; - } - } - - atomic_dec_if_positive(&sysc_defer); - - return -EPROBE_DEFER; -} - static struct device_node *stdout_path; static void sysc_init_stdout_path(struct sysc *ddata) @@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata) if (error) return error; - error = sysc_defer_non_critical(ddata); - if (error) - return error; - sysc_check_children(ddata); if (!of_property_present(np, "reg")) diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e12b531f5c2f3..6a4a8ecd0edd0 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -1241,7 +1241,7 @@ int ipmi_create_user(unsigned int if_num, } /* Not found, return an error */ rv = -EINVAL; - goto out_kfree; + goto out_unlock; found: if (atomic_add_return(1, &intf->nr_users) > max_users) { @@ -1283,6 +1283,7 @@ int ipmi_create_user(unsigned int if_num, out_kfree: atomic_dec(&intf->nr_users); +out_unlock: srcu_read_unlock(&ipmi_interfaces_srcu, index); vfree(new_user); return rv; diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c index e49a19fea3bdf..dc882fc9fa9ef 100644 --- a/drivers/char/tpm/tpm-buf.c +++ b/drivers/char/tpm/tpm-buf.c @@ -201,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void */ u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset) { - u8 value; + u8 value = 0; tpm_buf_read(buf, offset, sizeof(value), &value); @@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8); */ u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset) { - u16 value; + u16 value = 0; tpm_buf_read(buf, offset, sizeof(value), &value); @@ -235,7 +235,7 @@ 
EXPORT_SYMBOL_GPL(tpm_buf_read_u16); */ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset) { - u32 value; + u32 value = 0; tpm_buf_read(buf, offset, sizeof(value), &value); diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c index a18a8768feb40..6cb26b6e7347d 100644 --- a/drivers/clk/bcm/clk-raspberrypi.c +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -271,6 +271,8 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi, init.name = devm_kasprintf(rpi->dev, GFP_KERNEL, "fw-clk-%s", rpi_firmware_clk_names[id]); + if (!init.name) + return ERR_PTR(-ENOMEM); init.ops = &raspberrypi_firmware_clk_ops; init.flags = CLK_GET_RATE_NOCACHE; diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 15510c2ff21c0..1b1561c84127b 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -404,6 +404,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) const struct scmi_handle *handle = sdev->handle; struct scmi_protocol_handle *ph; const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {}; + struct scmi_clk *sclks; if (!handle) return -ENODEV; @@ -430,18 +431,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev) transport_is_atomic = handle->is_transport_atomic(handle, &atomic_threshold_us); + sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL); + if (!sclks) + return -ENOMEM; + + for (idx = 0; idx < count; idx++) + hws[idx] = &sclks[idx].hw; + for (idx = 0; idx < count; idx++) { - struct scmi_clk *sclk; + struct scmi_clk *sclk = &sclks[idx]; const struct clk_ops *scmi_ops; - sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL); - if (!sclk) - return -ENOMEM; - sclk->info = scmi_proto_clk_ops->info_get(ph, idx); if (!sclk->info) { dev_dbg(dev, "invalid clock info for idx %d\n", idx); - devm_kfree(dev, sclk); + hws[idx] = NULL; continue; } @@ -479,13 +483,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev) if (err) { dev_err(dev, "failed to register clock %d\n", idx); devm_kfree(dev, sclk->parent_data); - devm_kfree(dev, sclk); hws[idx] = NULL; } else { dev_dbg(dev, "Registered clock:%s%s\n", sclk->info->name, scmi_ops->enable ? 
" (atomic ops)" : ""); - hws[idx] = &sclk->hw; } } diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c index 19a62da74be45..564e9f3f7508d 100644 --- a/drivers/clk/imx/clk-imx95-blk-ctl.c +++ b/drivers/clk/imx/clk-imx95-blk-ctl.c @@ -219,11 +219,15 @@ static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = { .clk_reg_offset = 0, }; +static const char * const disp_engine_parents[] = { + "videopll1", "dsi_pll", "ldb_pll_div7" +}; + static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { [IMX95_CLK_DISPMIX_ENG0_SEL] = { .name = "disp_engine0_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 0, .bit_width = 2, @@ -232,8 +236,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { }, [IMX95_CLK_DISPMIX_ENG1_SEL] = { .name = "disp_engine1_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 2, .bit_width = 2, diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 4f92b83965d5a..b72eebd0fa474 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -4099,6 +4099,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = { { .hw = &g12a_clk81.hw }, { .hw = &g12a_fclk_div4.hw }, { .hw = &g12a_fclk_div3.hw }, + { .hw = &g12a_fclk_div2.hw }, { .hw = &g12a_fclk_div5.hw }, { .hw = &g12a_fclk_div7.hw }, }; diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c index f6634cc8663ef..418668184ec35 100644 --- a/drivers/clk/qcom/camcc-sm6350.c +++ b/drivers/clk/qcom/camcc-sm6350.c @@ -1694,6 +1694,9 @@ static struct clk_branch camcc_sys_tmr_clk = { static struct gdsc bps_gdsc = { .gdscr = 0x6004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "bps_gdsc", }, @@ -1703,6 +1706,9 @@ static struct gdsc bps_gdsc = { static struct gdsc ipe_0_gdsc = { .gdscr = 0x7004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ipe_0_gdsc", }, @@ -1712,6 +1718,9 @@ static struct gdsc ipe_0_gdsc = { static struct gdsc ife_0_gdsc = { .gdscr = 0x9004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ife_0_gdsc", }, @@ -1720,6 +1729,9 @@ static struct gdsc ife_0_gdsc = { static struct gdsc ife_1_gdsc = { .gdscr = 0xa004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ife_1_gdsc", }, @@ -1728,6 +1740,9 @@ static struct gdsc ife_1_gdsc = { static struct gdsc ife_2_gdsc = { .gdscr = 0xb004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ife_2_gdsc", }, @@ -1736,6 +1751,9 @@ static struct gdsc ife_2_gdsc = { static struct gdsc titan_top_gdsc = { .gdscr = 0x14004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "titan_top_gdsc", }, diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c index 2bc6b5f99f572..d52fd4b49a02f 100644 --- a/drivers/clk/qcom/dispcc-sm6350.c +++ b/drivers/clk/qcom/dispcc-sm6350.c @@ -680,6 +680,9 @@ static struct clk_branch disp_cc_xo_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x1004, + .en_rest_wait_val = 0x2, + 
.en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c index 7431c9a65044f..45193b3d714ba 100644 --- a/drivers/clk/qcom/gcc-msm8939.c +++ b/drivers/clk/qcom/gcc-msm8939.c @@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_GPLL1_AUX, 2 }, - { P_GPLL6, 2 }, + { P_GPLL6, 3 }, { P_SLEEP_CLK, 6 }, }; @@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = { }; static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = { - F(24000000, P_GPLL0, 1, 1, 45), + F(24000000, P_GPLL6, 1, 1, 45), F(66670000, P_GPLL0, 12, 0, 0), { } }; diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c index 74346dc026068..a4d6dff9d0f7f 100644 --- a/drivers/clk/qcom/gcc-sm6350.c +++ b/drivers/clk/qcom/gcc-sm6350.c @@ -2320,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = { static struct gdsc usb30_prim_gdsc = { .gdscr = 0x1a004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb30_prim_gdsc", }, @@ -2328,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = { static struct gdsc ufs_phy_gdsc = { .gdscr = 0x3a004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ufs_phy_gdsc", }, diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 009f39139b644..3e44757e25d32 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -6753,6 +6753,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev) /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */ regmap_write(regmap, 0x52224, 0x0); + /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */ + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true); + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true); + return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap); } diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c index 1e12ad8948dbd..644bdc41892c6 100644 --- a/drivers/clk/qcom/gpucc-sm6350.c +++ b/drivers/clk/qcom/gpucc-sm6350.c @@ -412,6 +412,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = { static struct gdsc gpu_cx_gdsc = { .gdscr = 0x106c, .gds_hw_ctrl = 0x1540, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x8, .pd = { .name = "gpu_cx_gdsc", }, @@ -422,6 +425,9 @@ static struct gdsc gpu_cx_gdsc = { static struct gdsc gpu_gx_gdsc = { .gdscr = 0x100c, .clamp_io_ctrl = 0x1508, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x2, .pd = { .name = "gpu_gx_gdsc", .power_on = gdsc_gx_do_nothing_enable, diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index d341ce0708aac..e4af3a9286379 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -431,6 +431,7 @@ static const char *const rk3036_critical_clocks[] __initconst = { "hclk_peri", "pclk_peri", "pclk_ddrupctl", + "ddrphy", }; static void __init rk3036_clk_init(struct device_node *np) diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c index 229473855c5b3..bc762ba87a19b 100644 --- a/drivers/counter/interrupt-cnt.c +++ b/drivers/counter/interrupt-cnt.c @@ -3,12 +3,14 @@ * Copyright (c) 2021 Pengutronix, Oleksij Rempel */ +#include #include #include #include #include #include #include +#include #include 
#include @@ -19,6 +21,7 @@ struct interrupt_cnt_priv { struct gpio_desc *gpio; int irq; bool enabled; + struct mutex lock; struct counter_signal signals; struct counter_synapse synapses; struct counter_count cnts; @@ -41,6 +44,8 @@ static int interrupt_cnt_enable_read(struct counter_device *counter, { struct interrupt_cnt_priv *priv = counter_priv(counter); + guard(mutex)(&priv->lock); + *enable = priv->enabled; return 0; @@ -51,6 +56,8 @@ static int interrupt_cnt_enable_write(struct counter_device *counter, { struct interrupt_cnt_priv *priv = counter_priv(counter); + guard(mutex)(&priv->lock); + if (priv->enabled == enable) return 0; @@ -227,6 +234,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev) if (ret) return ret; + mutex_init(&priv->lock); + ret = devm_counter_add(dev, counter); if (ret < 0) return dev_err_probe(dev, ret, "Failed to add counter\n"); diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 47e910c22a80b..0f1679817682f 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -663,7 +663,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) nominal_perf = perf_caps.nominal_perf; if (nominal_freq) - *nominal_freq = perf_caps.nominal_freq; + *nominal_freq = perf_caps.nominal_freq * 1000; if (!highest_perf || !nominal_perf) { pr_debug("CPU%d: highest or nominal performance missing\n", cpu); diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 7a16d19322286..62dbc5701e993 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -482,6 +482,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, u32 nominal_perf = READ_ONCE(cpudata->nominal_perf); u64 value = prev; + if (!policy) + return; + min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, cpudata->max_limit_perf); max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 7e7c1613a67c6..beb660ca240cc 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -367,6 +367,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = { .register_em = scmi_cpufreq_register_em, }; +static bool scmi_dev_used_by_cpus(struct device *scmi_dev) +{ + struct device_node *scmi_np = dev_of_node(scmi_dev); + struct device_node *cpu_np, *np; + struct device *cpu_dev; + int cpu, idx; + + if (!scmi_np) + return false; + + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + continue; + + cpu_np = dev_of_node(cpu_dev); + + np = of_parse_phandle(cpu_np, "clocks", 0); + of_node_put(np); + + if (np == scmi_np) + return true; + + idx = of_property_match_string(cpu_np, "power-domain-names", "perf"); + np = of_parse_phandle(cpu_np, "power-domains", idx); + of_node_put(np); + + if (np == scmi_np) + return true; + } + + return false; +} + static int scmi_cpufreq_probe(struct scmi_device *sdev) { int ret; @@ -375,7 +409,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev) handle = sdev->handle; - if (!handle) + if (!handle || !scmi_dev_used_by_cpus(dev)) return -ENODEV; perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph); diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 4e5b6f9a56d1b..7b8fcfa55038b 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -73,18 +73,11 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy) { 
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id; - u32 cpu; policy->freq_table = data->clusters[cluster].table; policy->cpuinfo.transition_latency = 300 * 1000; policy->driver_data = NULL; - /* set same policy for all cpus in a cluster */ - for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) { - if (data->cpus[cpu].bpmp_cluster_id == cluster) - cpumask_set_cpu(cpu, policy->cpus); - } - return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 19b7fb4a93e86..05f67661553c9 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -275,13 +275,16 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req } else { if (nr_sgs > 0) dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); + + if (nr_sgd > 0) + dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); } theend_iv: if (areq->iv && ivsize > 0) { - if (rctx->addr_iv) + if (!dma_mapping_error(ce->dev, rctx->addr_iv)) dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); + offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { memcpy(areq->iv, chan->backup_iv, ivsize); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index e55e58e164db3..fcc6832a065cb 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce) err = pm_runtime_set_suspended(ce->dev); if (err) return err; - pm_runtime_enable(ce->dev); - return err; -} -static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce) -{ - pm_runtime_disable(ce->dev); + err = devm_pm_runtime_enable(ce->dev); + if (err) + return err; + + return 0; } static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce) @@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev) "sun8i-ce-ns", ce); if (err) { dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err); - goto error_irq; + goto error_pm; } err = sun8i_ce_register_algs(ce); @@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev) return 0; error_alg: sun8i_ce_unregister_algs(ce); -error_irq: - sun8i_ce_pm_exit(ce); error_pm: sun8i_ce_free_chanlist(ce, MAXFLOW - 1); return err; @@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev) #endif sun8i_ce_free_chanlist(ce, MAXFLOW - 1); - - sun8i_ce_pm_exit(ce); } static const struct of_device_id sun8i_ce_crypto_of_match_table[] = { diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 6072dd9f390b4..3f9d79ea01aaa 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -343,9 +343,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) u32 common; u64 byte_count; __le32 *bf; - void *buf = NULL; + void *buf, *result; int j, i, todo; - void *result = NULL; u64 bs; int digestsize; dma_addr_t addr_res, addr_pad; @@ -365,14 +364,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA); if (!buf) { err = -ENOMEM; - goto theend; + goto err_out; } bf = (__le32 *)buf; result = kzalloc(digestsize, GFP_KERNEL | 
GFP_DMA); if (!result) { err = -ENOMEM; - goto theend; + goto err_free_buf; } flow = rctx->flow; @@ -398,7 +397,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; - goto theend; + goto err_free_result; } len = areq->nbytes; @@ -411,7 +410,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (len > 0) { dev_err(ce->dev, "remaining len %d\n", len); err = -EINVAL; - goto theend; + goto err_unmap_src; } addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE); cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res); @@ -419,7 +418,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (dma_mapping_error(ce->dev, addr_res)) { dev_err(ce->dev, "DMA map dest\n"); err = -EINVAL; - goto theend; + goto err_unmap_src; } byte_count = areq->nbytes; @@ -441,7 +440,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) } if (!j) { err = -EINVAL; - goto theend; + goto err_unmap_result; } addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE); @@ -450,7 +449,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (dma_mapping_error(ce->dev, addr_pad)) { dev_err(ce->dev, "DMA error on padding SG\n"); err = -EINVAL; - goto theend; + goto err_unmap_result; } if (ce->variant->hash_t_dlen_in_bits) @@ -463,16 +462,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + +err_unmap_result: dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); + if (!err) + memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); +err_unmap_src: + dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); -theend: - kfree(buf); +err_free_result: kfree(result); + +err_free_buf: + kfree(buf); + +err_out: local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); + return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 3b5c2af013d0d..83df4d7190531 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx { * @flow: the flow to use for this request */ struct sun8i_ce_hash_reqctx { - struct ahash_request fallback_req; int flow; + struct ahash_request fallback_req; // keep at the end }; /* diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 9b9605ce8ee62..8831bcb230c2d 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) /* we need to copy all IVs from source in case DMA is bi-directionnal */ while (sg && len) { - if (sg_dma_len(sg) == 0) { + if (sg->length == 0) { sg = sg_next(sg); continue; } diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index e1f60f0f507c9..df2728cccf8b3 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1126,8 +1126,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, 
dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, - u32 *compression_crc, - bool disable_async) + u32 *compression_crc) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1170,7 +1169,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src2_size = sizeof(struct aecs_comp_table_record); desc->completion_addr = idxd_desc->compl_dma; - if (ctx->use_irq && !disable_async) { + if (ctx->use_irq) { desc->flags |= IDXD_OP_FLAG_RCI; idxd_desc->crypto.req = req; @@ -1183,8 +1182,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: compression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1204,7 +1202,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, update_total_comp_calls(); update_wq_comp_calls(wq); - if (ctx->async_mode && !disable_async) { + if (ctx->async_mode) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); goto out; @@ -1224,7 +1222,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - if (!ctx->async_mode || disable_async) + if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); out: return ret; @@ -1421,8 +1419,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: decompression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1490,13 +1487,11 @@ static int iaa_comp_acompress(struct acomp_req *req) struct iaa_compression_ctx *compression_ctx; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; - bool disable_async = false; int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; u32 compression_crc; struct idxd_wq *wq; struct device *dev; - int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1526,21 +1521,6 @@ static int iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq); - if (!req->dst) { - gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; - - /* incompressible data will always be < 2 * slen */ - req->dlen = 2 * req->slen; - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - disable_async = true; - } - dev = &wq->idxd->pdev->dev; nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); @@ -1570,7 +1550,7 @@ static int iaa_comp_acompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, - &req->dlen, &compression_crc, disable_async); + &req->dlen, &compression_crc); if (ret == -EINPROGRESS) return ret; @@ -1601,100 +1581,6 @@ static int iaa_comp_acompress(struct acomp_req *req) out: iaa_wq_put(wq); - if (order >= 0) - sgl_free_order(req->dst, order); - - return ret; -} - -static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) -{ - gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
- GFP_KERNEL : GFP_ATOMIC; - struct crypto_tfm *tfm = req->base.tfm; - dma_addr_t src_addr, dst_addr; - int nr_sgs, cpu, ret = 0; - struct iaa_wq *iaa_wq; - struct device *dev; - struct idxd_wq *wq; - int order = -1; - - cpu = get_cpu(); - wq = wq_table_next_wq(cpu); - put_cpu(); - if (!wq) { - pr_debug("no wq configured for cpu=%d\n", cpu); - return -ENODEV; - } - - ret = iaa_wq_get(wq); - if (ret) { - pr_debug("no wq available for cpu=%d\n", cpu); - return -ENODEV; - } - - iaa_wq = idxd_wq_get_private(wq); - - dev = &wq->idxd->pdev->dev; - - nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map src sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto out; - } - src_addr = sg_dma_address(req->src); - dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," - " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, - req->src, req->slen, sg_dma_len(req->src)); - - req->dlen = 4 * req->slen; /* start with ~avg comp rato */ -alloc_dest: - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - - nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map dst sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto err_map_dst; - } - - dst_addr = sg_dma_address(req->dst); - dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," - " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, - req->dst, req->dlen, sg_dma_len(req->dst)); - ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, true); - if (ret == -EOVERFLOW) { - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - req->dlen *= 2; - if (req->dlen > CRYPTO_ACOMP_DST_MAX) - goto err_map_dst; - goto alloc_dest; - } - - if (ret != 0) - dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); - - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); -err_map_dst: - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); -out: - iaa_wq_put(wq); - - if (order >= 0) - sgl_free_order(req->dst, order); - return ret; } @@ -1717,9 +1603,6 @@ static int iaa_comp_adecompress(struct acomp_req *req) return -EINVAL; } - if (!req->dst) - return iaa_comp_adecompress_alloc_dest(req); - cpu = get_cpu(); wq = wq_table_next_wq(cpu); put_cpu(); @@ -1800,19 +1683,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) return 0; } -static void dst_free(struct scatterlist *sgl) -{ - /* - * Called for req->dst = NULL cases but we free elsewhere - * using sgl_free_order(). 
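With the NULL-destination allocation path being removed above, callers of the IAA acomp algorithm are expected to supply their own destination scatterlist up front. A minimal caller-side sketch using the generic acomp API (buffers and sizes here are hypothetical):

	struct scatterlist src_sg, dst_sg;

	sg_init_one(&src_sg, src_buf, src_len);
	sg_init_one(&dst_sg, dst_buf, dst_len);		/* caller-owned destination */

	acomp_request_set_params(req, &src_sg, &dst_sg, src_len, dst_len);
	ret = crypto_acomp_decompress(req);		/* errors out if dst_len is too small */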
- */ -} - static struct acomp_alg iaa_acomp_fixed_deflate = { .init = iaa_comp_init_fixed, .compress = iaa_comp_acompress, .decompress = iaa_comp_adecompress, - .dst_free = dst_free, .base = { .cra_name = "deflate", .cra_driver_name = "deflate-iaa", diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index f49818a13013a..41420e349572a 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -181,11 +181,19 @@ static void adf_remove(struct pci_dev *pdev) adf_cleanup_accel(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, .name = ADF_420XX_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 659905e459503..01b34eda83e91 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -183,11 +183,19 @@ static void adf_remove(struct pci_dev *pdev) adf_cleanup_accel(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, .name = ADF_4XXX_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index 4d18057745d44..b776f7ea0dfb5 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -19,6 +19,13 @@ #include #include "adf_c3xxx_hw_data.h" +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), }, { } @@ -33,6 +40,7 @@ static struct pci_driver adf_driver = { .name = ADF_C3XXX_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index e6b5de55434ec..5310149c311e2 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -19,6 +19,13 @@ #include #include "adf_c62x_hw_data.h" +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), }, { } @@ -33,6 +40,7 @@ static struct pci_driver adf_driver = { .name = ADF_C62X_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 2a50cce415151..5ddf567ffcad6 100644 --- 
a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -19,6 +19,13 @@ #include #include "adf_dh895xcc_hw_data.h" +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), }, { } @@ -33,6 +40,7 @@ static struct pci_driver adf_driver = { .name = ADF_DH895XCC_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index 5fd31ba715c22..24273cb082ba5 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status) static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status) { - if (engine->chain.first && engine->chain.last) + if (engine->chain_hw.first && engine->chain_hw.last) return mv_cesa_tdma_process(engine, status); return mv_cesa_std_process(engine, status); diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h index d215a6bed6bc7..50ca1039fdaa7 100644 --- a/drivers/crypto/marvell/cesa/cesa.h +++ b/drivers/crypto/marvell/cesa/cesa.h @@ -440,8 +440,10 @@ struct mv_cesa_dev { * SRAM * @queue: fifo of the pending crypto requests * @load: engine load counter, useful for load balancing - * @chain: list of the current tdma descriptors being processed - * by this engine. + * @chain_hw: list of the current tdma descriptors being processed + * by the hardware. + * @chain_sw: list of the current tdma descriptors that will be + * submitted to the hardware. * @complete_queue: fifo of the processed requests by the engine * * Structure storing CESA engine information. 
@@ -463,7 +465,8 @@ struct mv_cesa_engine { struct gen_pool *pool; struct crypto_queue queue; atomic_t load; - struct mv_cesa_tdma_chain chain; + struct mv_cesa_tdma_chain chain_hw; + struct mv_cesa_tdma_chain chain_sw; struct list_head complete_queue; int irq; }; diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c index 0f37dfd42d850..3876e3ce822f4 100644 --- a/drivers/crypto/marvell/cesa/cipher.c +++ b/drivers/crypto/marvell/cesa/cipher.c @@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req, struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); struct mv_cesa_engine *engine; + if (!req->cryptlen) + return 0; + ret = mv_cesa_skcipher_req_init(req, tmpl); if (ret) return ret; diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index f150861ceaf69..6815eddc90681 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) if (ret) goto err_free_tdma; - if (iter.src.sg) { + if (iter.base.len > iter.src.op_offset) { /* * Add all the new data, inserting an operation block and * launch command between each full SRAM block-worth of diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c index 388a06e180d64..243305354420c 100644 --- a/drivers/crypto/marvell/cesa/tdma.c +++ b/drivers/crypto/marvell/cesa/tdma.c @@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq) { struct mv_cesa_engine *engine = dreq->engine; + spin_lock_bh(&engine->lock); + if (engine->chain_sw.first == dreq->chain.first) { + engine->chain_sw.first = NULL; + engine->chain_sw.last = NULL; + } + engine->chain_hw.first = dreq->chain.first; + engine->chain_hw.last = dreq->chain.last; + spin_unlock_bh(&engine->lock); + writel_relaxed(0, engine->regs + CESA_SA_CFG); mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); @@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq, void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, struct mv_cesa_req *dreq) { - if (engine->chain.first == NULL && engine->chain.last == NULL) { - engine->chain.first = dreq->chain.first; - engine->chain.last = dreq->chain.last; - } else { - struct mv_cesa_tdma_desc *last; + struct mv_cesa_tdma_desc *last = engine->chain_sw.last; - last = engine->chain.last; + /* + * Break the DMA chain if the request being queued needs the IV + * regs to be set before launching the request. + */ + if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE) + engine->chain_sw.first = dreq->chain.first; + else { last->next = dreq->chain.first; - engine->chain.last = dreq->chain.last; - - /* - * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on - * the last element of the current chain, or if the request - * being queued needs the IV regs to be set before lauching - * the request. - */ - if (!(last->flags & CESA_TDMA_BREAK_CHAIN) && - !(dreq->chain.first->flags & CESA_TDMA_SET_STATE)) - last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma); + last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma); + } + last = dreq->chain.last; + engine->chain_sw.last = last; + /* + * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on + * the last element of the current chain.
+ */ + if (last->flags & CESA_TDMA_BREAK_CHAIN) { + engine->chain_sw.first = NULL; + engine->chain_sw.last = NULL; } } @@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) tdma_cur = readl(engine->regs + CESA_TDMA_CUR); - for (tdma = engine->chain.first; tdma; tdma = next) { + for (tdma = engine->chain_hw.first; tdma; tdma = next) { spin_lock_bh(&engine->lock); next = tdma->next; spin_unlock_bh(&engine->lock); @@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) &backlog); /* Re-chaining to the next request */ - engine->chain.first = tdma->next; + engine->chain_hw.first = tdma->next; tdma->next = NULL; /* If this is the last request, clear the chain */ - if (engine->chain.first == NULL) - engine->chain.last = NULL; + if (engine->chain_hw.first == NULL) + engine->chain_hw.last = NULL; spin_unlock_bh(&engine->lock); ctx = crypto_tfm_ctx(req->tfm); diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c index 1bcec6f46c9c7..9b5345068604f 100644 --- a/drivers/crypto/xilinx/zynqmp-sha.c +++ b/drivers/crypto/xilinx/zynqmp-sha.c @@ -3,18 +3,19 @@ * Xilinx ZynqMP SHA Driver. * Copyright (c) 2022 Xilinx Inc. */ -#include #include #include #include -#include +#include +#include #include #include +#include #include -#include #include #include #include +#include #include #define ZYNQMP_DMA_BIT_MASK 32U @@ -43,6 +44,8 @@ struct zynqmp_sha_desc_ctx { static dma_addr_t update_dma_addr, final_dma_addr; static char *ubuf, *fbuf; +static DEFINE_SPINLOCK(zynqmp_sha_lock); + static int zynqmp_sha_init_tfm(struct crypto_shash *hash) { const char *fallback_driver_name = crypto_shash_alg_name(hash); @@ -124,7 +127,8 @@ static int zynqmp_sha_export(struct shash_desc *desc, void *out) return crypto_shash_export(&dctx->fbk_req, out); } -static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { unsigned int remaining_len = len; int update_size; @@ -159,6 +163,12 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i return ret; } +static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +{ + scoped_guard(spinlock_bh, &zynqmp_sha_lock) + return __zynqmp_sha_digest(desc, data, len, out); +} + static struct zynqmp_sha_drv_ctx sha3_drv_ctx = { .sha3_384 = { .init = zynqmp_sha_init, diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index a0d6e8d7f42c8..f5429666822f0 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1781,6 +1781,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range, } put_device(dev); + if (rc) + dev_err(port->uport_dev, + "failed to find %s:%s in target list of %s\n", + dev_name(&port->dev), + dev_name(port->parent_dport->dport_dev), + dev_name(&cxlsd->cxld.dev)); + return rc; } diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index b1ef4546346d4..bea3e9858aca5 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -685,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { - ret = dma_fence_wait_timeout(fence, intr, ret); - if (ret <= 0) { - dma_resv_iter_end(&cursor); - return ret; - } + ret = dma_fence_wait_timeout(fence, intr, timeout); + if (ret <= 0) + break; + 
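+ /* + * dma_fence_wait_timeout() returns the remaining jiffies on + * success, so feed that back in as the wait budget for the next + * fence in the iteration. + */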
+ /* Even for zero timeout the return value is 1 */ + if (timeout) + timeout = ret; } dma_resv_iter_end(&cursor); diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 5e836e4e5b449..959f690b12260 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -223,8 +223,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf, ubuf->sg = NULL; } } else { - dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, - direction); + dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction); } return ret; @@ -239,7 +238,7 @@ static int end_cpu_udmabuf(struct dma_buf *buf, if (!ubuf->sg) return -EINVAL; - dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction); + dma_sync_sgtable_for_device(dev, ubuf->sg, direction); return 0; } diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 22aa2bab3693c..8b27bd545685a 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -225,7 +225,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) struct idxd_wq *wq; struct device *dev, *fdev; int rc = 0; - struct iommu_sva *sva; + struct iommu_sva *sva = NULL; unsigned int pasid; struct idxd_cdev *idxd_cdev; @@ -322,7 +322,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) if (device_user_pasid_enabled(idxd)) idxd_xa_pasid_remove(ctx); failed_get_pasid: - if (device_user_pasid_enabled(idxd)) + if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva)) iommu_sva_unbind_device(sva); failed: mutex_unlock(&wq->wq_lock); @@ -354,7 +354,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) set_bit(h, evl->bmap); h = (h + 1) % size; } - drain_workqueue(wq->wq); + if (wq->wq) + drain_workqueue(wq->wq); + mutex_unlock(&evl->lock); } diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 7d89385c3c450..38b54719587cf 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -5572,7 +5572,8 @@ static int udma_probe(struct platform_device *pdev) uc->config.dir = DMA_MEM_TO_MEM; uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", dev_name(dev), i); - + if (!uc->name) + return -ENOMEM; vchan_init(&uc->vc, &ud->ddev); /* Use custom vchan completion handling */ tasklet_setup(&uc->vc.task, udma_vchan_complete); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 5eb51ae93e89d..aa59b62cd83fb 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -2906,6 +2906,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, return -EINVAL; } + xdev->common.directions |= chan->direction; + /* Request the interrupt */ chan->irq = of_irq_get(node, chan->tdest); if (chan->irq < 0) diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 8420862c90a4d..a059964b97f8c 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1746,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); else - writel(priv->ce_set_mask, set_addr); + writew(priv->ce_set_mask, set_addr); /* Ensure the interrupt test bits are set */ wmb(); @@ -1778,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) { - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); } else { /* Setup read/write of 4 bytes */ 
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 5d356b7c45897..322ba16b31bf2 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1208,7 +1208,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt) if (csrow_enabled(2 * dimm + 1, ctrl, pvt)) cs_mode |= CS_ODD_PRIMARY; - /* Asymmetric dual-rank DIMM support. */ + if (csrow_sec_enabled(2 * dimm, ctrl, pvt)) + cs_mode |= CS_EVEN_SECONDARY; + if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt)) cs_mode |= CS_ODD_SECONDARY; @@ -1229,12 +1231,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt) return cs_mode; } -static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode, - int csrow_nr, int dimm) +static int calculate_cs_size(u32 mask, unsigned int cs_mode) { - u32 msb, weight, num_zero_bits; - u32 addr_mask_deinterleaved; - int size = 0; + int msb, weight, num_zero_bits; + u32 deinterleaved_mask; + + if (!mask) + return 0; /* * The number of zero bits in the mask is equal to the number of bits @@ -1247,19 +1250,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode, * without swapping with the most significant bit. This can be handled * by keeping the MSB where it is and ignoring the single zero bit. */ - msb = fls(addr_mask_orig) - 1; - weight = hweight_long(addr_mask_orig); + msb = fls(mask) - 1; + weight = hweight_long(mask); num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE); /* Take the number of zero bits off from the top of the mask. */ - addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1); + deinterleaved_mask = GENMASK(msb - num_zero_bits, 1); + edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask); + + return (deinterleaved_mask >> 2) + 1; +} + +static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec, + unsigned int cs_mode, int csrow_nr, int dimm) +{ + int size; edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm); - edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig); - edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved); + edac_dbg(1, " Primary AddrMask: 0x%x\n", addr_mask); /* Register [31:1] = Address [39:9]. Size is in kBs here. */ - size = (addr_mask_deinterleaved >> 2) + 1; + size = calculate_cs_size(addr_mask, cs_mode); + + edac_dbg(1, " Secondary AddrMask: 0x%x\n", addr_mask_sec); + size += calculate_cs_size(addr_mask_sec, cs_mode); /* Return size in MBs. */ return size >> 10; @@ -1268,8 +1282,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode, static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, unsigned int cs_mode, int csrow_nr) { + u32 addr_mask = 0, addr_mask_sec = 0; int cs_mask_nr = csrow_nr; - u32 addr_mask_orig; int dimm, size = 0; /* No Chip Selects are enabled. */ @@ -1307,13 +1321,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, if (!pvt->flags.zn_regs_v2) cs_mask_nr >>= 1; - /* Asymmetric dual-rank DIMM support. 
*/ - if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY)) - addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr]; - else - addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr]; + if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY)) + addr_mask = pvt->csels[umc].csmasks[cs_mask_nr]; - return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm); + if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY)) + addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr]; + + return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm); } static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) @@ -3515,9 +3529,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err) static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, unsigned int cs_mode, int csrow_nr) { - u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr]; + u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr]; + u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr]; - return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1); + return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1); } static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) @@ -3882,6 +3897,7 @@ static int per_family_init(struct amd64_pvt *pvt) break; case 0x70 ... 0x7f: pvt->ctl_name = "F19h_M70h"; + pvt->max_mcs = 4; pvt->flags.zn_regs_v2 = 1; break; case 0x90 ... 0x9f: diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index fbdf005bed3a4..ac4b3d95531c5 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -95,7 +95,7 @@ static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0}; static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0}; -static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable, +static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable, u32 *rrl_ctl, u32 *offsets_scrub, u32 *offsets_demand, u32 *offsets_demand2) { @@ -108,10 +108,10 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable if (enable) { /* Save default configurations */ - imc->chan[chan].retry_rd_err_log_s = s; - imc->chan[chan].retry_rd_err_log_d = d; + rrl_ctl[0] = s; + rrl_ctl[1] = d; if (offsets_demand2) - imc->chan[chan].retry_rd_err_log_d2 = d2; + rrl_ctl[2] = d2; s &= ~RETRY_RD_ERR_LOG_NOOVER_UC; s |= RETRY_RD_ERR_LOG_EN; @@ -125,25 +125,25 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable } } else { /* Restore default configurations */ - if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC) + if (rrl_ctl[0] & RETRY_RD_ERR_LOG_UC) s |= RETRY_RD_ERR_LOG_UC; - if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER) + if (rrl_ctl[0] & RETRY_RD_ERR_LOG_NOOVER) s |= RETRY_RD_ERR_LOG_NOOVER; - if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN)) + if (!(rrl_ctl[0] & RETRY_RD_ERR_LOG_EN)) s &= ~RETRY_RD_ERR_LOG_EN; - if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC) + if (rrl_ctl[1] & RETRY_RD_ERR_LOG_UC) d |= RETRY_RD_ERR_LOG_UC; - if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER) + if (rrl_ctl[1] & RETRY_RD_ERR_LOG_NOOVER) d |= RETRY_RD_ERR_LOG_NOOVER; - if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN)) + if (!(rrl_ctl[1] & RETRY_RD_ERR_LOG_EN)) d &= ~RETRY_RD_ERR_LOG_EN; if (offsets_demand2) { 
- if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC) + if (rrl_ctl[2] & RETRY_RD_ERR_LOG_UC) d2 |= RETRY_RD_ERR_LOG_UC; - if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER)) + if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_NOOVER)) d2 &= ~RETRY_RD_ERR_LOG_NOOVER; - if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN)) + if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_EN)) d2 &= ~RETRY_RD_ERR_LOG_EN; } } @@ -157,6 +157,7 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable static void enable_retry_rd_err_log(bool enable) { int i, j, imc_num, chan_num; + struct skx_channel *chan; struct skx_imc *imc; struct skx_dev *d; @@ -171,8 +172,9 @@ static void enable_retry_rd_err_log(bool enable) if (!imc->mbase) continue; + chan = d->imc[i].chan; for (j = 0; j < chan_num; j++) - __enable_retry_rd_err_log(imc, j, enable, + __enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0], res_cfg->offsets_scrub, res_cfg->offsets_demand, res_cfg->offsets_demand2); @@ -186,12 +188,13 @@ static void enable_retry_rd_err_log(bool enable) if (!imc->mbase || !imc->hbm_mc) continue; + chan = d->imc[i].chan; for (j = 0; j < chan_num; j++) { - __enable_retry_rd_err_log(imc, j, enable, + __enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0], res_cfg->offsets_scrub_hbm0, res_cfg->offsets_demand_hbm0, NULL); - __enable_retry_rd_err_log(imc, j, enable, + __enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[1], res_cfg->offsets_scrub_hbm1, res_cfg->offsets_demand_hbm1, NULL); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 85ec3196664d3..88f5ff249f2e0 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -115,6 +115,7 @@ EXPORT_SYMBOL_GPL(skx_adxl_get); void skx_adxl_put(void) { + adxl_component_count = 0; kfree(adxl_values); kfree(adxl_msg); } diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index 849198fd14da6..f40eb6e4f6319 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -79,6 +79,9 @@ */ #define MCACOD_EXT_MEM_ERR 0x280 +/* Max RRL register sets per {,sub-,pseudo-}channel. */ +#define NUM_RRL_SET 3 + /* * Each cpu socket contains some pci devices that provide global * information, and also some that are local to each of the two @@ -117,9 +120,11 @@ struct skx_dev { struct skx_channel { struct pci_dev *cdev; struct pci_dev *edev; - u32 retry_rd_err_log_s; - u32 retry_rd_err_log_d; - u32 retry_rd_err_log_d2; + /* + * Two groups of RRL control registers per channel to save default RRL + * settings of two {sub-,pseudo-}channels in Linux RRL control mode. 
+ */ + u32 rrl_ctl[2][NUM_RRL_SET]; struct skx_dimm { u8 close_pg; u8 bank_xor_enable; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 9f35f69e0f9e2..f7044bf53d1fc 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -31,7 +31,6 @@ config ARM_SCPI_PROTOCOL config ARM_SDE_INTERFACE bool "ARM Software Delegated Exception Interface (SDEI)" depends on ARM64 - depends on ACPI_APEI_GHES help The Software Delegated Exception Interface (SDEI) is an ARM standard for registering callbacks from the platform firmware diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 47751b2c057ae..83dad9c2da064 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -110,7 +110,7 @@ struct ffa_drv_info { struct work_struct sched_recv_irq_work; struct xarray partition_info; DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS)); - struct mutex notify_lock; /* lock to protect notifier hashtable */ + rwlock_t notify_lock; /* lock to protect notifier hashtable */ }; static struct ffa_drv_info *drv_info; @@ -1141,12 +1141,11 @@ notifier_hash_node_get(u16 notify_id, enum notify_type type) return NULL; } -static int -update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, - void *cb_data, bool is_registration) +static int update_notifier_cb(int notify_id, enum notify_type type, + struct notifier_cb_info *cb) { struct notifier_cb_info *cb_info = NULL; - bool cb_found; + bool cb_found, is_registration = !!cb; cb_info = notifier_hash_node_get(notify_id, type); cb_found = !!cb_info; @@ -1155,17 +1154,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, return -EINVAL; if (is_registration) { - cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL); - if (!cb_info) - return -ENOMEM; - - cb_info->type = type; - cb_info->cb = cb; - cb_info->cb_data = cb_data; - - hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id); + hash_add(drv_info->notifier_hash, &cb->hnode, notify_id); } else { hash_del(&cb_info->hnode); + kfree(cb_info); } return 0; @@ -1190,18 +1182,18 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) if (notify_id >= FFA_MAX_NOTIFICATIONS) return -EINVAL; - mutex_lock(&drv_info->notify_lock); + write_lock(&drv_info->notify_lock); - rc = update_notifier_cb(notify_id, type, NULL, NULL, false); + rc = update_notifier_cb(notify_id, type, NULL); if (rc) { pr_err("Could not unregister notification callback\n"); - mutex_unlock(&drv_info->notify_lock); + write_unlock(&drv_info->notify_lock); return rc; } rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); - mutex_unlock(&drv_info->notify_lock); + write_unlock(&drv_info->notify_lock); return rc; } @@ -1211,6 +1203,7 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, { int rc; u32 flags = 0; + struct notifier_cb_info *cb_info = NULL; enum notify_type type = ffa_notify_type_get(dev->vm_id); if (ffa_notifications_disabled()) @@ -1219,24 +1212,34 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, if (notify_id >= FFA_MAX_NOTIFICATIONS) return -EINVAL; - mutex_lock(&drv_info->notify_lock); + cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL); + if (!cb_info) + return -ENOMEM; + + cb_info->type = type; + cb_info->cb_data = cb_data; + cb_info->cb = cb; + + write_lock(&drv_info->notify_lock); if (is_per_vcpu) flags = PER_VCPU_NOTIFICATION_FLAG; rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); - if (rc) { - 
mutex_unlock(&drv_info->notify_lock); - return rc; - } + if (rc) + goto out_unlock_free; - rc = update_notifier_cb(notify_id, type, cb, cb_data, true); + rc = update_notifier_cb(notify_id, type, cb_info); if (rc) { pr_err("Failed to register callback for %d - %d\n", notify_id, rc); ffa_notification_unbind(dev->vm_id, BIT(notify_id)); } - mutex_unlock(&drv_info->notify_lock); + +out_unlock_free: + write_unlock(&drv_info->notify_lock); + if (rc) + kfree(cb_info); return rc; } @@ -1266,9 +1269,9 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type) if (!(bitmap & 1)) continue; - mutex_lock(&drv_info->notify_lock); + read_lock(&drv_info->notify_lock); cb_info = notifier_hash_node_get(notify_id, type); - mutex_unlock(&drv_info->notify_lock); + read_unlock(&drv_info->notify_lock); if (cb_info && cb_info->cb) cb_info->cb(notify_id, cb_info->cb_data); @@ -1718,7 +1721,7 @@ static void ffa_notifications_setup(void) goto cleanup; hash_init(drv_info->notifier_hash); - mutex_init(&drv_info->notify_lock); + rwlock_init(&drv_info->notify_lock); drv_info->notif_enabled = true; return; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 993615fa490eb..f1abe605865ad 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1708,6 +1708,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph) return info->desc->max_msg_size; } +/** + * scmi_protocol_msg_check - Check protocol message attributes + * + * @ph: A reference to the protocol handle. + * @message_id: The ID of the message to check. + * @attributes: A parameter to optionally return the retrieved message + * attributes, in case of Success. + * + * A helper to check protocol message attributes for a specific protocol + * and message pair. + * + * Return: 0 on SUCCESS + */ +static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, + u32 message_id, u32 *attributes) +{ + int ret; + struct scmi_xfer *t; + + ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(message_id, t->tx.buf); + ret = do_xfer(ph, t); + if (!ret && attributes) + *attributes = get_unaligned_le32(t->rx.buf); + xfer_put(ph, t); + + return ret; +} + /** * struct scmi_iterator - Iterator descriptor * @msg: A reference to the message TX buffer; filled by @prepare_message with @@ -1849,6 +1882,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, int ret; u32 flags; u64 phys_addr; + u32 attributes; u8 size; void __iomem *addr; struct scmi_xfer *t; @@ -1857,6 +1891,15 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, struct scmi_msg_resp_desc_fc *resp; const struct scmi_protocol_instance *pi = ph_to_pi(ph); + /* Check if the MSG_ID supports fastchannel */ + ret = scmi_protocol_msg_check(ph, message_id, &attributes); + if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) { + dev_dbg(ph->dev, + "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n", + pi->proto->id, message_id, domain, ret); + return; + } + if (!p_addr) { ret = -EINVAL; goto err_out; @@ -1984,39 +2027,6 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) #endif } -/** - * scmi_protocol_msg_check - Check protocol message attributes - * - * @ph: A reference to the protocol handle. - * @message_id: The ID of the message to check. - * @attributes: A parameter to optionally return the retrieved message - * attributes, in case of Success.
- * - * An helper to check protocol message attributes for a specific protocol - * and message pair. - * - * Return: 0 on SUCCESS - */ -static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, - u32 message_id, u32 *attributes) -{ - int ret; - struct scmi_xfer *t; - - ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, - sizeof(__le32), 0, &t); - if (ret) - return ret; - - put_unaligned_le32(message_id, t->tx.buf); - ret = do_xfer(ph, t); - if (!ret && attributes) - *attributes = get_unaligned_le32(t->rx.buf); - xfer_put(ph, t); - - return ret; -} - static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, .get_max_msg_size = scmi_common_get_max_msg_size, diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index aaee57cdcd558..d62c4469d1fd9 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -31,6 +31,8 @@ #define SCMI_PROTOCOL_VENDOR_BASE 0x80 +#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0)) + enum scmi_common_cmd { PROTOCOL_VERSION = 0x0, PROTOCOL_ATTRIBUTES = 0x1, diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 3e8051fe82965..71e2a9a89f6ad 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1062,13 +1062,12 @@ static bool __init sdei_present_acpi(void) return true; } -void __init sdei_init(void) +void __init acpi_sdei_init(void) { struct platform_device *pdev; int ret; - ret = platform_driver_register(&sdei_driver); - if (ret || !sdei_present_acpi()) + if (!sdei_present_acpi()) return; pdev = platform_device_register_simple(sdei_driver.driver.name, @@ -1081,6 +1080,12 @@ void __init sdei_init(void) } } +static int __init sdei_init(void) +{ + return platform_driver_register(&sdei_driver); +} +arch_initcall(sdei_init); + int sdei_event_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index de659f6a815fd..1ad414da9920a 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -603,6 +603,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image, * @image: EFI loaded image protocol * @soft_limit: preferred address for loading the initrd * @hard_limit: upper limit address for loading the initrd + * @out: pointer to store the address of the initrd table * * Return: status code */ diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index 2328ca58bba61..d6701d81cf680 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -759,8 +759,10 @@ int __init psci_dt_init(void) np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); - if (!np || !of_device_is_available(np)) + if (!np || !of_device_is_available(np)) { + of_node_put(np); return -ENODEV; + } init_fn = (psci_initcall_t)matched_np->data; ret = init_fn(np); diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index a3df782fa687b..e919940c8bf9a 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -124,6 +124,7 @@ static __init int sysfb_init(void) { struct screen_info *si = &screen_info; struct device *parent; + unsigned int type; struct simplefb_platform_data mode; const char *name; bool compatible; @@ -151,17 +152,26 @@ static __init int sysfb_init(void) goto put_device; } + type = screen_info_video_type(si); + /* if the FB is incompatible, create a 
legacy framebuffer device */ - if (si->orig_video_isVGA == VIDEO_TYPE_EFI) - name = "efi-framebuffer"; - else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) - name = "vesa-framebuffer"; - else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC) - name = "vga-framebuffer"; - else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC) + switch (type) { + case VIDEO_TYPE_EGAC: name = "ega-framebuffer"; - else + break; + case VIDEO_TYPE_VGAC: + name = "vga-framebuffer"; + break; + case VIDEO_TYPE_VLFB: + name = "vesa-framebuffer"; + break; + case VIDEO_TYPE_EFI: + name = "efi-framebuffer"; + break; + default: name = "platform-framebuffer"; + break; + } pd = platform_device_alloc(name, 0); if (!pd) { diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c index 9cb37aefbac4b..1902ebf5a298f 100644 --- a/drivers/fpga/tests/fpga-mgr-test.c +++ b/drivers/fpga/tests/fpga-mgr-test.c @@ -263,6 +263,7 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test) img_buf = init_test_buffer(test, IMAGE_SIZE); sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); ret = sg_alloc_table(sgt, 1, GFP_KERNEL); KUNIT_ASSERT_EQ(test, ret, 0); sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE); diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c index 10ea71273c891..9875e34bde72a 100644 --- a/drivers/gpio/gpio-mlxbf3.c +++ b/drivers/gpio/gpio-mlxbf3.c @@ -190,7 +190,9 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) struct mlxbf3_gpio_context *gs; struct gpio_irq_chip *girq; struct gpio_chip *gc; + char *colon_ptr; int ret, irq; + long num; gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); if (!gs) @@ -227,25 +229,39 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) gc->owner = THIS_MODULE; gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges; - irq = platform_get_irq(pdev, 0); - if (irq >= 0) { - girq = &gs->gc.irq; - gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); - girq->default_type = IRQ_TYPE_NONE; - /* This will let us handle the parent IRQ in the driver */ - girq->num_parents = 0; - girq->parents = NULL; - girq->parent_handler = NULL; - girq->handler = handle_bad_irq; - - /* - * Directly request the irq here instead of passing - * a flow-handler because the irq is shared. - */ - ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, - IRQF_SHARED, dev_name(dev), gs); - if (ret) - return dev_err_probe(dev, ret, "failed to request IRQ"); + colon_ptr = strchr(dev_name(dev), ':'); + if (!colon_ptr) { + dev_err(dev, "invalid device name format\n"); + return -EINVAL; + } + + ret = kstrtol(++colon_ptr, 16, &num); + if (ret) { + dev_err(dev, "invalid device instance\n"); + return ret; + } + + if (!num) { + irq = platform_get_irq(pdev, 0); + if (irq >= 0) { + girq = &gs->gc.irq; + gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); + girq->default_type = IRQ_TYPE_NONE; + /* This will let us handle the parent IRQ in the driver */ + girq->num_parents = 0; + girq->parents = NULL; + girq->parent_handler = NULL; + girq->handler = handle_bad_irq; + + /* + * Directly request the irq here instead of passing + * a flow-handler because the irq is shared. 
+ */ + ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, + IRQF_SHARED, dev_name(dev), gs); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ"); + } } platform_set_drvdata(pdev, gs); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index ef3aee1cabcfd..bb7c1bf5f856e 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -951,7 +951,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base) IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), chip); if (ret) - return dev_err_probe(dev, client->irq, "failed to request irq\n"); + return dev_err_probe(dev, ret, "failed to request irq\n"); return 0; } diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c index e89f299f21400..dcecb7a259117 100644 --- a/drivers/gpio/gpio-virtuser.c +++ b/drivers/gpio/gpio-virtuser.c @@ -400,10 +400,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file, char buf[32], *trimmed; int ret, dir, val = 0; - ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (count >= sizeof(buf)) + return -EINVAL; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret < 0) return ret; + buf[ret] = '\0'; + trimmed = strim(buf); if (strcmp(trimmed, "input") == 0) { @@ -622,12 +627,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file, char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2]; int ret; + if (count >= sizeof(buf)) + return -EINVAL; + ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos, user_buf, count); if (ret < 0) return ret; - buf[strlen(buf) - 1] = '\0'; + buf[ret] = '\0'; ret = gpiod_set_consumer_name(data->ad.desc, buf); if (ret) diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 626daedb01698..36f8c7bb79d81 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -215,6 +215,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np, */ { "lantiq,pci-xway", "gpio-reset", false }, #endif +#if IS_ENABLED(CONFIG_REGULATOR_S5M8767) + /* + * According to S5M8767, the DVS and DS pins are + * active-high signals. However, exynos5250-spring.dts uses an + * active-low setting.
+ */ + { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-dvs-gpios", true }, + { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-ds-gpios", true }, +#endif #if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005) /* * DTS for Nokia N900 incorrectly specified "active high" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 9da4414de6177..81f16e4447f8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1902,7 +1902,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) continue; } job = to_amdgpu_job(s_job); - if (preempted && (&job->hw_fence) == fence) + if (preempted && (&job->hw_fence.base) == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ca0411c9500e7..24e41b42c638b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4144,7 +4144,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); - mutex_init(&adev->virt.rlcg_reg_lock); hash_init(adev->mn_hash); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); @@ -4170,6 +4169,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); + spin_lock_init(&adev->virt.rlcg_reg_lock); spin_lock_init(&adev->wb.lock); INIT_LIST_HEAD(&adev->reset_list); @@ -5861,7 +5861,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * * job->base holds a reference to parent fence */ - if (job && dma_fence_is_signaled(&job->hw_fence)) { + if (job && dma_fence_is_signaled(&job->hw_fence.base)) { job_signaled = true; dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 018240a2ab96a..eee434743deb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -112,6 +112,14 @@ #endif MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin"); #define mmIP_DISCOVERY_VERSION 0x16A00 #define mmRCC_CONFIG_MEMSIZE 0xde3 @@ -301,10 +309,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, const struct firmware *fw; int r; - r = request_firmware(&fw, fw_name, adev->dev); + r = firmware_request_nowarn(&fw, fw_name, adev->dev); if (r) { - dev_err(adev->dev, "can't load firmware \"%s\"\n", - fw_name); + if (amdgpu_discovery == 2) + dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name); + else + drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name); return r; } @@ -398,7 +408,27 @@ static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) if (amdgpu_discovery == 2) return "amdgpu/ip_discovery.bin"; - return NULL; + switch (adev->asic_type) { + case 
CHIP_VEGA10: + return "amdgpu/vega10_ip_discovery.bin"; + case CHIP_VEGA12: + return "amdgpu/vega12_ip_discovery.bin"; + case CHIP_RAVEN: + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + return "amdgpu/raven2_ip_discovery.bin"; + else if (adev->apu_flags & AMD_APU_IS_PICASSO) + return "amdgpu/picasso_ip_discovery.bin"; + else + return "amdgpu/raven_ip_discovery.bin"; + case CHIP_VEGA20: + return "amdgpu/vega20_ip_discovery.bin"; + case CHIP_ARCTURUS: + return "amdgpu/arcturus_ip_discovery.bin"; + case CHIP_ALDEBARAN: + return "amdgpu/aldebaran_ip_discovery.bin"; + default: + return NULL; + } } static int amdgpu_discovery_init(struct amdgpu_device *adev) @@ -419,16 +449,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) /* Read from file if it is the preferred option */ fw_name = amdgpu_discovery_get_fw_name(adev); if (fw_name != NULL) { - dev_info(adev->dev, "use ip discovery information from file"); + drm_dbg(&adev->ddev, "use ip discovery information from file"); r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name); - - if (r) { - dev_err(adev->dev, "failed to read ip discovery binary from file\n"); - r = -EINVAL; + if (r) goto out; - } - } else { + drm_dbg(&adev->ddev, "use ip discovery information from memory"); r = amdgpu_discovery_read_binary_from_mem( adev, adev->mman.discovery_bin); if (r) @@ -1286,10 +1312,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) int r; r = amdgpu_discovery_init(adev); - if (r) { - DRM_ERROR("amdgpu_discovery_init failed\n"); + if (r) return r; - } adev->gfx.xcc_mask = 0; adev->sdma.sdma_mask = 0; @@ -2429,6 +2453,40 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) { int r; + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_RAVEN: + case CHIP_VEGA20: + case CHIP_ARCTURUS: + case CHIP_ALDEBARAN: + /* this is not fatal. We have a fallback below + * if the new firmwares are not present. some of + * this will be overridden below to keep things + * consistent with the current behavior. + */ + r = amdgpu_discovery_reg_base_init(adev); + if (!r) { + amdgpu_discovery_harvest_ip(adev); + amdgpu_discovery_get_gfx_info(adev); + amdgpu_discovery_get_mall_info(adev); + amdgpu_discovery_get_vcn_info(adev); + } + break; + default: + r = amdgpu_discovery_reg_base_init(adev); + if (r) { + drm_err(&adev->ddev, "discovery failed: %d\n", r); + return r; + } + + amdgpu_discovery_harvest_ip(adev); + amdgpu_discovery_get_gfx_info(adev); + amdgpu_discovery_get_mall_info(adev); + amdgpu_discovery_get_vcn_info(adev); + break; + } + switch (adev->asic_type) { case CHIP_VEGA10: vega10_reg_base_init(adev); @@ -2591,14 +2649,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); break; default: - r = amdgpu_discovery_reg_base_init(adev); - if (r) - return -EINVAL; - - amdgpu_discovery_harvest_ip(adev); - amdgpu_discovery_get_gfx_info(adev); - amdgpu_discovery_get_mall_info(adev); - amdgpu_discovery_get_vcn_info(adev); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 2f24a6aa13bf6..569e0e5373927 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -41,22 +41,6 @@ #include "amdgpu_trace.h" #include "amdgpu_reset.h" -/* - * Fences mark an event in the GPUs pipeline and are used - * for GPU/CPU synchronization. 
When the fence is written, - * it is expected that all buffers associated with that fence - * are no longer in use by the associated ring on the GPU and - * that the relevant GPU caches have been flushed. - */ - -struct amdgpu_fence { - struct dma_fence base; - - /* RB, DMA, etc. */ - struct amdgpu_ring *ring; - ktime_t start_timestamp; -}; - static struct kmem_cache *amdgpu_fence_slab; int amdgpu_fence_slab_init(void) @@ -151,12 +135,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC); if (am_fence == NULL) return -ENOMEM; - fence = &am_fence->base; - am_fence->ring = ring; } else { /* take use of job-embedded fence */ - fence = &job->hw_fence; + am_fence = &job->hw_fence; } + fence = &am_fence->base; + am_fence->ring = ring; seq = ++ring->fence_drv.sync_seq; if (job && job->job_run_counter) { @@ -718,7 +702,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) * it right here or we won't be able to track them in fence_drv * and they will remain unsignaled during sa_bo free. */ - job = container_of(old, struct amdgpu_job, hw_fence); + job = container_of(old, struct amdgpu_job, hw_fence.base); if (!job->base.s_fence && !dma_fence_is_signaled(old)) dma_fence_signal(old); RCU_INIT_POINTER(*ptr, NULL); @@ -780,7 +764,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); return (const char *)to_amdgpu_ring(job->base.sched)->name; } @@ -810,7 +794,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) */ static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer)) amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched)); @@ -845,7 +829,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu) struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); /* free job if fence has a parent job */ - kfree(container_of(f, struct amdgpu_job, hw_fence)); + kfree(container_of(f, struct amdgpu_job, hw_fence.base)); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 1ce20a19be8ba..7e6057a6e7f17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -259,8 +259,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) /* Check if any fences where initialized */ if (job->base.s_fence && job->base.s_fence->finished.ops) f = &job->base.s_fence->finished; - else if (job->hw_fence.ops) - f = &job->hw_fence; + else if (job->hw_fence.base.ops) + f = &job->hw_fence.base; else f = NULL; @@ -277,10 +277,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->explicit_sync); /* only put the hw fence if has embedded fence */ - if (!job->hw_fence.ops) + if (!job->hw_fence.base.ops) kfree(job); else - dma_fence_put(&job->hw_fence); + dma_fence_put(&job->hw_fence.base); } void amdgpu_job_set_gang_leader(struct amdgpu_job *job, @@ -309,10 +309,10 @@ void amdgpu_job_free(struct amdgpu_job *job) if (job->gang_submit != &job->base.s_fence->scheduled) 
dma_fence_put(job->gang_submit); - if (!job->hw_fence.ops) + if (!job->hw_fence.base.ops) kfree(job); else - dma_fence_put(&job->hw_fence); + dma_fence_put(&job->hw_fence.base); } struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index ce6b9ba967fff..4fe033d8f3568 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -48,7 +48,7 @@ struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; struct amdgpu_sync explicit_sync; - struct dma_fence hw_fence; + struct amdgpu_fence hw_fence; struct dma_fence *gang_submit; uint32_t preamble_status; uint32_t preemption_status; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 7d4b540340e02..41b88e0ea98b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -860,7 +860,9 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); queue_input.wptr_addr = ring->wptr_gpu_addr; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to map legacy queue\n"); @@ -883,7 +885,9 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, queue_input.trail_fence_addr = gpu_addr; queue_input.trail_fence_data = seq; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to unmap legacy queue\n"); @@ -910,7 +914,9 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, queue_input.vmid = vmid; queue_input.use_mmio = use_mmio; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to reset legacy queue\n"); @@ -931,7 +937,9 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to read reg (0x%x)\n", reg); else @@ -957,7 +965,9 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev, goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to write reg (0x%x)\n", reg); @@ -984,7 +994,9 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to reg_write_reg_wait\n"); @@ -1009,7 +1021,9 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg, goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to reg_write_reg_wait\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 48e30e5f83389..3d42f6c3308ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -3430,7 +3430,10 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) uint8_t *ucode_array_start_addr; int err = 0; - err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) 
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name); if (err) goto out; @@ -3672,7 +3675,10 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) struct amdgpu_device *adev = psp->adev; int err; - err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name); if (err) return err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index f93f510022018..9af2cda676ad7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -126,6 +126,22 @@ struct amdgpu_fence_driver { struct dma_fence **fences; }; +/* + * Fences mark an event in the GPUs pipeline and are used + * for GPU/CPU synchronization. When the fence is written, + * it is expected that all buffers associated with that fence + * are no longer in use by the associated ring on the GPU and + * that the relevant GPU caches have been flushed. + */ + +struct amdgpu_fence { + struct dma_fence base; + + /* RB, DMA, etc. */ + struct amdgpu_ring *ring; + ktime_t start_timestamp; +}; + extern const struct drm_sched_backend_ops amdgpu_sched_ops; void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c index e22cb2b5cd926..dba8051b8c14b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c @@ -133,7 +133,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv) vm = &fpriv->vm; - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = amdgpu_vm_lock_pd(vm, &exec, 0); if (likely(!r)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 4c7b53648a507..eb83d7c1f784c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -30,6 +30,10 @@ #define AMDGPU_UCODE_NAME_MAX (128) +static const struct kicker_device kicker_device_list[] = { + {0x744B, 0x00}, +}; + static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr) { DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes)); @@ -1383,6 +1387,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return NULL; } +bool amdgpu_is_kicker_fw(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) { + if (adev->pdev->device == kicker_device_list[i].device && + adev->pdev->revision == kicker_device_list[i].revision) + return true; + } + + return false; +} + void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len) { int maj, min, rev; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 4e23419b92d4e..fd08b015b2a7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -595,6 +595,11 @@ struct amdgpu_firmware { uint64_t fw_buf_mc; }; +struct kicker_device { + unsigned short device; + u8 revision; +}; + void
amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr); @@ -622,5 +627,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type); const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id); void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len); +bool amdgpu_is_kicker_fw(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index b6397d3229e1b..01dccd489a805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -1010,6 +1010,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f void *scratch_reg2; void *scratch_reg3; void *spare_int; + unsigned long flags; if (!adev->gfx.rlc.rlcg_reg_access_supported) { dev_err(adev->dev, @@ -1031,7 +1032,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3; - mutex_lock(&adev->virt.rlcg_reg_lock); + spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags); if (reg_access_ctrl->spare_int) spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int; @@ -1090,7 +1091,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f ret = readl(scratch_reg0); - mutex_unlock(&adev->virt.rlcg_reg_lock); + spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b650a2032c42b..6a2087abfb7e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -275,7 +275,8 @@ struct amdgpu_virt { /* the ucode id to signal the autoload */ uint32_t autoload_ucode_id; - struct mutex rlcg_reg_lock; + /* Spinlock to protect access to the RLCG register interface */ + spinlock_t rlcg_reg_lock; }; struct amdgpu_video_codec_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index ff5e52025266c..8f58ec6f14009 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -463,7 +463,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, int r; lpfn = (u64)place->lpfn << PAGE_SHIFT; - if (!lpfn) + if (!lpfn || lpfn > man->size) lpfn = man->size; fpfn = (u64)place->fpfn << PAGE_SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 1f06b22dbe7c6..96e5c520af316 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -84,6 +84,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); @@ -734,6 +735,9 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) adev->pdev->revision == 0xCE) err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, "amdgpu/gc_11_0_0_rlc_1.bin"); + else if 
(amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); else err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, "amdgpu/%s_rlc.bin", ucode_prefix); diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index d4f72e47ae9e2..c4f5cbf1ecd7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -32,6 +32,7 @@ #include "gc/gc_11_0_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin"); @@ -50,7 +51,10 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index bf00de763acb0..124f74e862d7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 4dd86c682ee6a..1e4ce06f5f2c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -485,7 +485,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, { struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 doorbell_offset, doorbell; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, sdma_cntl; int i; for_each_inst(i, inst_mask) { @@ -497,6 +497,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0); + WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl); if (sdma[i]->use_doorbell) { doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL); @@ -953,6 +956,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev, /* set utc l1 enable flag always to 1 */ temp = RREG32_SDMA(i, regSDMA_CNTL); temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1); + WREG32_SDMA(i, regSDMA_CNTL, temp); if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) { /* enable context empty interrupt during initialization */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index e0b02bf1c5639..3d114ea7049f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c 
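The kicker handling in the hunks above repeats one idiom across the PSP, RLC and IMU loaders: probe a static (device, revision) table once via amdgpu_is_kicker_fw(), then swap the firmware basename. A minimal standalone C sketch of that lookup pattern follows; the table entry is copied from the hunk above, while the helper names and the printf harness are illustrative only, not driver API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fw_id {
	unsigned short device;	/* PCI device ID */
	unsigned char revision;	/* PCI revision ID */
};

/* Single entry mirrored from kicker_device_list in the hunk above. */
static const struct fw_id kicker_list[] = {
	{ 0x744B, 0x00 },
};

static bool is_kicker(unsigned short device, unsigned char revision)
{
	for (size_t i = 0; i < sizeof(kicker_list) / sizeof(kicker_list[0]); i++) {
		if (kicker_list[i].device == device &&
		    kicker_list[i].revision == revision)
			return true;
	}
	return false;
}

int main(void)
{
	/* Kicker boards swap the firmware basename, e.g. "..._rlc_kicker.bin"
	 * instead of "..._rlc.bin", matching the request calls above. */
	printf("amdgpu/gc_11_0_0_rlc%s.bin\n",
	       is_kicker(0x744B, 0x00) ? "_kicker" : "");
	return 0;
}

Keeping the table static and scanning it once at load time is cheap (the list is tiny) and keeps the kicker decision in a single place instead of per-IP-block checks.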
@@ -985,6 +985,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | VCN_RB1_DB_CTRL__EN_MASK); + /* Keeping one read-back to ensure all register writes are done, otherwise * it may introduce race conditions */ + RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL); + return 0; } @@ -1167,6 +1171,10 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev) tmp |= VCN_RB_ENABLE__RB1_EN_MASK; WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + + /* Keeping one read-back to ensure all register writes are done, otherwise * it may introduce race conditions */ + RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c new file mode 100644 index 0000000000000..cdefd7fcb0da6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -0,0 +1,1624 @@ +/* + * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_vcn.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "soc15_hw_ip.h" +#include "vcn_v2_0.h" +#include "vcn_v4_0_3.h" +#include "mmsch_v5_0.h" + +#include "vcn/vcn_5_0_0_offset.h" +#include "vcn/vcn_5_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h" +#include "vcn_v5_0_0.h" +#include "vcn_v5_0_1.h" + +#include <drm/drm_drv.h> + +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev); +static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev); +static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); +static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, + enum amd_powergating_state state); +static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring); +static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev); +/** + * vcn_v5_0_1_early_init - set function pointers and load microcode + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ * + * Set ring and irq function pointers + * Load microcode from filesystem + */ +static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) + /* re-use enc ring as unified ring */ + adev->vcn.inst[i].num_enc_rings = 1; + + vcn_v5_0_1_set_unified_ring_funcs(adev); + vcn_v5_0_1_set_irq_funcs(adev); + vcn_v5_0_1_set_ras_funcs(adev); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state; + + r = amdgpu_vcn_early_init(adev, i); + if (r) + return r; + } + + return 0; +} + +static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx) +{ + struct amdgpu_vcn5_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + + if (fw_shared->sq.is_enabled) + return; + fw_shared->present_flag_0 = + cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); + fw_shared->sq.is_enabled = 1; + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]); +} + +/** + * vcn_v5_0_1_sw_init - sw init for VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Load firmware and sw initialization + */ +static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ring *ring; + int i, r, vcn_inst; + + /* VCN UNIFIED TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq); + if (r) + return r; + + /* VCN POISON TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq); + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + vcn_inst = GET_INST(VCN, i); + + r = amdgpu_vcn_sw_init(adev, i); + if (r) + return r; + + amdgpu_vcn_setup_ucode(adev, i); + + r = amdgpu_vcn_resume(adev, i); + if (r) + return r; + + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->use_doorbell = true; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst; + else + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 32 * vcn_inst; + + ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); + sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); + + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, + AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score); + if (r) + return r; + + vcn_v5_0_1_fw_shared_init(adev, i); + } + + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + + vcn_v5_0_0_alloc_ip_dump(adev); + + return amdgpu_vcn_sysfs_reset_mask_init(adev); +} + +/** + * vcn_v5_0_1_sw_fini - sw fini for VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
+ * + * VCN suspend and free up sw allocation + */ +static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, r, idx; + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sq.is_enabled = 0; + } + + drm_dev_exit(idx); + } + + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + r = amdgpu_vcn_suspend(adev, i); + if (r) + return r; + } + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + r = amdgpu_vcn_sw_fini(adev, i); + if (r) + return r; + } + + amdgpu_vcn_sysfs_reset_mask_fini(adev); + + kfree(adev->vcn.ip_dump); + + return 0; +} + +/** + * vcn_v5_0_1_hw_init - start and test VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ring *ring; + int i, r, vcn_inst; + + if (amdgpu_sriov_vf(adev)) { + r = vcn_v5_0_1_start_sriov(adev); + if (r) + return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->wptr = 0; + ring->wptr_old = 0; + vcn_v5_0_1_unified_ring_set_wptr(ring); + ring->sched.ready = true; + } + } else { + if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) + adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + vcn_inst = GET_INST(VCN, i); + ring = &adev->vcn.inst[i].ring_enc[0]; + + if (ring->use_doorbell) + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst), + adev->vcn.inst[i].aid_id); + + /* Re-init fw_shared, if required */ + vcn_v5_0_1_fw_shared_init(adev, i); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + } + + return 0; +} + +/** + * vcn_v5_0_1_hw_fini - stop the hardware block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Stop the VCN block, mark ring as not ready any more + */ +static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; + + cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work); + if (vinst->cur_state != AMD_PG_STATE_GATE) + vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); + } + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); + + return 0; +} + +/** + * vcn_v5_0_1_suspend - suspend VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * HW fini and suspend VCN block + */ +static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r, i; + + r = vcn_v5_0_1_hw_fini(ip_block); + if (r) + return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + r = amdgpu_vcn_suspend(ip_block->adev, i); + if (r) + return r; + } + + return r; +} + +/** + * vcn_v5_0_1_resume - resume VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
+ * + * Resume firmware and hw init VCN block + */ +static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r, i; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; + + if (amdgpu_in_reset(adev)) + vinst->cur_state = AMD_PG_STATE_GATE; + + r = amdgpu_vcn_resume(ip_block->adev, i); + if (r) + return r; + } + + r = vcn_v5_0_1_hw_init(ip_block); + + return r; +} + +/** + * vcn_v5_0_1_mc_resume - memory controller programming + * + * @vinst: VCN instance + * + * Let the VCN memory controller know its offsets + */ +static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int inst = vinst->inst; + uint32_t offset, size, vcn_inst; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + vcn_inst = GET_INST(VCN, inst); + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr)); + offset = size; + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size); + + /* cache window 1: stack */ + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared))); +} + +/** + * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode + * + * @vinst: VCN instance + * @indirect: indirectly write sram + * + * Let the VCN memory controller know its offsets with dpg mode + */ +static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst
*vinst, + bool indirect) +{ + struct amdgpu_device *adev = vinst->adev; + int inst_idx = vinst->inst; + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + + inst_idx].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + + inst_idx].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + offset = size; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); + } + + if (!indirect) + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE), 0, 
indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); +} + +/** + * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating + * + * @vinst: VCN instance + * + * Disable clock gating for VCN block + */ +static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst) +{ +} + +/** + * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating + * + * @vinst: VCN instance + * + * Enable clock gating for VCN block + */ +static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst) +{ +} + +/** + * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode + * + * @vinst: VCN instance + * @new_state: pause state + * + * Pause dpg mode for VCN block + */ +static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, + struct dpg_pause_state *new_state) +{ + struct amdgpu_device *adev = vinst->adev; + uint32_t reg_data = 0; + int vcn_inst; + + vcn_inst = GET_INST(VCN, vinst->inst); + + /* pause/unpause if state is changed */ + if (vinst->pause_state.fw_based != new_state->fw_based) { + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n", + vinst->pause_state.fw_based, new_state->fw_based, + new_state->fw_based ? 
"VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE"); + reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + } else { + /* unpause DPG, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + } + vinst->pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + + +/** + * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode + * + * @vinst: VCN instance + * @indirect: indirectly write sram + * + * Start VCN block with dpg mode + */ +static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, + bool indirect) +{ + struct amdgpu_device *adev = vinst->adev; + int inst_idx = vinst->inst; + volatile struct amdgpu_vcn5_fw_shared *fw_shared = + adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_ring *ring; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; + int vcn_inst; + uint32_t tmp; + + vcn_inst = GET_INST(VCN, inst_idx); + + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp); + + if (indirect) { + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = + (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; + /* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */ + WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF, + adev->vcn.inst[inst_idx].aid_id, 0, true); + } + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interrupt */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect); + + /* setup regUVD_LMI_CTRL */ + tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect); + + vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable LMI MC and UMC channels */ + tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect); + + /* enable master interrupt */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + if (indirect) + amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + 
+ /* resetting ring, fw should not check RB ring */ + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + + /* Pause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + /* resetting done, fw can check RB ring */ + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + + WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + /* Read DB_CTRL to flush the write DB_CTRL command. */ + RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL); + + return 0; +} + +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev) +{ + int i, vcn_inst; + struct amdgpu_ring *ring_enc; + uint64_t cache_addr; + uint64_t rb_enc_addr; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t offset, cache_size; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw; + uint32_t init_status; + uint32_t enabled_vcn; + + struct mmsch_v5_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v5_0_cmd_direct_read_modify_write + direct_rd_mod_wt = { {0} }; + struct mmsch_v5_0_cmd_end end = { {0} }; + struct mmsch_v5_0_init_header header; + + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + vcn_inst = GET_INST(VCN, i); + + vcn_v5_0_1_fw_shared_init(adev, vcn_inst); + + memset(&header, 0, sizeof(struct mmsch_v5_0_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + table_size = 0; + + MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + + offset = 0; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), 0); + } else { + 
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = cache_size; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE0), + cache_size); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET1), 0); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE; + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET2), 0); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE); + + fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr; + rb_setup = &fw_shared->rb_setup; + + ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0]; + ring_enc->wptr = 0; + rb_enc_addr = ring_enc->gpu_addr; + + rb_setup->is_rb_enabled_flags |= RB_ENABLED; + rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); + rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); + rb_setup->rb_size = ring_enc->ring_size / 4; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); + MMSCH_V5_0_INSERT_END(); + + header.vcn0.init_status = 0; + header.vcn0.table_offset = header.total_size; + header.vcn0.table_size = table_size; + header.total_size += table_size; + + /* Send init table to mmsch */ + size = sizeof(struct mmsch_v5_0_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size); + + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0); + + param = 0x00000001; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 
0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + while (resp != expected) { + resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP); + if (resp != 0) + break; + + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + + enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0; + init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status; + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE + && init_status != MMSCH_VF_ENGINE_STATUS__PASS) { + DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\ + "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status); + } + } + + return 0; +} + +/** + * vcn_v5_0_1_start - VCN start + * + * @vinst: VCN instance + * + * Start VCN block + */ +static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int i = vinst->inst; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_ring *ring; + uint32_t tmp; + int j, k, r, vcn_inst; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram); + + vcn_inst = GET_INST(VCN, i); + + /* set VCN status busy */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp); + + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); + + /* setup regUVD_LMI_CTRL */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + vcn_v5_0_1_mc_resume(vinst); + + /* VCN global tiling registers */ + WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + for (j = 0; j < 10; ++j) { + uint32_t status; + + for (k = 0; k < 100; ++k) { + status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + if (status & 2) + break; + mdelay(100); + if (amdgpu_emu_mode == 1) + msleep(20); + } + + if (amdgpu_emu_mode == 1) { + r = -1; + if (status & 2) { + r = 0; + break; + } + } else { + r = 0; + if (status & 2) + break; + + dev_err(adev->dev, + "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + 
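The SR-IOV bring-up completed just above (vcn_v5_0_1_start_sriov) hands the descriptor table address and size to the MMSCH, rings the mailbox, and then polls regMMSCH_VF_MAILBOX_RESP in 10-microsecond steps against a 1000-microsecond budget, failing with -EBUSY on timeout. The same poll-with-deadline shape, reduced to portable C with a simulated responder; all names here are illustrative, not MMSCH API.

#include <stdint.h>
#include <stdio.h>

#define RESP_OK 0x1u	/* stand-in for MMSCH_VF_MAILBOX_RESP__OK */

static int elapsed_us;	/* simulated time base */

static uint32_t read_mailbox(void)
{
	/* Fake MMSCH: report success after ~50us of "work". */
	return elapsed_us >= 50 ? RESP_OK : 0;
}

static int poll_mailbox(int step_us, int timeout_us)
{
	for (elapsed_us = 0; elapsed_us < timeout_us; elapsed_us += step_us) {
		uint32_t resp = read_mailbox();

		if (resp != 0)
			return resp == RESP_OK ? 0 : -1; /* any reply ends the wait */
		/* udelay(step_us) would sit here in the kernel version */
	}
	return -2;	/* -EBUSY equivalent: deadline exceeded */
}

int main(void)
{
	printf("mailbox poll: %d\n", poll_mailbox(10, 1000));
	return 0;
}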
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; + } + } + + if (r) { + dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + ring = &adev->vcn.inst[i].ring_enc[0]; + + WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + + /* Read DB_CTRL to flush the write DB_CTRL command. */ + RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL); + + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + + return 0; +} + +/** + * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode + * + * @vinst: VCN instance + * + * Stop VCN block with dpg mode + */ +static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int inst_idx = vinst->inst; + uint32_t tmp; + int vcn_inst; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; + + vcn_inst = GET_INST(VCN, inst_idx); + + /* Unpause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. 
+ */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); +} + +/** + * vcn_v5_0_1_stop - VCN stop + * + * @vinst: VCN instance + * + * Stop VCN block + */ +static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int i = vinst->inst; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + uint32_t tmp; + int r = 0, vcn_inst; + + vcn_inst = GET_INST(VCN, i); + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + vcn_v5_0_1_stop_dpg_mode(vinst); + return 0; + } + + /* wait for vcn idle */ + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); + if (r) + return r; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* disable LMI UMC channel */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp); + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* block VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* reset VCPU */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + + /* apply soft reset */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); + + /* clear status */ + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. 
+ */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + + return 0; +} + +/** + * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified read pointer + */ +static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR); +} + +/** + * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified write pointer + */ +static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) + return *ring->wptr_cpu_addr; + else + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR); +} + +/** + * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the enc write pointer to the hardware + */ +static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) { + *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR, + lower_32_bits(ring->wptr)); + } +} + +static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_ENC, + .align_mask = 0x3f, + .nop = VCN_ENC_CMD_NO_OP, + .get_rptr = vcn_v5_0_1_unified_ring_get_rptr, + .get_wptr = vcn_v5_0_1_unified_ring_get_wptr, + .set_wptr = vcn_v5_0_1_unified_ring_set_wptr, + .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ + 5 + + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ + 1, /* vcn_v2_0_enc_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ + .emit_ib = vcn_v2_0_enc_ring_emit_ib, + .emit_fence = vcn_v2_0_enc_ring_emit_fence, + .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush, + .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush, + .test_ring = amdgpu_vcn_enc_ring_test_ring, + .test_ib = amdgpu_vcn_unified_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_v2_0_enc_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg, + .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +/** + * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions + * + * @adev: amdgpu_device pointer + * + * Set unified ring functions + */ +static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev) +{ + int i, vcn_inst; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs; + adev->vcn.inst[i].ring_enc[0].me = i; + vcn_inst = GET_INST(VCN, i); + adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid; + } +} + +/** + * 
vcn_v5_0_1_is_idle - check VCN block is idle + * + * @ip_block: Pointer to the amdgpu_ip_block structure + * + * Check whether VCN block is idle + */ +static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, ret = 1; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) + ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE); + + return ret; +} + +/** + * vcn_v5_0_1_wait_for_idle - wait for VCN block idle + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Wait for VCN block idle + */ +static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, ret = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); + if (ret) + return ret; + } + + return ret; +} + +/** + * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * @state: clock gating state + * + * Set VCN block clockgating state + */ +static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = ip_block->adev; + bool enable = state == AMD_CG_STATE_GATE; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; + + if (enable) { + if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE) + return -EBUSY; + vcn_v5_0_1_enable_clock_gating(vinst); + } else { + vcn_v5_0_1_disable_clock_gating(vinst); + } + } + + return 0; +} + +static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = vinst->adev; + int ret = 0; + + /* for SRIOV, guest should not control VCN Power-gating + * MMSCH FW should control Power-gating and clock-gating + * guest should avoid touching CGC and PG + */ + if (amdgpu_sriov_vf(adev)) { + vinst->cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + + if (state == vinst->cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = vcn_v5_0_1_stop(vinst); + else + ret = vcn_v5_0_1_start(vinst); + + if (!ret) + vinst->cur_state = state; + + return ret; +} + +/** + * vcn_v5_0_1_process_interrupt - process VCN block interrupt + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @entry: interrupt entry from clients and sources + * + * Process VCN block interrupt + */ +static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t i, inst; + + i = node_id_to_phys_map[entry->node_id]; + + DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); + + for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst) + if (adev->vcn.inst[inst].aid_id == i) + break; + if (inst >= adev->vcn.num_vcn_inst) { + dev_WARN_ONCE(adev->dev, 1, + "Interrupt received for unknown VCN instance %d", + entry->node_id); + return 0; + } + + switch (entry->src_id) { + case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE: + amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]); + break; + default: + DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum 
amdgpu_interrupt_state state) +{ + return 0; +} + +static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = { + .process = vcn_v5_0_1_process_interrupt, +}; + +static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = { + .set = vcn_v5_0_1_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + + +/** + * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions + * + * @adev: amdgpu_device pointer + * + * Set VCN block interrupt irq functions + */ +static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) + adev->vcn.inst->irq.num_types++; + + adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs; + + adev->vcn.inst->ras_poison_irq.num_types = 1; + adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs; + +} + +static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = { + .name = "vcn_v5_0_1", + .early_init = vcn_v5_0_1_early_init, + .late_init = NULL, + .sw_init = vcn_v5_0_1_sw_init, + .sw_fini = vcn_v5_0_1_sw_fini, + .hw_init = vcn_v5_0_1_hw_init, + .hw_fini = vcn_v5_0_1_hw_fini, + .suspend = vcn_v5_0_1_suspend, + .resume = vcn_v5_0_1_resume, + .is_idle = vcn_v5_0_1_is_idle, + .wait_for_idle = vcn_v5_0_1_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = vcn_v5_0_1_set_clockgating_state, + .set_powergating_state = vcn_set_powergating_state, + .dump_ip_state = vcn_v5_0_0_dump_ip_state, + .print_ip_state = vcn_v5_0_0_print_ip_state, +}; + +const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = { + .type = AMD_IP_BLOCK_TYPE_VCN, + .major = 5, + .minor = 0, + .rev = 1, + .funcs = &vcn_v5_0_1_ip_funcs, +}; + +static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev, + uint32_t instance, uint32_t sub_block) +{ + uint32_t poison_stat = 0, reg_value = 0; + + switch (sub_block) { + case AMDGPU_VCN_V5_0_1_VCPU_VCODEC: + reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF); + break; + default: + break; + } + + if (poison_stat) + dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n", + instance, sub_block); + + return poison_stat; +} + +static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev) +{ + uint32_t inst, sub; + uint32_t poison_stat = 0; + + for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++) + for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++) + poison_stat += + vcn_v5_0_1_query_poison_by_instance(adev, inst, sub); + + return !!poison_stat; +} + +static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = { + .query_poison_status = vcn_v5_0_1_query_poison_status, +}; + +static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) +{ + struct aca_bank_info info; + u64 misc0; + int ret; + + ret = aca_bank_info_decode(bank, &info); + if (ret) + return ret; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + switch (type) { + case ACA_SMU_TYPE_UE: + bank->aca_err_type = ACA_ERROR_TYPE_UE; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); + break; + case ACA_SMU_TYPE_CE: + bank->aca_err_type = ACA_ERROR_TYPE_CE; + ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, + ACA_REG__MISC0__ERRCNT(misc0)); + break; + default: + return -EINVAL; + } + + return ret; +} + +/* reference to smu driver if header file */ +static int 
vcn_v5_0_1_err_codes[] = { + 14, 15, /* VCN */ +}; + +static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) +{ + u32 instlo; + + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + if (aca_bank_check_error_codes(handle->adev, bank, + vcn_v5_0_1_err_codes, + ARRAY_SIZE(vcn_v5_0_1_err_codes))) + return false; + + return true; +} + +static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = { + .aca_bank_parser = vcn_v5_0_1_aca_bank_parser, + .aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid, +}; + +static const struct aca_info vcn_v5_0_1_aca_info = { + .hwip = ACA_HWIP_TYPE_SMU, + .mask = ACA_ERROR_UE_MASK, + .bank_ops = &vcn_v5_0_1_aca_bank_ops, +}; + +static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN, + &vcn_v5_0_1_aca_info, NULL); + if (r) + goto late_fini; + + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + + return r; +} + +static struct amdgpu_vcn_ras vcn_v5_0_1_ras = { + .ras_block = { + .hw_ops = &vcn_v5_0_1_ras_hw_ops, + .ras_late_init = vcn_v5_0_1_ras_late_init, + }, +}; + +static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev) +{ + adev->vcn.ras = &vcn_v5_0_1_ras; +} diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 7062f12b5b751..6c8c9935a0f2e 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -3640,7 +3640,7 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { }; static const uint32_t cwsr_trap_gfx12_hex[] = { - 0xbfa00001, 0xbfa0024b, + 0xbfa00001, 0xbfa002a2, 0xb0804009, 0xb8f8f804, 0x9178ff78, 0x00008c00, 0xb8fbf811, 0x8b6eff78, @@ -3714,7 +3714,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0x00011677, 0xd7610000, 0x00011a79, 0xd7610000, 0x00011c7e, 0xd7610000, - 0x00011e7f, 0xbefe00ff, + 0x00011e7f, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xbefe00ff, 0x00003fff, 0xbeff0080, 0xee0a407a, 0x000c0000, 0x00004000, 0xd760007a, @@ -3751,38 +3759,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0x00000200, 0xbef600ff, 0x01000000, 0x7e000280, 0x7e020280, 0x7e040280, - 0xbefd0080, 0xbe804ec2, - 0xbf94fffe, 0xb8faf804, - 0x8b7a847a, 0x91788478, - 0x8c787a78, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xd7610002, 0x0000fa6c, - 0x807d817d, 0x917aff6d, - 0x80000000, 0xd7610002, - 0x0000fa7a, 0x807d817d, - 0xd7610002, 0x0000fa6e, - 0x807d817d, 0xd7610002, - 0x0000fa6f, 0x807d817d, - 0xd7610002, 0x0000fa78, - 0x807d817d, 0xb8faf811, - 0xd7610002, 0x0000fa7a, - 0x807d817d, 0xd7610002, - 0x0000fa7b, 0x807d817d, - 0xb8f1f801, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f814, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f815, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f812, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f813, 0xd7610002, - 0x0000fa71, 0x807d817d, + 0xbe804ec2, 0xbf94fffe, + 0xb8faf804, 0x8b7a847a, + 0x91788478, 0x8c787a78, + 0x917aff6d, 0x80000000, + 0xd7610002, 0x00010071, + 0xd7610002, 0x0001026c, + 0xd7610002, 0x0001047a, + 0xd7610002, 0x0001066e, + 0xd7610002, 
0x0001086f, + 0xd7610002, 0x00010a78, + 0xd7610002, 0x00010e7b, + 0xd8500000, 0x00000000, + 0xd8500000, 0x00000000, + 0xd8500000, 0x00000000, + 0xd8500000, 0x00000000, + 0xd8500000, 0x00000000, + 0xd8500000, 0x00000000, + 0xd8500000, 0x00000000, + 0xd8500000, 0x00000000, + 0xb8faf811, 0xd7610002, + 0x00010c7a, 0xb8faf801, + 0xd7610002, 0x0001107a, + 0xb8faf814, 0xd7610002, + 0x0001127a, 0xb8faf815, + 0xd7610002, 0x0001147a, + 0xb8faf812, 0xd7610002, + 0x0001167a, 0xb8faf813, + 0xd7610002, 0x0001187a, 0xb8faf802, 0xd7610002, - 0x0000fa7a, 0x807d817d, - 0xbefa50c1, 0xbfc70000, - 0xd7610002, 0x0000fa7a, - 0x807d817d, 0xbefe00ff, + 0x00011a7a, 0xbefa50c1, + 0xbfc70000, 0xd7610002, + 0x00011c7a, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xbefe00ff, 0x0000ffff, 0xbeff0080, 0xc4068070, 0x008ce802, 0x00000000, 0xbefe00c1, @@ -3797,329 +3813,356 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0xbe824102, 0xbe844104, 0xbe864106, 0xbe884108, 0xbe8a410a, 0xbe8c410c, - 0xbe8e410e, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, - 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, - 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, - 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, - 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, - 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, - 0x80798179, 0xd7610002, - 0x0000f20c, 0x80798179, - 0xd7610002, 0x0000f20d, - 0x80798179, 0xd7610002, - 0x0000f20e, 0x80798179, - 0xd7610002, 0x0000f20f, - 0x80798179, 0xbf06a079, - 0xbfa10007, 0xc4068070, + 0xbe8e410e, 0xbf068079, + 0xbfa10032, 0xd7610002, + 0x00010000, 0xd7610002, + 0x00010201, 0xd7610002, + 0x00010402, 0xd7610002, + 0x00010603, 0xd7610002, + 0x00010804, 0xd7610002, + 0x00010a05, 0xd7610002, + 0x00010c06, 0xd7610002, + 0x00010e07, 0xd7610002, + 0x00011008, 0xd7610002, + 0x00011209, 0xd7610002, + 0x0001140a, 0xd7610002, + 0x0001160b, 0xd7610002, + 0x0001180c, 0xd7610002, + 0x00011a0d, 0xd7610002, + 0x00011c0e, 0xd7610002, + 0x00011e0f, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0x80799079, + 0xbfa00038, 0xd7610002, + 0x00012000, 0xd7610002, + 0x00012201, 0xd7610002, + 0x00012402, 0xd7610002, + 0x00012603, 0xd7610002, + 0x00012804, 0xd7610002, + 0x00012a05, 0xd7610002, + 0x00012c06, 0xd7610002, + 0x00012e07, 0xd7610002, + 0x00013008, 0xd7610002, + 0x00013209, 0xd7610002, + 0x0001340a, 0xd7610002, + 0x0001360b, 0xd7610002, + 0x0001380c, 0xd7610002, + 0x00013a0d, 0xd7610002, + 0x00013c0e, 0xd7610002, + 0x00013e0f, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0x80799079, + 0xc4068070, 0x008ce802, + 0x00000000, 0x8070ff70, + 0x00000080, 0xbef90080, + 0x7e040280, 0x807d907d, + 0xbf0aff7d, 0x00000060, + 0xbfa2ff88, 0xbe804100, + 0xbe824102, 0xbe844104, + 0xbe864106, 0xbe884108, + 0xbe8a410a, 0xd7610002, + 0x00010000, 0xd7610002, + 0x00010201, 0xd7610002, + 0x00010402, 0xd7610002, + 0x00010603, 0xd7610002, + 0x00010804, 0xd7610002, + 0x00010a05, 0xd7610002, + 0x00010c06, 
0xd7610002, + 0x00010e07, 0xd7610002, + 0x00011008, 0xd7610002, + 0x00011209, 0xd7610002, + 0x0001140a, 0xd7610002, + 0x0001160b, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xd8500000, + 0x00000000, 0xc4068070, 0x008ce802, 0x00000000, + 0xbefe00c1, 0x857d9973, + 0x8b7d817d, 0xbf06817d, + 0xbfa20002, 0xbeff0080, + 0xbfa00001, 0xbeff00c1, + 0xb8fb4306, 0x8b7bc17b, + 0xbfa10044, 0x8b7aff6d, + 0x80000000, 0xbfa10041, + 0x847b897b, 0xbef6007b, + 0xb8f03b05, 0x80708170, + 0xbf0d9973, 0xbfa20002, + 0x84708970, 0xbfa00001, + 0x84708a70, 0xb8fa1e06, + 0x847a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, 0x8070ff70, 0x00000080, - 0xbef90080, 0x7e040280, - 0x807d907d, 0xbf0aff7d, - 0x00000060, 0xbfa2ffbb, - 0xbe804100, 0xbe824102, - 0xbe844104, 0xbe864106, - 0xbe884108, 0xbe8a410a, - 0xd7610002, 0x0000f200, - 0x80798179, 0xd7610002, - 0x0000f201, 0x80798179, - 0xd7610002, 0x0000f202, - 0x80798179, 0xd7610002, - 0x0000f203, 0x80798179, - 0xd7610002, 0x0000f204, - 0x80798179, 0xd7610002, - 0x0000f205, 0x80798179, - 0xd7610002, 0x0000f206, - 0x80798179, 0xd7610002, - 0x0000f207, 0x80798179, - 0xd7610002, 0x0000f208, - 0x80798179, 0xd7610002, - 0x0000f209, 0x80798179, - 0xd7610002, 0x0000f20a, - 0x80798179, 0xd7610002, - 0x0000f20b, 0x80798179, - 0xc4068070, 0x008ce802, - 0x00000000, 0xbefe00c1, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00001, - 0xbeff00c1, 0xb8fb4306, - 0x8b7bc17b, 0xbfa10044, - 0x8b7aff6d, 0x80000000, - 0xbfa10041, 0x847b897b, - 0xbef6007b, 0xb8f03b05, - 0x80708170, 0xbf0d9973, - 0xbfa20002, 0x84708970, - 0xbfa00001, 0x84708a70, - 0xb8fa1e06, 0x847a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0x8070ff70, - 0x00000080, 0xbef600ff, - 0x01000000, 0xd71f0000, - 0x000100c1, 0xd7200000, - 0x000200c1, 0x16000084, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbefd0080, - 0xbfa20013, 0xbe8300ff, - 0x00000080, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf8a0000, 0xc4068070, - 0x008ce801, 0x00000000, - 0x807d037d, 0x80700370, - 0xd5250000, 0x0001ff00, - 0x00000080, 0xbf0a7b7d, - 0xbfa2fff3, 0xbfa00012, - 0xbe8300ff, 0x00000100, + 0xbef600ff, 0x01000000, + 0xd71f0000, 0x000100c1, + 0xd7200000, 0x000200c1, + 0x16000084, 0x857d9973, + 0x8b7d817d, 0xbf06817d, + 0xbefd0080, 0xbfa20013, + 0xbe8300ff, 0x00000080, 0xbf800000, 0xbf800000, 0xbf800000, 0xd8d80000, 0x01000000, 0xbf8a0000, 0xc4068070, 0x008ce801, 0x00000000, 0x807d037d, 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000100, + 0x0001ff00, 0x00000080, 0xbf0a7b7d, 0xbfa2fff3, - 0xbefe00c1, 0x857d9973, - 0x8b7d817d, 0xbf06817d, - 0xbfa20004, 0xbef000ff, - 0x00000200, 0xbeff0080, - 0xbfa00003, 0xbef000ff, - 0x00000400, 0xbeff00c1, - 0xb8fb3b05, 0x807b817b, - 0x847b827b, 0x857d9973, - 0x8b7d817d, 0xbf06817d, - 0xbfa2001b, 0xbef600ff, - 0x01000000, 0xbefd0084, - 0xbf0a7b7d, 0xbfa10040, - 0x7e008700, 0x7e028701, - 0x7e048702, 0x7e068703, - 0xc4068070, 0x008ce800, - 0x00000000, 0xc4068070, - 0x008ce801, 0x00008000, - 0xc4068070, 0x008ce802, - 0x00010000, 0xc4068070, - 0x008ce803, 0x00018000, - 0x807d847d, 0x8070ff70, - 0x00000200, 0xbf0a7b7d, - 0xbfa2ffeb, 0xbfa0002a, + 0xbfa00012, 0xbe8300ff, + 0x00000100, 0xbf800000, + 0xbf800000, 0xbf800000, + 0xd8d80000, 0x01000000, + 0xbf8a0000, 0xc4068070, + 0x008ce801, 0x00000000, + 0x807d037d, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000100, 0xbf0a7b7d, + 0xbfa2fff3, 0xbefe00c1, + 0x857d9973, 0x8b7d817d, + 0xbf06817d, 
0xbfa20004, + 0xbef000ff, 0x00000200, + 0xbeff0080, 0xbfa00003, + 0xbef000ff, 0x00000400, + 0xbeff00c1, 0xb8fb3b05, + 0x807b817b, 0x847b827b, + 0x857d9973, 0x8b7d817d, + 0xbf06817d, 0xbfa2001b, 0xbef600ff, 0x01000000, 0xbefd0084, 0xbf0a7b7d, - 0xbfa10015, 0x7e008700, + 0xbfa10040, 0x7e008700, 0x7e028701, 0x7e048702, 0x7e068703, 0xc4068070, 0x008ce800, 0x00000000, 0xc4068070, 0x008ce801, - 0x00010000, 0xc4068070, - 0x008ce802, 0x00020000, + 0x00008000, 0xc4068070, + 0x008ce802, 0x00010000, 0xc4068070, 0x008ce803, - 0x00030000, 0x807d847d, - 0x8070ff70, 0x00000400, + 0x00018000, 0x807d847d, + 0x8070ff70, 0x00000200, 0xbf0a7b7d, 0xbfa2ffeb, - 0xb8fb1e06, 0x8b7bc17b, - 0xbfa1000d, 0x847b837b, - 0x807b7d7b, 0xbefe00c1, - 0xbeff0080, 0x7e008700, + 0xbfa0002a, 0xbef600ff, + 0x01000000, 0xbefd0084, + 0xbf0a7b7d, 0xbfa10015, + 0x7e008700, 0x7e028701, + 0x7e048702, 0x7e068703, 0xc4068070, 0x008ce800, - 0x00000000, 0x807d817d, - 0x8070ff70, 0x00000080, - 0xbf0a7b7d, 0xbfa2fff7, - 0xbfa0016e, 0xbef4007e, - 0x8b75ff7f, 0x0000ffff, - 0x8c75ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x10807fac, 0xbef1007f, - 0xb8f20742, 0x84729972, - 0x8b6eff7f, 0x04000000, - 0xbfa1003b, 0xbefe00c1, - 0x857d9972, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00001, - 0xbeff00c1, 0xb8ef4306, - 0x8b6fc16f, 0xbfa10030, - 0x846f896f, 0xbef6006f, + 0x00000000, 0xc4068070, + 0x008ce801, 0x00010000, + 0xc4068070, 0x008ce802, + 0x00020000, 0xc4068070, + 0x008ce803, 0x00030000, + 0x807d847d, 0x8070ff70, + 0x00000400, 0xbf0a7b7d, + 0xbfa2ffeb, 0xb8fb1e06, + 0x8b7bc17b, 0xbfa1000d, + 0x847b837b, 0x807b7d7b, + 0xbefe00c1, 0xbeff0080, + 0x7e008700, 0xc4068070, + 0x008ce800, 0x00000000, + 0x807d817d, 0x8070ff70, + 0x00000080, 0xbf0a7b7d, + 0xbfa2fff7, 0xbfa0016e, + 0xbef4007e, 0x8b75ff7f, + 0x0000ffff, 0x8c75ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x10807fac, + 0xbef1007f, 0xb8f20742, + 0x84729972, 0x8b6eff7f, + 0x04000000, 0xbfa1003b, + 0xbefe00c1, 0x857d9972, + 0x8b7d817d, 0xbf06817d, + 0xbfa20002, 0xbeff0080, + 0xbfa00001, 0xbeff00c1, + 0xb8ef4306, 0x8b6fc16f, + 0xbfa10030, 0x846f896f, + 0xbef6006f, 0xb8f83b05, + 0x80788178, 0xbf0d9972, + 0xbfa20002, 0x84788978, + 0xbfa00001, 0x84788a78, + 0xb8ee1e06, 0x846e8a6e, + 0x80786e78, 0x8078ff78, + 0x00000200, 0x8078ff78, + 0x00000080, 0xbef600ff, + 0x01000000, 0x857d9972, + 0x8b7d817d, 0xbf06817d, + 0xbefd0080, 0xbfa2000d, + 0xc4050078, 0x0080e800, + 0x00000000, 0xbf8a0000, + 0xdac00000, 0x00000000, + 0x807dff7d, 0x00000080, + 0x8078ff78, 0x00000080, + 0xbf0a6f7d, 0xbfa2fff4, + 0xbfa0000c, 0xc4050078, + 0x0080e800, 0x00000000, + 0xbf8a0000, 0xdac00000, + 0x00000000, 0x807dff7d, + 0x00000100, 0x8078ff78, + 0x00000100, 0xbf0a6f7d, + 0xbfa2fff4, 0xbef80080, + 0xbefe00c1, 0x857d9972, + 0x8b7d817d, 0xbf06817d, + 0xbfa20002, 0xbeff0080, + 0xbfa00001, 0xbeff00c1, + 0xb8ef3b05, 0x806f816f, + 0x846f826f, 0x857d9972, + 0x8b7d817d, 0xbf06817d, + 0xbfa2002c, 0xbef600ff, + 0x01000000, 0xbeee0078, + 0x8078ff78, 0x00000200, + 0xbefd0084, 0xbf0a6f7d, + 0xbfa10061, 0xc4050078, + 0x008ce800, 0x00000000, + 0xc4050078, 0x008ce801, + 0x00008000, 0xc4050078, + 0x008ce802, 0x00010000, + 0xc4050078, 0x008ce803, + 0x00018000, 0xbf8a0000, + 0x7e008500, 0x7e028501, + 0x7e048502, 0x7e068503, + 0x807d847d, 0x8078ff78, + 0x00000200, 0xbf0a6f7d, + 0xbfa2ffea, 0xc405006e, + 0x008ce800, 0x00000000, + 0xc405006e, 0x008ce801, + 0x00008000, 0xc405006e, + 0x008ce802, 0x00010000, + 0xc405006e, 0x008ce803, + 0x00018000, 0xbf8a0000, + 0xbfa0003d, 0xbef600ff, + 0x01000000, 0xbeee0078, + 0x8078ff78, 
0x00000400, + 0xbefd0084, 0xbf0a6f7d, + 0xbfa10016, 0xc4050078, + 0x008ce800, 0x00000000, + 0xc4050078, 0x008ce801, + 0x00010000, 0xc4050078, + 0x008ce802, 0x00020000, + 0xc4050078, 0x008ce803, + 0x00030000, 0xbf8a0000, + 0x7e008500, 0x7e028501, + 0x7e048502, 0x7e068503, + 0x807d847d, 0x8078ff78, + 0x00000400, 0xbf0a6f7d, + 0xbfa2ffea, 0xb8ef1e06, + 0x8b6fc16f, 0xbfa1000f, + 0x846f836f, 0x806f7d6f, + 0xbefe00c1, 0xbeff0080, + 0xc4050078, 0x008ce800, + 0x00000000, 0xbf8a0000, + 0x7e008500, 0x807d817d, + 0x8078ff78, 0x00000080, + 0xbf0a6f7d, 0xbfa2fff6, + 0xbeff00c1, 0xc405006e, + 0x008ce800, 0x00000000, + 0xc405006e, 0x008ce801, + 0x00010000, 0xc405006e, + 0x008ce802, 0x00020000, + 0xc405006e, 0x008ce803, + 0x00030000, 0xbf8a0000, 0xb8f83b05, 0x80788178, 0xbf0d9972, 0xbfa20002, 0x84788978, 0xbfa00001, 0x84788a78, 0xb8ee1e06, 0x846e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, - 0x8078ff78, 0x00000080, - 0xbef600ff, 0x01000000, - 0x857d9972, 0x8b7d817d, - 0xbf06817d, 0xbefd0080, - 0xbfa2000d, 0xc4050078, - 0x0080e800, 0x00000000, - 0xbf8a0000, 0xdac00000, - 0x00000000, 0x807dff7d, - 0x00000080, 0x8078ff78, - 0x00000080, 0xbf0a6f7d, - 0xbfa2fff4, 0xbfa0000c, - 0xc4050078, 0x0080e800, - 0x00000000, 0xbf8a0000, - 0xdac00000, 0x00000000, - 0x807dff7d, 0x00000100, - 0x8078ff78, 0x00000100, - 0xbf0a6f7d, 0xbfa2fff4, - 0xbef80080, 0xbefe00c1, - 0x857d9972, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00001, - 0xbeff00c1, 0xb8ef3b05, - 0x806f816f, 0x846f826f, - 0x857d9972, 0x8b7d817d, - 0xbf06817d, 0xbfa2002c, + 0x80f8ff78, 0x00000050, 0xbef600ff, 0x01000000, - 0xbeee0078, 0x8078ff78, - 0x00000200, 0xbefd0084, - 0xbf0a6f7d, 0xbfa10061, - 0xc4050078, 0x008ce800, - 0x00000000, 0xc4050078, - 0x008ce801, 0x00008000, - 0xc4050078, 0x008ce802, - 0x00010000, 0xc4050078, - 0x008ce803, 0x00018000, - 0xbf8a0000, 0x7e008500, - 0x7e028501, 0x7e048502, - 0x7e068503, 0x807d847d, + 0xbefd00ff, 0x0000006c, + 0x80f89078, 0xf462403a, + 0xf0000000, 0xbf8a0000, + 0x80fd847d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0x80f8a078, 0xf462603a, + 0xf0000000, 0xbf8a0000, + 0x80fd887d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0xbe844304, 0xbe864306, + 0x80f8c078, 0xf462803a, + 0xf0000000, 0xbf8a0000, + 0x80fd907d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0xbe844304, 0xbe864306, + 0xbe884308, 0xbe8a430a, + 0xbe8c430c, 0xbe8e430e, + 0xbf06807d, 0xbfa1fff0, + 0xb980f801, 0x00000000, + 0xb8f83b05, 0x80788178, + 0xbf0d9972, 0xbfa20002, + 0x84788978, 0xbfa00001, + 0x84788a78, 0xb8ee1e06, + 0x846e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, - 0xbf0a6f7d, 0xbfa2ffea, - 0xc405006e, 0x008ce800, - 0x00000000, 0xc405006e, - 0x008ce801, 0x00008000, - 0xc405006e, 0x008ce802, - 0x00010000, 0xc405006e, - 0x008ce803, 0x00018000, - 0xbf8a0000, 0xbfa0003d, 0xbef600ff, 0x01000000, - 0xbeee0078, 0x8078ff78, - 0x00000400, 0xbefd0084, - 0xbf0a6f7d, 0xbfa10016, - 0xc4050078, 0x008ce800, - 0x00000000, 0xc4050078, - 0x008ce801, 0x00010000, - 0xc4050078, 0x008ce802, - 0x00020000, 0xc4050078, - 0x008ce803, 0x00030000, - 0xbf8a0000, 0x7e008500, - 0x7e028501, 0x7e048502, - 0x7e068503, 0x807d847d, - 0x8078ff78, 0x00000400, - 0xbf0a6f7d, 0xbfa2ffea, - 0xb8ef1e06, 0x8b6fc16f, - 0xbfa1000f, 0x846f836f, - 0x806f7d6f, 0xbefe00c1, - 0xbeff0080, 0xc4050078, - 0x008ce800, 0x00000000, - 0xbf8a0000, 0x7e008500, - 0x807d817d, 0x8078ff78, - 0x00000080, 0xbf0a6f7d, - 0xbfa2fff6, 0xbeff00c1, - 0xc405006e, 0x008ce800, - 0x00000000, 0xc405006e, - 0x008ce801, 0x00010000, - 0xc405006e, 0x008ce802, - 0x00020000, 0xc405006e, - 0x008ce803, 0x00030000, - 0xbf8a0000, 
0xb8f83b05, - 0x80788178, 0xbf0d9972, - 0xbfa20002, 0x84788978, - 0xbfa00001, 0x84788a78, - 0xb8ee1e06, 0x846e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef600ff, - 0x01000000, 0xbefd00ff, - 0x0000006c, 0x80f89078, - 0xf462403a, 0xf0000000, - 0xbf8a0000, 0x80fd847d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0x80f8a078, - 0xf462603a, 0xf0000000, - 0xbf8a0000, 0x80fd887d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0xbe844304, - 0xbe864306, 0x80f8c078, - 0xf462803a, 0xf0000000, - 0xbf8a0000, 0x80fd907d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0xbe844304, - 0xbe864306, 0xbe884308, - 0xbe8a430a, 0xbe8c430c, - 0xbe8e430e, 0xbf06807d, - 0xbfa1fff0, 0xb980f801, - 0x00000000, 0xb8f83b05, - 0x80788178, 0xbf0d9972, - 0xbfa20002, 0x84788978, - 0xbfa00001, 0x84788a78, - 0xb8ee1e06, 0x846e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0xbef600ff, - 0x01000000, 0xbeff0071, - 0xf4621bfa, 0xf0000000, - 0x80788478, 0xf4621b3a, + 0xbeff0071, 0xf4621bfa, 0xf0000000, 0x80788478, - 0xf4621b7a, 0xf0000000, - 0x80788478, 0xf4621c3a, + 0xf4621b3a, 0xf0000000, + 0x80788478, 0xf4621b7a, 0xf0000000, 0x80788478, - 0xf4621c7a, 0xf0000000, - 0x80788478, 0xf4621eba, + 0xf4621c3a, 0xf0000000, + 0x80788478, 0xf4621c7a, 0xf0000000, 0x80788478, - 0xf4621efa, 0xf0000000, - 0x80788478, 0xf4621e7a, + 0xf4621eba, 0xf0000000, + 0x80788478, 0xf4621efa, 0xf0000000, 0x80788478, - 0xf4621cfa, 0xf0000000, - 0x80788478, 0xf4621bba, + 0xf4621e7a, 0xf0000000, + 0x80788478, 0xf4621cfa, 0xf0000000, 0x80788478, - 0xbf8a0000, 0xb96ef814, 0xf4621bba, 0xf0000000, 0x80788478, 0xbf8a0000, - 0xb96ef815, 0xf4621bba, + 0xb96ef814, 0xf4621bba, 0xf0000000, 0x80788478, - 0xbf8a0000, 0xb96ef812, + 0xbf8a0000, 0xb96ef815, 0xf4621bba, 0xf0000000, 0x80788478, 0xbf8a0000, - 0xb96ef813, 0x8b6eff7f, - 0x04000000, 0xbfa1000d, - 0x80788478, 0xf4621bba, + 0xb96ef812, 0xf4621bba, 0xf0000000, 0x80788478, - 0xbf8a0000, 0xbf0d806e, - 0xbfa10006, 0x856e906e, - 0x8b6e6e6e, 0xbfa10003, - 0xbe804ec1, 0x816ec16e, - 0xbfa0fffb, 0xbefd006f, - 0xbefe0070, 0xbeff0071, - 0xb97b2011, 0x857b867b, - 0xb97b0191, 0x857b827b, - 0xb97bba11, 0xb973f801, - 0xb8ee3b05, 0x806e816e, - 0xbf0d9972, 0xbfa20002, - 0x846e896e, 0xbfa00001, - 0x846e8a6e, 0xb8ef1e06, - 0x846f8a6f, 0x806e6f6e, - 0x806eff6e, 0x00000200, - 0x806e746e, 0x826f8075, - 0x8b6fff6f, 0x0000ffff, - 0xf4605c37, 0xf8000050, - 0xf4605d37, 0xf8000060, - 0xf4601e77, 0xf8000074, - 0xbf8a0000, 0x8b6dff6d, - 0x0000ffff, 0x8bfe7e7e, - 0x8bea6a6a, 0xb97af804, + 0xbf8a0000, 0xb96ef813, + 0x8b6eff7f, 0x04000000, + 0xbfa1000d, 0x80788478, + 0xf4621bba, 0xf0000000, + 0x80788478, 0xbf8a0000, + 0xbf0d806e, 0xbfa10006, + 0x856e906e, 0x8b6e6e6e, + 0xbfa10003, 0xbe804ec1, + 0x816ec16e, 0xbfa0fffb, + 0xbefd006f, 0xbefe0070, + 0xbeff0071, 0xb97b2011, + 0x857b867b, 0xb97b0191, + 0x857b827b, 0xb97bba11, + 0xb973f801, 0xb8ee3b05, + 0x806e816e, 0xbf0d9972, + 0xbfa20002, 0x846e896e, + 0xbfa00001, 0x846e8a6e, + 0xb8ef1e06, 0x846f8a6f, + 0x806e6f6e, 0x806eff6e, + 0x00000200, 0x806e746e, + 0x826f8075, 0x8b6fff6f, + 0x0000ffff, 0xf4605c37, + 0xf8000050, 0xf4605d37, + 0xf8000060, 0xf4601e77, + 0xf8000074, 0xbf8a0000, + 0x8b6dff6d, 0x0000ffff, + 0x8bfe7e7e, 0x8bea6a6a, + 0xb97af804, 0xbe804ec2, + 0xbf94fffe, 0xbe804a6c, 0xbe804ec2, 0xbf94fffe, - 0xbe804a6c, 0xbe804ec2, - 0xbf94fffe, 0xbfb10000, + 0xbfb10000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, - 0xbf9f0000, 0x00000000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm index 
7b9d36e5fa437..5a1a1b1f897fe 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm @@ -30,6 +30,7 @@ #define CHIP_GFX12 37 #define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised +#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12) var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4 var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9 @@ -351,6 +352,7 @@ L_HAVE_VGPRS: v_writelane_b32 v0, ttmp13, 0xD v_writelane_b32 v0, exec_lo, 0xE v_writelane_b32 v0, exec_hi, 0xF + valu_sgpr_hazard() s_mov_b32 exec_lo, 0x3FFF s_mov_b32 exec_hi, 0x0 @@ -417,7 +419,6 @@ L_SAVE_HWREG: v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store - s_mov_b32 m0, 0x0 //Next lane of v2 to write to // Ensure no further changes to barrier or LDS state. // STATE_PRIV.BARRIER_COMPLETE may change up to this point. @@ -430,40 +431,41 @@ L_SAVE_HWREG: s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp - write_hwreg_to_v2(s_save_m0) - write_hwreg_to_v2(s_save_pc_lo) s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK - write_hwreg_to_v2(s_save_tmp) - write_hwreg_to_v2(s_save_exec_lo) - write_hwreg_to_v2(s_save_exec_hi) - write_hwreg_to_v2(s_save_state_priv) + v_writelane_b32 v2, s_save_m0, 0x0 + v_writelane_b32 v2, s_save_pc_lo, 0x1 + v_writelane_b32 v2, s_save_tmp, 0x2 + v_writelane_b32 v2, s_save_exec_lo, 0x3 + v_writelane_b32 v2, s_save_exec_hi, 0x4 + v_writelane_b32 v2, s_save_state_priv, 0x5 + v_writelane_b32 v2, s_save_xnack_mask, 0x7 + valu_sgpr_hazard() s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) - write_hwreg_to_v2(s_save_tmp) + v_writelane_b32 v2, s_save_tmp, 0x6 - write_hwreg_to_v2(s_save_xnack_mask) + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE) + v_writelane_b32 v2, s_save_tmp, 0x8 - s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE) - write_hwreg_to_v2(s_save_m0) + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO) + v_writelane_b32 v2, s_save_tmp, 0x9 - s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO) - write_hwreg_to_v2(s_save_m0) + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI) + v_writelane_b32 v2, s_save_tmp, 0xA - s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI) - write_hwreg_to_v2(s_save_m0) + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) + v_writelane_b32 v2, s_save_tmp, 0xB - s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) - write_hwreg_to_v2(s_save_m0) - - s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL) - write_hwreg_to_v2(s_save_m0) + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL) + v_writelane_b32 v2, s_save_tmp, 0xC s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS) - write_hwreg_to_v2(s_save_tmp) + v_writelane_b32 v2, s_save_tmp, 0xD s_get_barrier_state s_save_tmp, -1 s_wait_kmcnt (0) - write_hwreg_to_v2(s_save_tmp) + v_writelane_b32 v2, s_save_tmp, 0xE + valu_sgpr_hazard() // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this. s_mov_b32 exec_lo, 0xFFFF @@ -497,10 +499,12 @@ L_SAVE_SGPR_LOOP: s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0] s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0] - write_16sgpr_to_v2(s0) - - s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled? 
- s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE + s_cmp_eq_u32 ttmp13, 0x0 + s_cbranch_scc0 L_WRITE_V2_SECOND_HALF + write_16sgpr_to_v2(s0, 0x0) + s_branch L_SAVE_SGPR_SKIP_TCP_STORE +L_WRITE_V2_SECOND_HALF: + write_16sgpr_to_v2(s0, 0x10) buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80 @@ -1056,27 +1060,21 @@ L_END_PGM: s_endpgm_saved end -function write_hwreg_to_v2(s) - // Copy into VGPR for later TCP store. - v_writelane_b32 v2, s, m0 - s_add_u32 m0, m0, 0x1 -end - - -function write_16sgpr_to_v2(s) +function write_16sgpr_to_v2(s, lane_offset) // Copy into VGPR for later TCP store. for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++ - v_writelane_b32 v2, s[sgpr_idx], ttmp13 - s_add_u32 ttmp13, ttmp13, 0x1 + v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset end + valu_sgpr_hazard() + s_add_u32 ttmp13, ttmp13, 0x10 end function write_12sgpr_to_v2(s) // Copy into VGPR for later TCP store. for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++ - v_writelane_b32 v2, s[sgpr_idx], ttmp13 - s_add_u32 ttmp13, ttmp13, 0x1 + v_writelane_b32 v2, s[sgpr_idx], sgpr_idx end + valu_sgpr_hazard() end function read_hwreg_from_mem(s, s_rsrc, s_mem_offset) @@ -1128,3 +1126,11 @@ function get_wave_size2(s_reg) s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE) s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE end + +function valu_sgpr_hazard +#if HAVE_VALU_SGPR_HAZARD + for var rep = 0; rep < 8; rep ++ + ds_nop + end +#endif +end diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 9186ef0bd2a32..07eadab4c1c4d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -537,7 +537,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) kfd->cwsr_isa = cwsr_trap_gfx11_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex); } else { - BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE); + BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) + > KFD_CWSR_TMA_OFFSET); kfd->cwsr_isa = cwsr_trap_gfx12_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index ea37922492093..6798510c4a707 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -1315,6 +1315,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid) user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); if (unlikely(user_gpu_id == -EINVAL)) { WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); + kfd_unref_process(p); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 1f9f5bfeaf868..d87b895660c21 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -237,7 +237,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->bitfields2.engine_sel = engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.gws_control_queue = q->gws ? 1 : 0; + packet->bitfields2.gws_control_queue = q->properties.is_gws ? 
1 : 0; packet->bitfields2.extended_engine_sel = extended_engine_sel__mes_map_queues__legacy_engine_sel; packet->bitfields2.queue_type = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index f00d41be7fca2..3e9e0f36cd3f4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1170,13 +1170,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start, } static void -svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, - struct svm_range *pchild, enum svm_work_list_ops op) +svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op) { pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n", pchild, pchild->start, pchild->last, prange, op); - pchild->work_item.mm = mm; + pchild->work_item.mm = NULL; pchild->work_item.op = op; list_add_tail(&pchild->child_list, &prange->child_list); } @@ -2384,15 +2383,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, prange->work_item.op != SVM_OP_UNMAP_RANGE) prange->work_item.op = op; } else { - prange->work_item.op = op; - - /* Pairs with mmput in deferred_list_work */ - mmget(mm); - prange->work_item.mm = mm; - list_add_tail(&prange->deferred_list, - &prange->svms->deferred_range_list); - pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", - prange, prange->start, prange->last, op); + /* Pairs with mmput in deferred_list_work. + * If process is exiting and mm is gone, don't update mmu notifier. + */ + if (mmget_not_zero(mm)) { + prange->work_item.mm = mm; + prange->work_item.op = op; + list_add_tail(&prange->deferred_list, + &prange->svms->deferred_range_list); + pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", + prange, prange->start, prange->last, op); + } } spin_unlock(&svms->deferred_list_lock); } @@ -2406,8 +2407,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms) } static void -svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, - struct svm_range *prange, unsigned long start, +svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start, unsigned long last) { struct svm_range *head; @@ -2428,12 +2428,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, svm_range_split(tail, last + 1, tail->last, &head); if (head != prange && tail != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); - svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE); } else if (tail != prange) { - svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE); } else if (head != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); } else if (parent != prange) { prange->work_item.op = SVM_OP_UNMAP_RANGE; } @@ -2510,14 +2510,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, l = min(last, pchild->last); if (l >= s) svm_range_unmap_from_gpus(pchild, s, l, trigger); - svm_range_unmap_split(mm, prange, pchild, start, last); + svm_range_unmap_split(prange, pchild, start, last); mutex_unlock(&pchild->lock); } s = max(start, prange->start); l = min(last, prange->last); if (l >= s) svm_range_unmap_from_gpus(prange, s, l, trigger); - svm_range_unmap_split(mm, prange, prange, start, last); + 
svm_range_unmap_split(prange, prange, start, last); if (unmap_parent) svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); @@ -2560,8 +2560,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; - if (!mmget_not_zero(mni->mm)) - return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2588,7 +2586,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); - mmput(mni->mm); return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 5f9452b22596a..084d9ed325af6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -668,21 +668,15 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (acrtc->dm_irq_params.stream && - acrtc->dm_irq_params.vrr_params.supported) { - bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; - bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; - bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; - + acrtc->dm_irq_params.vrr_params.supported && + acrtc->dm_irq_params.freesync_config.state == + VRR_STATE_ACTIVE_VARIABLE) { mod_freesync_handle_v_update(adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); - /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */ - if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { - dc_stream_adjust_vmin_vmax(adev->dm.dc, - acrtc->dm_irq_params.stream, - &acrtc->dm_irq_params.vrr_params.adjust); - } + dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); } /* diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 55014c1521167..f3aa93ddbf9c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -762,6 +762,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->pixel_format = dml2_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: plane->pixel_format = dml2_444_64; @@ -887,7 +888,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm } //TODO : Could be possibly moved to a common helper layer. 
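/*
 * Illustrative aside, not part of the patch: the svm_range_add_list_work()
 * hunk above replaces an unconditional mmget() with mmget_not_zero() so
 * deferred work is queued only while the process's mm is still live. A
 * minimal sketch of that pattern follows; enqueue_deferred_range() is a
 * hypothetical stand-in, and the worker must pair a successful get with
 * mmput().
 */
#include <linux/sched/mm.h>

static void add_deferred_work_sketch(struct mm_struct *mm)
{
	if (mmget_not_zero(mm)) {	/* fails once mm_users has hit zero */
		enqueue_deferred_range(mm);	/* hypothetical helper */
	} else {
		/* process is exiting; skip the mmu-notifier update */
	}
}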
-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id) +static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id) { int i, j; @@ -895,10 +896,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str return false; for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) { - if (context->stream_status[i].plane_states[j] == plane) { - *plane_id = (i << 16) | j; - return true; + if (context->streams[i]->stream_id == stream_id) { + for (j = 0; j < context->stream_status[i].plane_count; j++) { + if (context->stream_status[i].plane_states[j] == plane) { + *plane_id = (i << 16) | j; + return true; + } } } } @@ -921,14 +924,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d return location; } -static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, +static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context) { unsigned int plane_id; int i = 0; int location = -1; - if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) { + if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) { ASSERT(false); return -1; } @@ -1013,7 +1016,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; } else { for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) { - disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context); + disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context); if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_planes++; @@ -1024,7 +1027,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index); dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; - if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) + if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true; /* apply forced pstate policy */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 0090b7bc232bf..157903115f3b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -4651,7 
+4651,10 @@ static void calculate_tdlut_setting( //the tdlut is fetched during the 2 row times of prefetch. if (p->setup_for_tdlut) { *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1); - *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate; + if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024) + *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate; + else + *p->tdlut_opt_time = 0; *p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 92a3fff1e2616..405aefd14d9b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -909,6 +909,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p out->SourcePixelFormat[location] = dml_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: out->SourcePixelFormat[location] = dml_444_64; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 59457ca24e1dc..03b22e9115ea8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -951,8 +951,8 @@ void dce110_edp_backlight_control( struct dc_context *ctx = link->ctx; struct bp_transmitter_control cntl = { 0 }; uint8_t pwrseq_instance = 0; - unsigned int pre_T11_delay = OLED_PRE_T11_DELAY; - unsigned int post_T7_delay = OLED_POST_T7_DELAY; + unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0); + unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? 
OLED_POST_T7_DELAY : 0); if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) != CONNECTOR_ID_EDP) { @@ -1067,7 +1067,8 @@ void dce110_edp_backlight_control( if (!enable) { /*follow oem panel config's requirement*/ pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms; - msleep(pre_T11_delay); + if (pre_T11_delay) + msleep(pre_T11_delay); } } @@ -1216,7 +1217,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) struct dce_hwseq *hws = link->dc->hwseq; if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - if (!link->skip_implict_edp_power_control) + if (!link->skip_implict_edp_power_control && hws) hws->funcs.edp_backlight_control(link, false); link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index ca446e08f6a27..21aff7fa6375d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1019,8 +1019,22 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false; - if (pipe_ctx->stream_res.dsc) + if (pipe_ctx->stream_res.dsc) { update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false; + if (dc->caps.sequential_ono) { + update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false; + update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp && + pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = false; + update_state->pg_pipe_res_update[PG_DPP][j] = false; + } + } + } + } if (pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; @@ -1165,6 +1179,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; if (dc->caps.sequential_ono) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + + if (new_pipe->stream_res.dsc && !new_pipe->top_pipe && + update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) { + update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true; + update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (new_pipe->plane_res.hubp && + new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = true; + update_state->pg_pipe_res_update[PG_DPP][j] = true; + } + } + } + } + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index c4e03482ba9ae..aa28001297675 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -148,6 +148,7 @@ void 
link_blank_dp_stream(struct dc_link *link, bool hw_init) void link_set_all_streams_dpms_off_for_link(struct dc_link *link) { struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_stream_state *streams[MAX_PIPES]; struct dc_state *state = link->dc->current_state; uint8_t count; int i; @@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link) link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + /* The subsequent call to dc_commit_updates_for_stream for a full update + * will release the current state and swap to a new state. Releasing the + * current state results in the stream pointers in the pipe_ctx structs + * being zeroed. Hence, cache all streams prior to dc_commit_updates_for_stream. + */ + for (i = 0; i < count; i++) + streams[i] = pipes[i]->stream; + for (i = 0; i < count; i++) { - stream_update.stream = pipes[i]->stream; + stream_update.stream = streams[i]; dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, - pipes[i]->stream, &stream_update, + streams[i], &stream_update, state); } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 8c137d7c032e1..e58e7b93810be 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) struct mod_hdcp_display *display = get_first_active_display(hdcp); enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index 5c54c9fd44619..a76fc15a55f5b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -144,6 +144,10 @@ int atomctrl_initialize_mc_reg_table( vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); + if (!vram_info) { + pr_err("Could not retrieve the VramInfo table!"); + return -EINVAL; + } if (module_index >= vram_info->ucNumOfVRAMModule) { pr_err("Invalid VramInfo table."); @@ -181,6 +185,10 @@ int atomctrl_initialize_mc_reg_table_v2_2( vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *) smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); + if (!vram_info) { + pr_err("Could not retrieve the VramInfo table!"); + return -EINVAL; + } if (module_index >= vram_info->ucNumOfVRAMModule) { pr_err("Invalid VramInfo table."); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 4f78c84da780c..c5bca3019de07 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -58,6 +58,7 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); @@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char 
ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -103,7 +104,10 @@ int smu_v13_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 24ed1cd3caf17..162dc0698f4ac 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1319,9 +1319,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s /* * Concurrent operations could possibly trigger a call to - * drm_connector_helper_funcs.get_modes by trying to read the - * display modes. Protect access to I/O registers by acquiring - * the I/O-register lock. Released in atomic_flush(). + * drm_connector_helper_funcs.get_modes by reading the display + * modes. Protect access to registers by acquiring the modeset + * lock. */ mutex_lock(&ast->modeset_lock); drm_atomic_helper_commit_tail(state); diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index 6886db2d9e00c..8e889a38fad00 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -64,10 +64,11 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str adev->id = ret; adev->name = "dp_hpd_bridge"; adev->dev.parent = parent; - adev->dev.of_node = of_node_get(parent->of_node); adev->dev.release = drm_aux_hpd_bridge_release; adev->dev.platform_data = of_node_get(np); + device_set_of_node_from_dev(&adev->dev, parent); + ret = auxiliary_device_init(adev); if (ret) { of_node_put(adev->dev.platform_data); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index 7457d38622b0c..89eed0668bfb2 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -568,15 +568,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi, struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy; unsigned long dsi_hss_hsa_hse_hbp; unsigned int nlanes = output->dev->lanes; + int mode_clock = (mode_valid_check ? 
mode->clock : mode->crtc_clock); int ret; ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check); if (ret) return ret; - phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000, - mipi_dsi_pixel_format_to_bpp(output->dev->format), - nlanes, phy_cfg); + ret = phy_mipi_dphy_get_default_config(mode_clock * 1000, + mipi_dsi_pixel_format_to_bpp(output->dev->format), + nlanes, phy_cfg); + if (ret) + return ret; ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check); if (ret) @@ -680,6 +683,11 @@ static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge) struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); + dsi->phy_initialized = false; + dsi->link_initialized = false; + phy_power_off(dsi->dphy); + phy_exit(dsi->dphy); + pm_runtime_put(dsi->base.dev); } @@ -761,7 +769,7 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy; unsigned long tx_byte_period; struct cdns_dsi_cfg dsi_cfg; - u32 tmp, reg_wakeup, div; + u32 tmp, reg_wakeup, div, status; int nlanes; if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0)) @@ -778,6 +786,19 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) cdns_dsi_hs_init(dsi); cdns_dsi_init_link(dsi); + /* + * Now that the DSI Link and DSI Phy are initialized, + * wait for the CLK and Data Lanes to be ready. + */ + tmp = CLK_LANE_RDY; + for (int i = 0; i < nlanes; i++) + tmp |= DATA_LANE_RDY(i); + + if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status, + (tmp == (status & tmp)), 100, 500000)) + dev_err(dsi->base.dev, + "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n"); + writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa), dsi->regs + VID_HSIZE1); writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact), @@ -952,7 +973,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_DSI); } else { - bridge = of_drm_find_bridge(dev->dev.of_node); + bridge = of_drm_find_bridge(np); if (!bridge) bridge = ERR_PTR(-EINVAL); } @@ -1152,7 +1173,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev) clk_disable_unprepare(dsi->dsi_sys_clk); clk_disable_unprepare(dsi->dsi_p_clk); reset_control_assert(dsi->dsi_p_rst); - dsi->link_initialized = false; return 0; } diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 4d1d40e1f1b4d..748bed8acd2d9 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -879,7 +879,11 @@ static int lt9611uxc_probe(struct i2c_client *client) } } - return lt9611uxc_audio_init(dev, lt9611uxc); + ret = lt9611uxc_audio_init(dev, lt9611uxc); + if (ret) + goto err_remove_bridge; + + return 0; err_remove_bridge: free_irq(client->irq, lt9611uxc); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 95ce50ed53acf..5500767cda7e4 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -331,12 +331,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata) * 200 ms. We'll assume that the panel driver will have the hardcoded * delay in its prepare and always disable HPD. * - * If HPD somehow makes sense on some future panel we'll have to - * change this to be conditional on someone specifying that HPD should - * be used. + * For DisplayPort bridge type, we need HPD. 
So we use the bridge type + * to conditionally disable HPD. + * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms() + * can be called before. So for DisplayPort, HPD will be enabled once + * bridge type is set. We are using bridge type instead of "no-hpd" + * property because it is not used properly in devicetree description + * and hence is unreliable. */ - regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, - HPD_DISABLE); + + if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort) + regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, + HPD_DISABLE); pdata->comms_enabled = true; @@ -424,36 +430,8 @@ static int status_show(struct seq_file *s, void *data) return 0; } - DEFINE_SHOW_ATTRIBUTE(status); -static void ti_sn65dsi86_debugfs_remove(void *data) -{ - debugfs_remove_recursive(data); -} - -static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata) -{ - struct device *dev = pdata->dev; - struct dentry *debugfs; - int ret; - - debugfs = debugfs_create_dir(dev_name(dev), NULL); - - /* - * We might get an error back if debugfs wasn't enabled in the kernel - * so let's just silently return upon failure. - */ - if (IS_ERR_OR_NULL(debugfs)) - return; - - ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs); - if (ret) - return; - - debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); -} - /* ----------------------------------------------------------------------------- * Auxiliary Devices (*not* AUX) */ @@ -1201,9 +1179,14 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge) struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); int val = 0; - pm_runtime_get_sync(pdata->dev); + /* + * Runtime reference is grabbed in ti_sn_bridge_hpd_enable() + * as the chip won't report HPD just after being powered on. + * HPD_DEBOUNCED_STATE reflects correct state only after the + * debounce time (~100-400 ms). + */ + regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val); - pm_runtime_put_autosuspend(pdata->dev); return val & HPD_DEBOUNCED_STATE ? connector_status_connected : connector_status_disconnected; @@ -1217,6 +1200,35 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge, return drm_edid_read_ddc(connector, &pdata->aux.ddc); } +static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + struct dentry *debugfs; + + debugfs = debugfs_create_dir(dev_name(pdata->dev), root); + debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); +} + +static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + + /* + * Device needs to be powered on before reading the HPD state + * for reliable hpd detection in ti_sn_bridge_detect() due to + * the high debounce time. 
+ */ + + pm_runtime_get_sync(pdata->dev); +} + +static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + + pm_runtime_put_autosuspend(pdata->dev); +} + static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .attach = ti_sn_bridge_attach, .detach = ti_sn_bridge_detach, @@ -1230,6 +1242,9 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .debugfs_init = ti_sn65dsi86_debugfs_init, + .hpd_enable = ti_sn_bridge_hpd_enable, + .hpd_disable = ti_sn_bridge_hpd_disable, }; static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata, @@ -1318,8 +1333,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP; - if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) - pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; + if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) { + pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_HPD; + /* + * If comms were already enabled they would have been enabled + * with the wrong value of HPD_DISABLE. Update it now. Comms + * could be enabled if anyone is holding a pm_runtime reference + * (like if a GPIO is in use). Note that in most cases nobody + * is doing AUX channel xfers before the bridge is added so + * HPD doesn't _really_ matter then. The only exception is in + * the eDP case where the panel wants to read the EDID before + * the bridge is added. We always consistently have HPD disabled + * for eDP. + */ + mutex_lock(&pdata->comms_mutex); + if (pdata->comms_enabled) + regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, + HPD_DISABLE, 0); + mutex_unlock(&pdata->comms_mutex); + }; drm_bridge_add(&pdata->bridge); @@ -1938,8 +1971,6 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) if (ret) return ret; - ti_sn65dsi86_debugfs_init(pdata); - /* * Break ourselves up into a collection of aux devices. 
The only real * motiviation here is to solve the chicken-and-egg problem of probe diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 7c8287c18e381..6fcf2a8bf6762 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT #include +#include #include #include @@ -72,43 +73,108 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = { .fb_destroy = drm_fbdev_dma_fb_destroy, }; -FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma, +FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed, drm_fb_helper_damage_range, drm_fb_helper_damage_area); -static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) +static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0); + void *shadow = info->screen_buffer; + + if (!fb_helper->dev) + return; - if (!dma->map_noncoherent) - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + if (info->fbdefio) + fb_deferred_io_cleanup(info); + drm_fb_helper_fini(fb_helper); + vfree(shadow); - return fb_deferred_io_mmap(info, vma); + drm_client_buffer_vunmap(fb_helper->buffer); + drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_release(&fb_helper->client); + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); } -static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = { +static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = { .owner = THIS_MODULE, .fb_open = drm_fbdev_dma_fb_open, .fb_release = drm_fbdev_dma_fb_release, - __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma), + FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed), DRM_FB_HELPER_DEFAULT_OPS, - __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma), - .fb_mmap = drm_fbdev_dma_deferred_fb_mmap, - .fb_destroy = drm_fbdev_dma_fb_destroy, + .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy, }; /* * struct drm_fb_helper */ +static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip, + struct iosys_map *dst) +{ + struct drm_framebuffer *fb = fb_helper->fb; + size_t offset = clip->y1 * fb->pitches[0]; + size_t len = clip->x2 - clip->x1; + unsigned int y; + void *src; + + switch (drm_format_info_bpp(fb->format, 0)) { + case 1: + offset += clip->x1 / 8; + len = DIV_ROUND_UP(len + clip->x1 % 8, 8); + break; + case 2: + offset += clip->x1 / 4; + len = DIV_ROUND_UP(len + clip->x1 % 4, 4); + break; + case 4: + offset += clip->x1 / 2; + len = DIV_ROUND_UP(len + clip->x1 % 2, 2); + break; + default: + offset += clip->x1 * fb->format->cpp[0]; + len *= fb->format->cpp[0]; + break; + } + + src = fb_helper->info->screen_buffer + offset; + iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ + + for (y = clip->y1; y < clip->y2; y++) { + iosys_map_memcpy_to(dst, 0, src, len); + iosys_map_incr(dst, fb->pitches[0]); + src += fb->pitches[0]; + } +} + +static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip) +{ + struct drm_client_buffer *buffer = fb_helper->buffer; + struct iosys_map dst; + + /* + * For fbdev emulation, we only have to protect against fbdev modeset + * operations. Nothing else will involve the client buffer's BO. So it + * is sufficient to acquire struct drm_fb_helper.lock here. 
+ */ + mutex_lock(&fb_helper->lock); + + dst = buffer->map; + drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst); + + mutex_unlock(&fb_helper->lock); + + return 0; +} + static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes); } - static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) { @@ -120,6 +186,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, return 0; if (helper->fb->funcs->dirty) { + ret = drm_fbdev_dma_damage_blit(helper, clip); + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) + return ret; + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) return ret; @@ -137,14 +207,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = { * struct drm_fb_helper */ +static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_client_buffer *buffer = fb_helper->buffer; + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem); + struct drm_framebuffer *fb = fb_helper->fb; + struct fb_info *info = fb_helper->info; + struct iosys_map map = buffer->map; + + info->fbops = &drm_fbdev_dma_fb_ops; + + /* screen */ + info->flags |= FBINFO_VIRTFB; /* system memory */ + if (dma_obj->map_noncoherent) + info->flags |= FBINFO_READS_FAST; /* signal caching */ + info->screen_size = sizes->surface_height * fb->pitches[0]; + info->screen_buffer = map.vaddr; + if (!(info->flags & FBINFO_HIDE_SMEM_START)) { + if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer))) + info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); + } + info->fix.smem_len = info->screen_size; + + return 0; +} + +static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_client_buffer *buffer = fb_helper->buffer; + struct fb_info *info = fb_helper->info; + size_t screen_size = buffer->gem->size; + void *screen_buffer; + int ret; + + /* + * Deferred I/O requires struct page for framebuffer memory, + * which is not guaranteed for all DMA ranges. We thus create + * a shadow buffer in system memory. 
+ */ + screen_buffer = vzalloc(screen_size); + if (!screen_buffer) + return -ENOMEM; + + info->fbops = &drm_fbdev_dma_shadowed_fb_ops; + + /* screen */ + info->flags |= FBINFO_VIRTFB; /* system memory */ + info->flags |= FBINFO_READS_FAST; /* signal caching */ + info->screen_buffer = screen_buffer; + info->fix.smem_len = screen_size; + + fb_helper->fbdefio.delay = HZ / 20; + fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; + + info->fbdefio = &fb_helper->fbdefio; + ret = fb_deferred_io_init(info); + if (ret) + goto err_vfree; + + return 0; + +err_vfree: + vfree(screen_buffer); + return ret; +} + int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; - bool use_deferred_io = false; struct drm_client_buffer *buffer; - struct drm_gem_dma_object *dma_obj; struct drm_framebuffer *fb; struct fb_info *info; u32 format; @@ -161,19 +297,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); - dma_obj = to_drm_gem_dma_obj(buffer->gem); fb = buffer->fb; - /* - * Deferred I/O requires struct page for framebuffer memory, - * which is not guaranteed for all DMA ranges. We thus only - * install deferred I/O if we have a framebuffer that requires - * it. - */ - if (fb->funcs->dirty) - use_deferred_io = true; - ret = drm_client_buffer_vmap(buffer, &map); if (ret) { goto err_drm_client_buffer_delete; @@ -194,45 +320,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, drm_fb_helper_fill_info(info, fb_helper, sizes); - if (use_deferred_io) - info->fbops = &drm_fbdev_dma_deferred_fb_ops; + if (fb->funcs->dirty) + ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes); else - info->fbops = &drm_fbdev_dma_fb_ops; - - /* screen */ - info->flags |= FBINFO_VIRTFB; /* system memory */ - if (dma_obj->map_noncoherent) - info->flags |= FBINFO_READS_FAST; /* signal caching */ - info->screen_size = sizes->surface_height * fb->pitches[0]; - info->screen_buffer = map.vaddr; - if (!(info->flags & FBINFO_HIDE_SMEM_START)) { - if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer))) - info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); - } - info->fix.smem_len = info->screen_size; - - /* - * Only set up deferred I/O if the screen buffer supports - * it. If this disagrees with the previous test for ->dirty, - * mmap on the /dev/fb file might not work correctly. 
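
The err_vfree label above follows the usual kernel unwind idiom: each fallible step jumps to a label that releases everything acquired so far, in reverse order. A stripped-down standalone rendering of the same shape, with malloc()/free() standing in for vzalloc()/vfree() and a stub in place of fb_deferred_io_init():

/* Sketch of the acquire/unwind shape of the shadowed probe tail. */
#include <stdlib.h>
#include <errno.h>

static int init_step(int should_fail)	/* ~ fb_deferred_io_init() */
{
	return should_fail ? -EINVAL : 0;
}

static int probe(int fail_init, void **out)
{
	void *shadow;
	int ret;

	shadow = malloc(4096);		/* ~ vzalloc(screen_size) */
	if (!shadow)
		return -ENOMEM;

	ret = init_step(fail_init);
	if (ret)
		goto err_free;		/* ~ err_vfree */

	*out = shadow;
	return 0;

err_free:
	free(shadow);			/* ~ vfree(screen_buffer) */
	return ret;
}

int main(void)
{
	void *buf = NULL;
	int ok = probe(0, &buf);

	free(buf);
	return (ok == 0 && probe(1, &buf) == -EINVAL) ? 0 : 1;
}
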
- */ - if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) { - unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT; - - if (drm_WARN_ON(dev, !pfn_to_page(pfn))) - use_deferred_io = false; - } - - /* deferred I/O */ - if (use_deferred_io) { - fb_helper->fbdefio.delay = HZ / 20; - fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; - - info->fbdefio = &fb_helper->fbdefio; - ret = fb_deferred_io_init(info); - if (ret) - goto err_drm_fb_helper_release_info; - } + ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes); + if (ret) + goto err_drm_fb_helper_release_info; return 0; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 888aadb6a4acb..d6550b54fac16 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -860,11 +860,23 @@ void drm_framebuffer_free(struct kref *kref) int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { + unsigned int i; int ret; + bool exists; if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) return -EINVAL; + for (i = 0; i < fb->format->num_planes; i++) { + if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))) + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + if (fb->obj[i]) { + exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]); + if (exists) + fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } + INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; @@ -873,7 +885,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); if (ret) - goto out; + goto err; mutex_lock(&dev->mode_config.fb_lock); dev->mode_config.num_fb++; @@ -881,7 +893,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, mutex_unlock(&dev->mode_config.fb_lock); drm_mode_object_register(dev, &fb->base); -out: + + return 0; + +err: + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) { + drm_gem_object_handle_put_unlocked(fb->obj[i]); + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } return ret; } EXPORT_SYMBOL(drm_framebuffer_init); @@ -958,6 +979,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; + unsigned int i; + + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) + drm_gem_object_handle_put_unlocked(fb->obj[i]); + } mutex_lock(&dev->mode_config.fb_lock); list_del(&fb->head); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 426d0867882df..9e8a4da313a0e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_private_object_fini); +static void drm_gem_object_handle_get(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock)); + + if (obj->handle_count++ == 0) + drm_gem_object_get(obj); +} + +/** + * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any + * @obj: GEM object + * + * Acquires a reference on the GEM buffer object's handle. Required to keep + * the GEM object alive. 
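
The drm_framebuffer_init() change above records, per plane, whether a handle reference was actually taken in an internal_flags bit, so that both the error path and drm_framebuffer_cleanup() drop exactly the references that were acquired and nothing else. A small self-contained model of that record-and-rollback pattern; the refcount type and HAS_REF() macro are stand-ins, not the DRM structures:

/* "Record what you took so you can undo exactly that." */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PLANES 4
#define HAS_REF(i) (1u << (i))

struct object { unsigned int refs; };

static bool ref_get_if_live(struct object *obj)
{
	if (!obj || obj->refs == 0)	/* no userspace handle: skip */
		return false;
	obj->refs++;
	return true;
}

static int fb_init(struct object *planes[MAX_PLANES], unsigned int nplanes,
		   unsigned int *flags, bool fail_late)
{
	unsigned int i;

	for (i = 0; i < nplanes; i++)
		if (ref_get_if_live(planes[i]))
			*flags |= HAS_REF(i);

	if (fail_late)	/* e.g. mode-object registration failed */
		goto err;
	return 0;

err:
	/* drop only the references this function actually took */
	for (i = 0; i < nplanes; i++) {
		if (*flags & HAS_REF(i)) {
			planes[i]->refs--;
			*flags &= ~HAS_REF(i);
		}
	}
	return -1;
}

int main(void)
{
	struct object a = { .refs = 1 }, b = { .refs = 0 };
	struct object *planes[MAX_PLANES] = { &a, &b };
	unsigned int flags = 0;

	fb_init(planes, 2, &flags, true);
	printf("a.refs=%u flags=%#x\n", a.refs, flags); /* back to 1, 0 */
	return 0;
}
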
Call drm_gem_object_handle_put_if_exists_unlocked() + * to release the reference. Does nothing if the buffer object has no handle. + * + * Returns: + * True if a handle exists, or false otherwise + */ +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + guard(mutex)(&dev->object_name_lock); + + /* + * First ref taken during GEM object creation, if any. Some + * drivers set up internal framebuffers with GEM objects that + * do not have a GEM handle. Hence, this counter can be zero. + */ + if (!obj->handle_count) + return false; + + drm_gem_object_handle_get(obj); + + return true; +} + /** * drm_gem_object_handle_free - release resources bound to userspace handles * @obj: GEM object to clean up. @@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) } } -static void -drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) +/** + * drm_gem_object_handle_put_unlocked - releases reference on user-space handle + * @obj: GEM object + * + * Releases a reference on the GEM buffer object's handle. Possibly releases + * the GEM buffer object and associated dma-buf objects. + */ +void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; bool final = false; - if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) + if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0)) return; /* - * Must bump handle count first as this may be the last - * ref, in which case the object would disappear before we - * checked for a name - */ + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before + * we checked for a name. + */ mutex_lock(&dev->object_name_lock); if (--obj->handle_count == 0) { @@ -253,6 +299,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_file *file_priv = data; struct drm_gem_object *obj = ptr; + if (drm_WARN_ON(obj->dev, !data)) + return 0; + if (obj->funcs->close) obj->funcs->close(obj, file_priv); @@ -363,8 +412,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, int ret; WARN_ON(!mutex_is_locked(&dev->object_name_lock)); - if (obj->handle_count++ == 0) - drm_gem_object_get(obj); + + drm_gem_object_handle_get(obj); /* * Get the user-visible handle using idr. 
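
The handle allocation in the hunk that continues below is done in two phases: idr_alloc() reserves the slot with a NULL payload, and the object pointer is only stored via idr_replace() once setup can no longer fail, so a concurrent lookup never observes a half-initialized handle. A toy single-threaded model of that reserve-then-publish scheme, using a fixed array in place of the idr:

/* Two-phase handle publication: reserve with NULL, publish last. */
#include <stdio.h>
#include <stddef.h>

#define TABLE_SIZE 8

static void *table[TABLE_SIZE];	/* stand-in for the object idr */
static int used[TABLE_SIZE];

static int handle_alloc(void)	/* ~ idr_alloc(..., NULL, ...) */
{
	for (int i = 1; i < TABLE_SIZE; i++) {
		if (!used[i]) {
			used[i] = 1;
			table[i] = NULL;	/* reserved, not visible */
			return i;
		}
	}
	return -1;
}

static void *handle_publish(int h, void *obj)	/* ~ idr_replace() */
{
	void *old = table[h];

	table[h] = obj;
	return old;	/* caller WARNs if this wasn't NULL */
}

static void *handle_lookup(int h)	/* concurrent reader */
{
	return used[h] ? table[h] : NULL;
}

int main(void)
{
	int obj = 42;
	int h = handle_alloc();

	if (h < 0)
		return 1;
	/* between these two points setup (funcs->open etc.) may fail;
	 * a lookup of h returns NULL rather than a dying object */
	printf("before publish: %p\n", handle_lookup(h));
	handle_publish(h, &obj);
	printf("after publish:  %p\n", handle_lookup(h));
	return 0;
}
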
Preload and perform @@ -373,7 +422,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); + ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT); spin_unlock(&file_priv->table_lock); idr_preload_end(); @@ -394,6 +443,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, goto err_revoke; } + /* mirrors drm_gem_handle_delete to avoid races */ + spin_lock(&file_priv->table_lock); + obj = idr_replace(&file_priv->object_idr, obj, handle); + WARN_ON(obj != NULL); + spin_unlock(&file_priv->table_lock); *handlep = handle; return 0; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 1705bfc90b1e7..98b73c581c426 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -153,6 +153,8 @@ void drm_sysfs_lease_event(struct drm_device *dev); /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj); +void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj); int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index ab9ca4824b62e..e60288af35027 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -34,6 +34,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); + struct drm_gpu_scheduler *sched = sched_job->sched; struct etnaviv_gpu *gpu = submit->gpu; u32 dma_addr; int change; @@ -76,7 +77,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job return DRM_GPU_SCHED_STAT_NOMINAL; out_no_timeout: - list_add(&sched_job->list, &sched_job->sched->pending_list); + spin_lock(&sched->job_list_lock); + list_add(&sched_job->list, &sched->pending_list); + spin_unlock(&sched->job_list_lock); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 0d185c0564b91..9eeba254cf45d 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -601,6 +601,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) if (!ctx->drm_dev) goto out; + /* check if crtc and vblank have been initialized properly */ + if (!drm_dev_has_vblank(ctx->drm_dev)) + goto out; + if (!ctx->i80_if) { drm_crtc_handle_vblank(&ctx->crtc->base); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index f57df8c481391..05e4a5a63f5d8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -187,6 +187,7 @@ struct fimd_context { u32 i80ifcon; bool i80_if; bool suspended; + bool dp_clk_enabled; wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; atomic_t win_updated; @@ -1047,7 +1048,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable) struct fimd_context *ctx = container_of(clk, struct fimd_context, dp_clk); u32 val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; + + if (enable == ctx->dp_clk_enabled) + return; + + if (enable) + pm_runtime_resume_and_get(ctx->dev); + + ctx->dp_clk_enabled = enable; writel(val, ctx->regs + DP_MIE_CLKCON); + + if (!enable) + pm_runtime_put(ctx->dev); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 45cca965c11b4..ca9e0c730013d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4300,6 +4300,24 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp) static bool intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) { + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + /* + * Display WA for HSD #13013007775: mtl/arl/lnl + * Read the sink count and link service IRQ registers in separate + * transactions to prevent disconnecting the sink on a TBT link + * inadvertently. + */ + if (IS_DISPLAY_VER(display, 14, 20) && !IS_BATTLEMAGE(i915)) { + if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3) + return false; + + /* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */ + return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, + &esi[3]) == 1; + } + return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; } diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index 642bb15fb5475..25c0424e34db2 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -314,8 +314,8 @@ #define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16) #define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val) #define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8) -#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val) +#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK, val) #define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0) -#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val) +#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK, val) #endif /* __INTEL_PSR_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index d21f3fb397060..3c7789ca62075 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1059,7 +1059,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, BXT_MIPI_TRANS_VACTIVE(port)); adjusted_mode->crtc_vtotal = intel_de_read(display, - BXT_MIPI_TRANS_VTOTAL(port)); + BXT_MIPI_TRANS_VTOTAL(port)) + 1; hactive = adjusted_mode->crtc_hdisplay; hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port)); @@ -1264,7 +1264,7 @@ static void set_dsi_timings(struct intel_encoder *encoder, intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port), adjusted_mode->crtc_vdisplay); intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port), - adjusted_mode->crtc_vtotal); + adjusted_mode->crtc_vtotal - 1); } intel_de_write(display, 
MIPI_HACTIVE_AREA_COUNT(display, port), diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c index 1e925c75fb080..c43febc862dc3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.c +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -284,7 +284,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) if (gt->gsc.intf[intf_id].irq < 0) return; - ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); + ret = generic_handle_irq_safe(gt->gsc.intf[intf_id].irq); if (ret) gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret); } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 72277bc8322e8..f84fa09cdb339 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -575,7 +575,6 @@ static int ring_context_alloc(struct intel_context *ce) /* One ringbuffer to rule them all */ GEM_BUG_ON(!engine->legacy.ring); ce->ring = engine->legacy.ring; - ce->timeline = intel_timeline_get(engine->legacy.timeline); GEM_BUG_ON(ce->state); if (engine->context_size) { @@ -588,6 +587,8 @@ static int ring_context_alloc(struct intel_context *ce) ce->state = vma; } + ce->timeline = intel_timeline_get(engine->legacy.timeline); + return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 8aaadbb702df6..b48373b166779 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -633,7 +633,7 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, atomic_inc(&guc->outstanding_submission_g2h); ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); - if (ret) + if (ret && g2h_len_dw) atomic_dec(&guc->outstanding_submission_g2h); return ret; @@ -3422,18 +3422,29 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce) * GuC is active, lets destroy this context, but at this point we can still be racing * with suspend, so we undo everything if the H2G fails in deregister_context so * that GuC reset will find this context during clean up. + * + * There is a race condition where the reset code could have altered + * this context's state and done a wakeref put before we try to + * deregister it here. So check if the context is still set to be + * destroyed before undoing earlier changes, to avoid two wakeref puts + * on the same context. */ ret = deregister_context(ce, ce->guc_id.id); if (ret) { + bool pending_destroyed; spin_lock_irqsave(&ce->guc_state.lock, flags); - set_context_registered(ce); - clr_context_destroyed(ce); + pending_destroyed = context_destroyed(ce); + if (pending_destroyed) { + set_context_registered(ce); + clr_context_destroyed(ce); + } spin_unlock_irqrestore(&ce->guc_state.lock, flags); /* * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements * the wakeref immediately but per function spec usage call this after unlock. 
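
The guc_lrc_desc_unpin() fix above closes a window where both the reset path and the deregister error path could undo the destroyed state and each drop the wakeref. A condensed model of the pattern: the rollback re-checks the state under the lock, and only the path that still observes it performs the put. This deliberately simplifies by routing both paths through the same helper; a pthread mutex stands in for the guc_state spinlock:

/* Re-check state under the lock before rolling back, so exactly one
 * path drops the reference. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	bool destroyed;		/* ~ context_destroyed(ce) */
	int wakerefs;
};

static void undo_destroy(struct ctx *ce, const char *who)
{
	bool mine;

	pthread_mutex_lock(&ce->lock);
	mine = ce->destroyed;
	if (mine)
		ce->destroyed = false;	/* ~ set_registered/clr_destroyed */
	pthread_mutex_unlock(&ce->lock);

	if (mine) {
		ce->wakerefs--;		/* ~ intel_wakeref_put_async() */
		printf("%s dropped the wakeref\n", who);
	}
}

int main(void)
{
	struct ctx ce = { PTHREAD_MUTEX_INITIALIZER, true, 1 };

	undo_destroy(&ce, "reset path");		/* wins the race */
	undo_destroy(&ce, "deregister error path");	/* skips */
	printf("wakerefs=%d\n", ce.wakerefs);		/* 0, not -1 */
	return 0;
}
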
*/ - intel_wakeref_put_async(>->wakeref); + if (pending_destroyed) + intel_wakeref_put_async(>->wakeref); } return ret; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 21eb0c5b320d5..5cc302ad13e16 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -111,11 +111,11 @@ static unsigned int config_bit(const u64 config) return other_bit(config); } -static u32 config_mask(const u64 config) +static __always_inline u32 config_mask(const u64 config) { unsigned int bit = config_bit(config); - if (__builtin_constant_p(config)) + if (__builtin_constant_p(bit)) BUILD_BUG_ON(bit > BITS_PER_TYPE(typeof_member(struct i915_pmu, enable)) - 1); @@ -124,7 +124,7 @@ static u32 config_mask(const u64 config) BITS_PER_TYPE(typeof_member(struct i915_pmu, enable)) - 1); - return BIT(config_bit(config)); + return BIT(bit); } static bool is_engine_event(struct perf_event *event) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index acae30a04a947..0122719ee9218 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -73,8 +73,8 @@ static int igt_add_request(void *arg) /* Basic preliminary test to create a request and let it loose! */ request = mock_request(rcs0(i915)->kernel_context, HZ / 10); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); i915_request_add(request); @@ -91,8 +91,8 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ request = mock_request(rcs0(i915)->kernel_context, T); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); i915_request_get(request); @@ -160,8 +160,8 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ request = mock_request(rcs0(i915)->kernel_context, T); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { pr_err("fence wait success before submit (expected timeout)!\n"); @@ -219,8 +219,8 @@ static int igt_request_rewind(void *arg) GEM_BUG_ON(IS_ERR(ce)); request = mock_request(ce, 2 * HZ); intel_context_put(ce); - if (!request) { - err = -ENOMEM; + if (IS_ERR(request)) { + err = PTR_ERR(request); goto err_context_0; } @@ -237,8 +237,8 @@ static int igt_request_rewind(void *arg) GEM_BUG_ON(IS_ERR(ce)); vip = mock_request(ce, 0); intel_context_put(ce); - if (!vip) { - err = -ENOMEM; + if (IS_ERR(vip)) { + err = PTR_ERR(vip); goto err_context_1; } diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index 09f747228dff5..1b0cf073e9643 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -35,7 +35,7 @@ mock_request(struct intel_context *ce, unsigned long delay) /* NB the i915->requests slab cache is enlarged to fit mock_request */ request = intel_context_create_request(ce); if (IS_ERR(request)) - return NULL; + return request; request->mock.delay = delay; return request; diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index ba7816fd28ec7..850b318605da4 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -363,13 +363,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset) if (!err) { if (hard_reset) { pvr_dev->fw_dev.booted = false; - 
WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev)); + WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev)); err = pvr_fw_hard_reset(pvr_dev); if (err) goto err_device_lost; - err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev); + err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev); pvr_dev->fw_dev.booted = true; if (err) goto err_device_lost; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 4e93fd075e03c..42e62b0409612 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -463,7 +463,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) ret = drmm_mode_config_init(drm); if (ret) - goto put_mutex_dev; + return ret; drm->mode_config.min_width = 64; drm->mode_config.min_height = 64; @@ -481,8 +481,11 @@ static int mtk_drm_kms_init(struct drm_device *drm) for (i = 0; i < private->data->mmsys_dev_num; i++) { drm->dev_private = private->all_drm_private[i]; ret = component_bind_all(private->all_drm_private[i]->dev, drm); - if (ret) - goto put_mutex_dev; + if (ret) { + while (--i >= 0) + component_unbind_all(private->all_drm_private[i]->dev, drm); + return ret; + } } /* @@ -575,9 +578,6 @@ static int mtk_drm_kms_init(struct drm_device *drm) err_component_unbind: for (i = 0; i < private->data->mmsys_dev_num; i++) component_unbind_all(private->all_drm_private[i]->dev, drm); -put_mutex_dev: - for (i = 0; i < private->data->mmsys_dev_num; i++) - put_device(private->all_drm_private[i]->mutex_dev); return ret; } @@ -648,8 +648,10 @@ static int mtk_drm_bind(struct device *dev) return 0; drm = drm_dev_alloc(&mtk_drm_driver, dev); - if (IS_ERR(drm)) - return PTR_ERR(drm); + if (IS_ERR(drm)) { + ret = PTR_ERR(drm); + goto err_put_dev; + } private->drm_master = true; drm->dev_private = private; @@ -675,18 +677,31 @@ static int mtk_drm_bind(struct device *dev) drm_dev_put(drm); for (i = 0; i < private->data->mmsys_dev_num; i++) private->all_drm_private[i]->drm = NULL; +err_put_dev: + for (i = 0; i < private->data->mmsys_dev_num; i++) { + /* For device_find_child in mtk_drm_get_all_priv() */ + put_device(private->all_drm_private[i]->dev); + } + put_device(private->mutex_dev); return ret; } static void mtk_drm_unbind(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); + int i; /* for multi mmsys dev, unregister drm dev in mmsys master */ if (private->drm_master) { drm_dev_unregister(private->drm); mtk_drm_kms_deinit(private->drm); drm_dev_put(private->drm); + + for (i = 0; i < private->data->mmsys_dev_num; i++) { + /* For device_find_child in mtk_drm_get_all_priv() */ + put_device(private->all_drm_private[i]->dev); + } + put_device(private->mutex_dev); } private->mtk_drm_bound = false; private->drm_master = false; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 4bd0baa2a4f55..f59452e8fa6fb 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -168,7 +168,7 @@ static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = { /* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1,65GHz */ { .limits = { - .max_hdmi_phy_freq = 1650000, + .max_hdmi_phy_freq = 1650000000, }, .attrs = (const struct soc_device_attribute []) { { .soc_id = "GXL (S805*)", }, diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 3f9345c14f31c..be4b0e4df6e13 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -37,7 +37,7 @@ 
struct meson_drm_match_data { }; struct meson_drm_soc_limits { - unsigned int max_hdmi_phy_freq; + unsigned long long max_hdmi_phy_freq; }; struct meson_drm { diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index 0593a1cde906f..2ad8383fcaed5 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -70,12 +70,12 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, { struct meson_drm *priv = encoder_hdmi->priv; int vic = drm_match_cea_mode(mode); - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; + unsigned long long phy_freq; + unsigned long long vclk_freq; + unsigned long long venc_freq; + unsigned long long hdmi_freq; - vclk_freq = mode->clock; + vclk_freq = mode->clock * 1000ULL; /* For 420, pixel clock is half unlike venc clock */ if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) @@ -107,7 +107,8 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, if (mode->flags & DRM_MODE_FLAG_DBLCLK) venc_freq /= 2; - dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", + dev_dbg(priv->dev, + "phy:%lluHz vclk=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n", phy_freq, vclk_freq, venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); @@ -122,10 +123,11 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); struct meson_drm *priv = encoder_hdmi->priv; bool is_hdmi2_sink = display_info->hdmi.scdc.supported; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; + unsigned long long clock = mode->clock * 1000ULL; + unsigned long long phy_freq; + unsigned long long vclk_freq; + unsigned long long venc_freq; + unsigned long long hdmi_freq; int vic = drm_match_cea_mode(mode); enum drm_mode_status status; @@ -144,12 +146,12 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri if (status != MODE_OK) return status; - return meson_vclk_dmt_supported_freq(priv, mode->clock); + return meson_vclk_dmt_supported_freq(priv, clock); /* Check against supported VIC modes */ } else if (!meson_venc_hdmi_supported_vic(vic)) return MODE_BAD; - vclk_freq = mode->clock; + vclk_freq = clock; /* For 420, pixel clock is half unlike venc clock */ if (drm_mode_is_420_only(display_info, mode) || @@ -179,7 +181,8 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri if (mode->flags & DRM_MODE_FLAG_DBLCLK) venc_freq /= 2; - dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", + dev_dbg(priv->dev, + "%s: vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz\n", __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index 2a82119eb58ed..dfe0c28a0f054 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -110,7 +110,7 @@ #define HDMI_PLL_LOCK BIT(31) #define HDMI_PLL_LOCK_G12A (3 << 30) -#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001) +#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL) /* VID PLL Dividers */ enum { @@ -360,11 +360,11 @@ enum { }; struct meson_vclk_params { - unsigned int pll_freq; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int 
venc_freq; - unsigned int pixel_freq; + unsigned long long pll_freq; + unsigned long long phy_freq; + unsigned long long vclk_freq; + unsigned long long venc_freq; + unsigned long long pixel_freq; unsigned int pll_od1; unsigned int pll_od2; unsigned int pll_od3; @@ -372,11 +372,11 @@ struct meson_vclk_params { unsigned int vclk_div; } params[] = { [MESON_VCLK_HDMI_ENCI_54000] = { - .pll_freq = 4320000, - .phy_freq = 270000, - .vclk_freq = 54000, - .venc_freq = 54000, - .pixel_freq = 54000, + .pll_freq = 4320000000, + .phy_freq = 270000000, + .vclk_freq = 54000000, + .venc_freq = 54000000, + .pixel_freq = 54000000, .pll_od1 = 4, .pll_od2 = 4, .pll_od3 = 1, @@ -384,11 +384,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_DDR_54000] = { - .pll_freq = 4320000, - .phy_freq = 270000, - .vclk_freq = 54000, - .venc_freq = 54000, - .pixel_freq = 27000, + .pll_freq = 4320000000, + .phy_freq = 270000000, + .vclk_freq = 54000000, + .venc_freq = 54000000, + .pixel_freq = 27000000, .pll_od1 = 4, .pll_od2 = 4, .pll_od3 = 1, @@ -396,11 +396,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_DDR_148500] = { - .pll_freq = 2970000, - .phy_freq = 742500, - .vclk_freq = 148500, - .venc_freq = 148500, - .pixel_freq = 74250, + .pll_freq = 2970000000, + .phy_freq = 742500000, + .vclk_freq = 148500000, + .venc_freq = 148500000, + .pixel_freq = 74250000, .pll_od1 = 4, .pll_od2 = 1, .pll_od3 = 1, @@ -408,11 +408,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_74250] = { - .pll_freq = 2970000, - .phy_freq = 742500, - .vclk_freq = 74250, - .venc_freq = 74250, - .pixel_freq = 74250, + .pll_freq = 2970000000, + .phy_freq = 742500000, + .vclk_freq = 74250000, + .venc_freq = 74250000, + .pixel_freq = 74250000, .pll_od1 = 2, .pll_od2 = 2, .pll_od3 = 2, @@ -420,11 +420,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_148500] = { - .pll_freq = 2970000, - .phy_freq = 1485000, - .vclk_freq = 148500, - .venc_freq = 148500, - .pixel_freq = 148500, + .pll_freq = 2970000000, + .phy_freq = 1485000000, + .vclk_freq = 148500000, + .venc_freq = 148500000, + .pixel_freq = 148500000, .pll_od1 = 1, .pll_od2 = 2, .pll_od3 = 2, @@ -432,11 +432,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_297000] = { - .pll_freq = 5940000, - .phy_freq = 2970000, - .venc_freq = 297000, - .vclk_freq = 297000, - .pixel_freq = 297000, + .pll_freq = 5940000000, + .phy_freq = 2970000000, + .venc_freq = 297000000, + .vclk_freq = 297000000, + .pixel_freq = 297000000, .pll_od1 = 2, .pll_od2 = 1, .pll_od3 = 1, @@ -444,11 +444,11 @@ struct meson_vclk_params { .vclk_div = 2, }, [MESON_VCLK_HDMI_594000] = { - .pll_freq = 5940000, - .phy_freq = 5940000, - .venc_freq = 594000, - .vclk_freq = 594000, - .pixel_freq = 594000, + .pll_freq = 5940000000, + .phy_freq = 5940000000, + .venc_freq = 594000000, + .vclk_freq = 594000000, + .pixel_freq = 594000000, .pll_od1 = 1, .pll_od2 = 1, .pll_od3 = 2, @@ -456,11 +456,11 @@ struct meson_vclk_params { .vclk_div = 1, }, [MESON_VCLK_HDMI_594000_YUV420] = { - .pll_freq = 5940000, - .phy_freq = 2970000, - .venc_freq = 594000, - .vclk_freq = 594000, - .pixel_freq = 297000, + .pll_freq = 5940000000, + .phy_freq = 2970000000, + .venc_freq = 594000000, + .vclk_freq = 594000000, + .pixel_freq = 297000000, .pll_od1 = 2, .pll_od2 = 1, .pll_od3 = 1, @@ -617,16 +617,16 @@ static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m, 3 << 20, pll_od_to_reg(od3) << 20); } -#define XTAL_FREQ 24000 +#define XTAL_FREQ (24 * 1000 * 1000) 
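
The meson conversions above move all clock bookkeeping from kHz in unsigned int to Hz in unsigned long long. The reason is visible in FREQ_1000_1001(): the 1000/1001 pull-down variant needs freq * 1000 as an intermediate, which overflows 32 bits once frequencies are expressed in Hz. A standalone demonstration, with DIV_ROUND_CLOSEST_ULL reimplemented locally:

/* Why Hz-based clock math needs 64-bit arithmetic. */
#include <stdio.h>
#include <stdint.h>

/* ~ DIV_ROUND_CLOSEST_ULL((f) * 1000ULL, 1001ULL) */
#define FREQ_1000_1001(f) (((f) * 1000ULL + 500) / 1001ULL)

int main(void)
{
	unsigned long long vclk = 594000000ULL;	/* 594 MHz in Hz */

	/* 32-bit math would wrap: 594000000 * 1000 > UINT32_MAX */
	printf("intermediate: %llu (UINT32_MAX is %u)\n",
	       vclk * 1000ULL, UINT32_MAX);

	/* 593406593 Hz, within the <1 kHz vclk match window above */
	printf("1000/1001 variant: %llu Hz\n", FREQ_1000_1001(vclk));
	return 0;
}
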
static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv, - unsigned int pll_freq) + unsigned long long pll_freq) { /* The GXBB PLL has a /2 pre-multiplier */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) - pll_freq /= 2; + pll_freq = DIV_ROUND_DOWN_ULL(pll_freq, 2); - return pll_freq / XTAL_FREQ; + return DIV_ROUND_DOWN_ULL(pll_freq, XTAL_FREQ); } #define HDMI_FRAC_MAX_GXBB 4096 @@ -635,12 +635,13 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv, static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv, unsigned int m, - unsigned int pll_freq) + unsigned long long pll_freq) { - unsigned int parent_freq = XTAL_FREQ; + unsigned long long parent_freq = XTAL_FREQ; unsigned int frac_max = HDMI_FRAC_MAX_GXL; unsigned int frac_m; unsigned int frac; + u32 remainder; /* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { @@ -652,11 +653,11 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv, frac_max = HDMI_FRAC_MAX_G12A; /* We can have a perfect match !*/ - if (pll_freq / m == parent_freq && - pll_freq % m == 0) + if (div_u64_rem(pll_freq, m, &remainder) == parent_freq && + remainder == 0) return 0; - frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq); + frac = mul_u64_u64_div_u64(pll_freq, frac_max, parent_freq); frac_m = m * frac_max; if (frac_m > frac) return frac_max; @@ -666,7 +667,7 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv, } static bool meson_hdmi_pll_validate_params(struct meson_drm *priv, - unsigned int m, + unsigned long long m, unsigned int frac) { if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { @@ -694,7 +695,7 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv, } static bool meson_hdmi_pll_find_params(struct meson_drm *priv, - unsigned int freq, + unsigned long long freq, unsigned int *m, unsigned int *frac, unsigned int *od) @@ -706,7 +707,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv, continue; *frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od); - DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n", + DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d\n", freq, *m, *frac, *od); if (meson_hdmi_pll_validate_params(priv, *m, *frac)) @@ -718,7 +719,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv, /* pll_freq is the frequency after the OD dividers */ enum drm_mode_status -meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq) +meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq) { unsigned int od, m, frac; @@ -741,7 +742,7 @@ EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq); /* pll_freq is the frequency after the OD dividers */ static void meson_hdmi_pll_generic_set(struct meson_drm *priv, - unsigned int pll_freq) + unsigned long long pll_freq) { unsigned int od, m, frac, od1, od2, od3; @@ -756,7 +757,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv, od1 = od / od2; } - DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n", + DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d/%d/%d\n", pll_freq, m, frac, od1, od2, od3); meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3); @@ -764,17 +765,48 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv, return; } - DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n", + DRM_ERROR("Fatal, unable to find parameters for PLL freq %lluHz\n", pll_freq); } +static bool 
meson_vclk_freqs_are_matching_param(unsigned int idx, + unsigned long long phy_freq, + unsigned long long vclk_freq) +{ + DRM_DEBUG_DRIVER("i = %d vclk_freq = %lluHz alt = %lluHz\n", + idx, params[idx].vclk_freq, + FREQ_1000_1001(params[idx].vclk_freq)); + DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n", + idx, params[idx].phy_freq, + FREQ_1000_1001(params[idx].phy_freq)); + + /* Match strict frequency */ + if (phy_freq == params[idx].phy_freq && + vclk_freq == params[idx].vclk_freq) + return true; + + /* Match 1000/1001 variant: vclk deviation has to be less than 1kHz + * (drm EDID is defined in 1kHz steps, so everything smaller must be + * rounding error) and the PHY freq deviation has to be less than + * 10kHz (as the TMDS clock is 10 times the pixel clock, so anything + * smaller must be rounding error as well). + */ + if (abs(vclk_freq - FREQ_1000_1001(params[idx].vclk_freq)) < 1000 && + abs(phy_freq - FREQ_1000_1001(params[idx].phy_freq)) < 10000) + return true; + + /* no match */ + return false; +} + enum drm_mode_status -meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, - unsigned int vclk_freq) +meson_vclk_vic_supported_freq(struct meson_drm *priv, + unsigned long long phy_freq, + unsigned long long vclk_freq) { int i; - DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n", + DRM_DEBUG_DRIVER("phy_freq = %lluHz vclk_freq = %lluHz\n", phy_freq, vclk_freq); /* Check against soc revision/package limits */ @@ -785,19 +817,7 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, } for (i = 0 ; params[i].pixel_freq ; ++i) { - DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n", - i, params[i].pixel_freq, - FREQ_1000_1001(params[i].pixel_freq)); - DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n", - i, params[i].phy_freq, - FREQ_1000_1001(params[i].phy_freq/10)*10); - /* Match strict frequency */ - if (phy_freq == params[i].phy_freq && - vclk_freq == params[i].vclk_freq) - return MODE_OK; - /* Match 1000/1001 variant */ - if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) && - vclk_freq == FREQ_1000_1001(params[i].vclk_freq)) + if (meson_vclk_freqs_are_matching_param(i, phy_freq, vclk_freq)) return MODE_OK; } @@ -805,8 +825,9 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, } EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq); -static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, - unsigned int od1, unsigned int od2, unsigned int od3, +static void meson_vclk_set(struct meson_drm *priv, + unsigned long long pll_base_freq, unsigned int od1, + unsigned int od2, unsigned int od3, unsigned int vid_pll_div, unsigned int vclk_div, unsigned int hdmi_tx_div, unsigned int venc_div, bool hdmi_use_enci, bool vic_alternate_clock) @@ -826,15 +847,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, meson_hdmi_pll_generic_set(priv, pll_base_freq); } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { switch (pll_base_freq) { - case 2970000: + case 2970000000: m = 0x3d; frac = vic_alternate_clock ? 0xd02 : 0xe00; break; - case 4320000: + case 4320000000: m = vic_alternate_clock ? 0x59 : 0x5a; frac = vic_alternate_clock ? 0xe8f : 0; break; - case 5940000: + case 5940000000: m = 0x7b; frac = vic_alternate_clock ? 
0xa05 : 0xc00; break; @@ -844,15 +865,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { switch (pll_base_freq) { - case 2970000: + case 2970000000: m = 0x7b; frac = vic_alternate_clock ? 0x281 : 0x300; break; - case 4320000: + case 4320000000: m = vic_alternate_clock ? 0xb3 : 0xb4; frac = vic_alternate_clock ? 0x347 : 0; break; - case 5940000: + case 5940000000: m = 0xf7; frac = vic_alternate_clock ? 0x102 : 0x200; break; @@ -861,15 +882,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3); } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { switch (pll_base_freq) { - case 2970000: + case 2970000000: m = 0x7b; frac = vic_alternate_clock ? 0x140b4 : 0x18000; break; - case 4320000: + case 4320000000: m = vic_alternate_clock ? 0xb3 : 0xb4; frac = vic_alternate_clock ? 0x1a3ee : 0; break; - case 5940000: + case 5940000000: m = 0xf7; frac = vic_alternate_clock ? 0x8148 : 0x10000; break; @@ -1025,14 +1046,14 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq, } void meson_vclk_setup(struct meson_drm *priv, unsigned int target, - unsigned int phy_freq, unsigned int vclk_freq, - unsigned int venc_freq, unsigned int dac_freq, + unsigned long long phy_freq, unsigned long long vclk_freq, + unsigned long long venc_freq, unsigned long long dac_freq, bool hdmi_use_enci) { bool vic_alternate_clock = false; - unsigned int freq; - unsigned int hdmi_tx_div; - unsigned int venc_div; + unsigned long long freq; + unsigned long long hdmi_tx_div; + unsigned long long venc_div; if (target == MESON_VCLK_TARGET_CVBS) { meson_venci_cvbs_clock_config(priv); @@ -1052,27 +1073,25 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, return; } - hdmi_tx_div = vclk_freq / dac_freq; + hdmi_tx_div = DIV_ROUND_DOWN_ULL(vclk_freq, dac_freq); if (hdmi_tx_div == 0) { - pr_err("Fatal Error, invalid HDMI-TX freq %d\n", + pr_err("Fatal Error, invalid HDMI-TX freq %lluHz\n", dac_freq); return; } - venc_div = vclk_freq / venc_freq; + venc_div = DIV_ROUND_DOWN_ULL(vclk_freq, venc_freq); if (venc_div == 0) { - pr_err("Fatal Error, invalid HDMI venc freq %d\n", + pr_err("Fatal Error, invalid HDMI venc freq %lluHz\n", venc_freq); return; } for (freq = 0 ; params[freq].pixel_freq ; ++freq) { - if ((phy_freq == params[freq].phy_freq || - phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) && - (vclk_freq == params[freq].vclk_freq || - vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) { + if (meson_vclk_freqs_are_matching_param(freq, phy_freq, + vclk_freq)) { if (vclk_freq != params[freq].vclk_freq) vic_alternate_clock = true; else @@ -1098,7 +1117,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, } if (!params[freq].pixel_freq) { - pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq); + pr_err("Fatal Error, invalid HDMI vclk freq %lluHz\n", + vclk_freq); return; } diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h index 60617aaf18dd1..7ac55744e5749 100644 --- a/drivers/gpu/drm/meson/meson_vclk.h +++ b/drivers/gpu/drm/meson/meson_vclk.h @@ -20,17 +20,18 @@ enum { }; /* 27MHz is the CVBS Pixel Clock */ -#define MESON_VCLK_CVBS 27000 +#define MESON_VCLK_CVBS (27 * 1000 * 1000) enum drm_mode_status -meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq); 
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq); enum drm_mode_status -meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, - unsigned int vclk_freq); +meson_vclk_vic_supported_freq(struct meson_drm *priv, + unsigned long long phy_freq, + unsigned long long vclk_freq); void meson_vclk_setup(struct meson_drm *priv, unsigned int target, - unsigned int phy_freq, unsigned int vclk_freq, - unsigned int venc_freq, unsigned int dac_freq, + unsigned long long phy_freq, unsigned long long vclk_freq, + unsigned long long venc_freq, unsigned long long dac_freq, bool hdmi_use_enci); #endif /* __MESON_VCLK_H */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index d903ad9c0b5fb..80c78aff96433 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -123,6 +123,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, submit->seqno - 1); + + OUT_PKT7(ring, CP_THREAD_CONTROL, 1); + OUT_RING(ring, CP_SET_THREAD_BOTH); + + /* Reset state used to synchronize BR and BV */ + OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1); + OUT_RING(ring, + CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS | + CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE | + CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER | + CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS); + + OUT_PKT7(ring, CP_THREAD_CONTROL, 1); + OUT_RING(ring, CP_SET_THREAD_BR); } if (!sysprof) { @@ -554,7 +568,6 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) if (adreno_is_7c3(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.amsbc = 1; - gpu->ubwc_config.rgb565_predicator = 1; gpu->ubwc_config.uavflagprd_inv = 2; gpu->ubwc_config.macrotile_mode = 1; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index 36cc9dbc00b5c..d8d5a91c00ec8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -76,7 +76,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -84,7 +84,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 4, .type = SSPP_TYPE_VIG, @@ -92,7 +92,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_2", .id = SSPP_VIG2, .base = 0x8000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 8, .type = SSPP_TYPE_VIG, @@ -100,7 +100,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 12, .type = SSPP_TYPE_VIG, @@ -108,7 +108,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -116,7 
+116,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -124,7 +124,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -132,7 +132,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index e8eacdb47967a..485c3041c8018 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 4, .type = SSPP_TYPE_VIG, @@ -91,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_2", .id = SSPP_VIG2, .base = 0x8000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 8, .type = SSPP_TYPE_VIG, @@ -99,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 12, .type = SSPP_TYPE_VIG, @@ -107,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -115,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -123,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index d8a2edebfe8c3..b7699ca89dcc5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -94,17 +94,21 @@ static void drm_mode_to_intf_timing_params( timing->vsync_polarity = 0; } - /* for DP/EDP, Shift timings to align it to bottom right */ - if (phys_enc->hw_intf->cap->type == INTF_DP) { + timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent); + timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent); + + /* + * For DP/EDP, Shift timings to align it to bottom right. + * wide_bus_en is set for everything excluding SDM845 & + * porch changes cause DisplayPort failure and HDMI tearing. + */ + if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) { timing->h_back_porch += timing->h_front_porch; timing->h_front_porch = 0; timing->v_back_porch += timing->v_front_porch; timing->v_front_porch = 0; } - timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent); - timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent); - /* * for DP, divide the horizonal parameters by 2 when * widebus is enabled diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index e1228fb093ee0..a5c1534eafdb1 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -928,16 +928,17 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, return -EINVAL; } - if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) - return MODE_CLOCK_HIGH; - dp_display = container_of(dp, struct dp_display_private, dp_display); link_info = &dp_display->panel->link_info; - if (drm_mode_is_420_only(&dp->connector->display_info, mode) && - dp_display->panel->vsc_sdp_supported) + if ((drm_mode_is_420_only(&dp->connector->display_info, mode) && + dp_display->panel->vsc_sdp_supported) || + msm_dp_wide_bus_available(dp)) mode_pclk_khz /= 2; + if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) + return MODE_CLOCK_HIGH; + mode_bpp = dp->connector->display_info.bpc * num_components; if (!mode_bpp) mode_bpp = default_bpp; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 1b9be5bd97f12..da0176eae3fe3 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -257,7 +257,10 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, return -EINVAL; } - if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) + if (msm_dp_wide_bus_available(dp)) + mode_pclk_khz /= 2; + + if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) return MODE_CLOCK_HIGH; /* diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index 677c625718119..28cc550e22a88 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -703,6 +703,13 @@ static int dsi_pll_10nm_init(struct msm_dsi_phy *phy) /* TODO: Remove this when we have proper display handover support */ msm_dsi_phy_pll_save_state(phy); + /* + * Store also proper vco_current_rate, because its value will be used in + * dsi_10nm_pll_restore_state(). 
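
The two DisplayPort mode_valid hunks above stop comparing the raw mode clock against the controller limit and instead halve it first for wide-bus operation (two pixels per clock) and for YUV420-only sinks, so high modes that fit after the divide are no longer rejected. A sketch of that check; the limit value and the flag handling are simplified assumptions, not the msm driver's exact logic:

/* Validate the effective, not the raw, pixel clock. */
#include <stdbool.h>
#include <stdio.h>

#define DP_MAX_PIXEL_CLK_KHZ 675000	/* assumed platform limit */

enum mode_status { MODE_OK, MODE_CLOCK_HIGH };

static enum mode_status dp_mode_valid(unsigned int mode_pclk_khz,
				      bool wide_bus, bool yuv420_only)
{
	if (yuv420_only || wide_bus)
		mode_pclk_khz /= 2;	/* two pixels per clock cycle */

	return mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ ?
		MODE_CLOCK_HIGH : MODE_OK;
}

int main(void)
{
	/* a 4k high-refresh mode: the raw clock exceeds the limit,
	 * but the wide-bus interface only needs half of it */
	unsigned int pclk = 1188000;

	printf("no widebus: %d\n", dp_mode_valid(pclk, false, false));
	printf("widebus:    %d\n", dp_mode_valid(pclk, true, false));
	return 0;
}
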
+ */ + if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE)) + pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate; + return 0; } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index f775638d239a5..4b3a8ee8e278f 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref) container_of(kref, struct msm_gem_submit, ref); unsigned i; + /* + * In error paths, we could unref the submit without calling + * drm_sched_entity_push_job(), so msm_job_free() will never + * get called. Since drm_sched_job_cleanup() will NULL out + * s_fence, we can use that to detect this case. + */ + if (submit->base.s_fence) + drm_sched_job_cleanup(&submit->base); + if (submit->fence_id) { spin_lock(&submit->queue->idr_lock); idr_remove(&submit->queue->fence_idr, submit->fence_id); @@ -658,6 +667,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_ringbuffer *ring; struct msm_submit_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; + struct sync_file *sync_file = NULL; int out_fence_fd = -1; unsigned i; int ret; @@ -868,7 +878,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { - struct sync_file *sync_file = sync_file_create(submit->user_fence); + sync_file = sync_file_create(submit->user_fence); if (!sync_file) { ret = -ENOMEM; } else { @@ -902,8 +912,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out_unlock: mutex_unlock(&queue->lock); out_post_unlock: - if (ret && (out_fence_fd >= 0)) + if (ret && (out_fence_fd >= 0)) { put_unused_fd(out_fence_fd); + if (sync_file) + fput(sync_file->file); + } if (!IS_ERR_OR_NULL(submit)) { msm_gem_submit_put(submit); diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 6970b0f7f457c..2e1d5c3432728 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu) priv->gpu_devfreq_config.downdifferential = 10; mutex_init(&df->lock); + df->suspended = true; ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, DEV_PM_QOS_MIN_FREQUENCY, 0); diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index c6cdc5c003dc0..2ca0ad6efc96e 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -2260,7 +2260,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) - + + diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index d47442125fa18..9aae26eb7d8fb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -42,7 +42,7 @@ #include "nouveau_acpi.h" static struct ida bl_ida; -#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0' +#define BL_NAME_SIZE 24 // 12 for name + 11 for digits + 1 for '\0' static bool nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index fc84ca214f247..3ad4f6e9a8ac2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -1454,7 +1454,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) 
union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 4, - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; caps->status = 0xffff; @@ -1462,17 +1461,22 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) return; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; caps->status = 0; caps->optimusCaps = *(u32 *)obj->buffer.pointer; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -1489,24 +1493,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = sizeof(caps), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; jt->status = 0xffff; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; jt->status = 0; jt->jtCaps = *(u32 *)obj->buffer.pointer; jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; jt->bSBIOSCaps = 0; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 04ce925b3d9db..49cfa84b34f0c 100644 --- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -22,7 +22,6 @@ struct sofef00_panel { struct mipi_dsi_device *dsi; struct regulator *supply; struct gpio_desc *reset_gpio; - const struct drm_display_mode *mode; }; static inline @@ -159,26 +158,11 @@ static const struct drm_display_mode enchilada_panel_mode = { .height_mm = 145, }; -static const struct drm_display_mode fajita_panel_mode = { - .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000, - .hdisplay = 1080, - .hsync_start = 1080 + 72, - .hsync_end = 1080 + 72 + 16, - .htotal = 1080 + 72 + 16 + 36, - .vdisplay = 2340, - .vsync_start = 2340 + 32, - .vsync_end = 2340 + 32 + 4, - .vtotal = 2340 + 32 + 4 + 18, - .width_mm = 68, - .height_mm = 145, -}; - static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; - struct sofef00_panel *ctx = to_sofef00_panel(panel); - mode = drm_mode_duplicate(connector->dev, ctx->mode); + mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode); if (!mode) return -ENOMEM; @@ -239,13 +223,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) if (!ctx) return -ENOMEM; - ctx->mode = of_device_get_match_data(dev); - - if (!ctx->mode) { - dev_err(dev, "Missing device mode\n"); - return -ENODEV; - } - ctx->supply = devm_regulator_get(dev, "vddio"); if (IS_ERR(ctx->supply)) return dev_err_probe(dev, PTR_ERR(ctx->supply), @@ -295,14 +272,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi) } static const struct of_device_id sofef00_panel_of_match[] = { - { // OnePlus 6 / enchilada - .compatible = "samsung,sofef00", - .data = &enchilada_panel_mode, - }, - { // OnePlus 6T / fajita - .compatible = "samsung,s6e3fc2x01", - .data = &fajita_panel_mode, - }, + { .compatible = "samsung,sofef00" 
}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sofef00_panel_of_match); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index d041ff542a4ee..82db3daf4f81a 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2141,13 +2141,14 @@ static const struct display_timing evervision_vgg644804_timing = { static const struct panel_desc evervision_vgg644804 = { .timings = &evervision_vgg644804_timing, .num_timings = 1, - .bpc = 8, + .bpc = 6, .size = { .width = 115, .height = 86, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing evervision_vgg804821_timing = { diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 0e6f94df690dd..b57824abeb9ee 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -780,6 +780,7 @@ int panthor_vm_active(struct panthor_vm *vm) if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) { gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as)); ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as); + ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as); gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask); } diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h index b7b3b3add1662..a7a323dc5cf92 100644 --- a/drivers/gpu/drm/panthor/panthor_regs.h +++ b/drivers/gpu/drm/panthor/panthor_regs.h @@ -133,8 +133,8 @@ #define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name) #define GPU_COHERENCY_PROTOCOL 0x304 -#define GPU_COHERENCY_ACE 0 -#define GPU_COHERENCY_ACE_LITE 1 +#define GPU_COHERENCY_ACE_LITE 0 +#define GPU_COHERENCY_ACE 1 #define GPU_COHERENCY_NONE 31 #define MCU_CONTROL 0x700 diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c index 70d8ad065bfa1..4c8fe83dd6101 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c @@ -705,7 +705,7 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu) ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name, cells, i, &args); if (ret < 0) - goto error; + goto done; /* * Add the VSP to the list or update the corresponding existing @@ -743,13 +743,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu) vsp->dev = rcdu; ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask); - if (ret < 0) - goto error; + if (ret) + goto done; } - return 0; - -error: +done: for (i = 0; i < ARRAY_SIZE(vsps); ++i) of_node_put(vsps[i].np); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 002057be0d84a..c9c50e3b18a23 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -189,6 +189,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) { struct drm_sched_job *job = container_of(wrk, typeof(*job), work); + drm_sched_fence_scheduled(job->s_fence, NULL); drm_sched_fence_finished(job->s_fence, -ESRCH); WARN_ON(job->s_fence->parent); job->sched->ops->free_job(job); diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 06f5057690bd8..e0fc12d514d76 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ 
b/drivers/gpu/drm/solomon/ssd130x.c @@ -974,7 +974,7 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array) static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array) { - unsigned int columns = DIV_ROUND_UP(ssd130x->height, SSD132X_SEGMENT_WIDTH); + unsigned int columns = DIV_ROUND_UP(ssd130x->width, SSD132X_SEGMENT_WIDTH); unsigned int height = ssd130x->height; memset(data_array, 0, columns * height); diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index be61c9d1a4f0e..51ca78551b57e 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1320,10 +1320,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm, if (wgrp->dc == dc->pipe) { for (j = 0; j < wgrp->num_windows; j++) { unsigned int index = wgrp->windows[j]; + enum drm_plane_type type; + + if (primary) + type = DRM_PLANE_TYPE_OVERLAY; + else + type = DRM_PLANE_TYPE_PRIMARY; plane = tegra_shared_plane_create(drm, dc, wgrp->index, - index); + index, type); if (IS_ERR(plane)) return plane; @@ -1331,10 +1337,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm, * Choose the first shared plane owned by this * head as the primary plane. */ - if (!primary) { - plane->type = DRM_PLANE_TYPE_PRIMARY; + if (!primary) primary = plane; - } } } } @@ -1388,7 +1392,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc) if (crtc->state) tegra_crtc_atomic_destroy_state(crtc, crtc->state); - __drm_atomic_helper_crtc_reset(crtc, &state->base); + if (state) + __drm_atomic_helper_crtc_reset(crtc, &state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); } static struct drm_crtc_state * diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index e0c2019a591b1..3507dd6e90234 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -755,9 +755,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = { struct drm_plane *tegra_shared_plane_create(struct drm_device *drm, struct tegra_dc *dc, unsigned int wgrp, - unsigned int index) + unsigned int index, + enum drm_plane_type type) { - enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY; struct tegra_drm *tegra = drm->dev_private; struct tegra_display_hub *hub = tegra->hub; struct tegra_shared_plane *plane; diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h index 23c4b2115ed1e..a66f18c4facc9 100644 --- a/drivers/gpu/drm/tegra/hub.h +++ b/drivers/gpu/drm/tegra/hub.h @@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub); struct drm_plane *tegra_shared_plane_create(struct drm_device *drm, struct tegra_dc *dc, unsigned int wgrp, - unsigned int index); + unsigned int index, + enum drm_plane_type type); int tegra_display_hub_atomic_check(struct drm_device *drm, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c index 4860790666af5..14ef61b44f47c 100644 --- a/drivers/gpu/drm/tegra/nvdec.c +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec) if (!client->group) { virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(nvdec->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); if (IS_ERR(virt)) diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 1e8ec50b759e4..ff5a749710db3 100644 --- 
a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -200,6 +200,11 @@ static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = { .atomic_check = tegra_rgb_encoder_atomic_check, }; +static void tegra_dc_of_node_put(void *data) +{ + of_node_put(data); +} + int tegra_dc_rgb_probe(struct tegra_dc *dc) { struct device_node *np; @@ -207,7 +212,14 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) int err; np = of_get_child_by_name(dc->dev->of_node, "rgb"); - if (!np || !of_device_is_available(np)) + if (!np) + return -ENODEV; + + err = devm_add_action_or_reset(dc->dev, tegra_dc_of_node_put, np); + if (err < 0) + return err; + + if (!of_device_is_available(np)) return -ENODEV; rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL); diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 751326e3d9c37..c7e81f2610f8c 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -318,7 +318,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch) /* Enable extended blanking and pitch bits, and enable full memory */ cr1b = 0x22; cr1b |= (pitch >> 7) & 0x10; - cr1b |= (pitch >> 6) & 0x40; wreg_crt(cirrus, 0x1b, cr1b); cirrus_set_start_address(cirrus, 0); diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index d19e102894282..07abaf27315f7 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -284,7 +284,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev) static void simpledrm_device_release_clocks(void *res) { - struct simpledrm_device *sdev = simpledrm_device_of_dev(res); + struct simpledrm_device *sdev = res; unsigned int i; for (i = 0; i < sdev->clk_count; ++i) { @@ -382,7 +382,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev) static void simpledrm_device_release_regulators(void *res) { - struct simpledrm_device *sdev = simpledrm_device_of_dev(res); + struct simpledrm_device *sdev = res; unsigned int i; for (i = 0; i < sdev->regulator_count; ++i) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3c07f4712d5cc..b600be2a5c849 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -254,6 +254,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); + ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); + if (ret) { + dma_resv_unlock(&fbo->base.base._resv); + kfree(fbo); + return ret; + } + if (fbo->base.resource) { ttm_resource_set_bo(fbo->base.resource, &fbo->base); bo->resource = NULL; @@ -262,12 +269,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->base.bulk_move = NULL; } - ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); - if (ret) { - kfree(fbo); - return ret; - } - ttm_bo_get(bo); fbo->bo = bo; diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 280a09a6e2ad7..0f712eb685ba2 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -126,9 +126,9 @@ static void udl_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); + drm_dev_unplug(dev); drm_kms_helper_poll_fini(dev); udl_drop_usb(dev); - drm_dev_unplug(dev); } /* diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 75b4725d49c7e..d4b0549205c29 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ 
b/drivers/gpu/drm/v3d/v3d_drv.h @@ -95,6 +95,12 @@ struct v3d_perfmon { u64 values[] __counted_by(ncounters); }; +enum v3d_irq { + V3D_CORE_IRQ, + V3D_HUB_IRQ, + V3D_MAX_IRQS, +}; + struct v3d_dev { struct drm_device drm; @@ -106,6 +112,8 @@ struct v3d_dev { bool single_irq_line; + int irq[V3D_MAX_IRQS]; + struct v3d_perfmon_info perfmon_info; void __iomem *hub_regs; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index da8faf3b90116..6b6ba7a68fcb4 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -118,6 +118,8 @@ v3d_reset(struct v3d_dev *v3d) if (false) v3d_idle_axi(v3d, 0); + v3d_irq_disable(v3d); + v3d_idle_gca(v3d); v3d_reset_v3d(v3d); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 72b6a119412fa..b98e1a4b33c71 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -228,7 +228,7 @@ v3d_hub_irq(int irq, void *arg) int v3d_irq_init(struct v3d_dev *v3d) { - int irq1, ret, core; + int irq, ret, core; INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); @@ -239,17 +239,24 @@ v3d_irq_init(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver)); - irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); - if (irq1 == -EPROBE_DEFER) - return irq1; - if (irq1 > 0) { - ret = devm_request_irq(v3d->drm.dev, irq1, + irq = platform_get_irq_optional(v3d_to_pdev(v3d), 1); + if (irq == -EPROBE_DEFER) + return irq; + if (irq > 0) { + v3d->irq[V3D_CORE_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], v3d_irq, IRQF_SHARED, "v3d_core0", v3d); if (ret) goto fail; - ret = devm_request_irq(v3d->drm.dev, - platform_get_irq(v3d_to_pdev(v3d), 0), + + irq = platform_get_irq(v3d_to_pdev(v3d), 0); + if (irq < 0) + return irq; + v3d->irq[V3D_HUB_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ], v3d_hub_irq, IRQF_SHARED, "v3d_hub", v3d); if (ret) @@ -257,8 +264,12 @@ v3d_irq_init(struct v3d_dev *v3d) } else { v3d->single_irq_line = true; - ret = devm_request_irq(v3d->drm.dev, - platform_get_irq(v3d_to_pdev(v3d), 0), + irq = platform_get_irq(v3d_to_pdev(v3d), 0); + if (irq < 0) + return irq; + v3d->irq[V3D_CORE_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], v3d_irq, IRQF_SHARED, "v3d", v3d); if (ret) @@ -299,6 +310,12 @@ v3d_irq_disable(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); + /* Finish any interrupt handler still in flight. */ + for (int i = 0; i < V3D_MAX_IRQS; i++) { + if (v3d->irq[i]) + synchronize_irq(v3d->irq[i]); + } + /* Clear any pending interrupts we might have left. 
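The mask writes and synchronize_irq() calls above guarantee that no handler is still running, so nothing can re-raise these bits while they are being cleared.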
*/ for (core = 0; core < v3d->cores; core++) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index ad32e584deeec..c9c88d3ad6698 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -191,7 +191,6 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) struct v3d_dev *v3d = job->v3d; struct v3d_file_priv *file = job->file->driver_priv; struct v3d_stats *global_stats = &v3d->queue[queue].stats; - struct v3d_stats *local_stats = &file->stats[queue]; u64 now = local_clock(); unsigned long flags; @@ -201,7 +200,12 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) else preempt_disable(); - v3d_stats_update(local_stats, now); + /* Don't update the local stats if the file context has already closed */ + if (file) + v3d_stats_update(&file->stats[queue], now); + else + drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n"); + v3d_stats_update(global_stats, now); if (IS_ENABLED(CONFIG_LOCKDEP)) diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c index e70d7c3076acf..f0ddc223c1f83 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c @@ -75,24 +75,30 @@ int vc4_mock_atomic_add_output(struct kunit *test, int ret; encoder = vc4_find_encoder_by_type(drm, type); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder); + if (!encoder) + return -ENODEV; crtc = vc4_find_crtc_for_encoder(test, drm, encoder); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc); + if (!crtc) + return -ENODEV; output = encoder_to_vc4_dummy_output(encoder); conn = &output->connector; conn_state = drm_atomic_get_connector_state(state, conn); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); - KUNIT_EXPECT_EQ(test, ret, 0); + if (ret) + return ret; crtc_state = drm_atomic_get_crtc_state(state, crtc); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode); - KUNIT_EXPECT_EQ(test, ret, 0); + if (ret) + return ret; crtc_state->active = true; @@ -113,26 +119,32 @@ int vc4_mock_atomic_del_output(struct kunit *test, int ret; encoder = vc4_find_encoder_by_type(drm, type); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder); + if (!encoder) + return -ENODEV; crtc = vc4_find_crtc_for_encoder(test, drm, encoder); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc); + if (!crtc) + return -ENODEV; crtc_state = drm_atomic_get_crtc_state(state, crtc); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); crtc_state->active = false; ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); - KUNIT_ASSERT_EQ(test, ret, 0); + if (ret) + return ret; output = encoder_to_vc4_dummy_output(encoder); conn = &output->connector; conn_state = drm_atomic_get_connector_state(state, conn); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); - KUNIT_ASSERT_EQ(test, ret, 0); + if (ret) + return ret; return 0; } diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 40b4d084e3cee..91b589a497d02 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -198,7 +198,7 @@ static int 
vkms_crtc_atomic_check(struct drm_crtc *crtc, i++; } - vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL); + vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL); if (!vkms_state->active_planes) return -ENOMEM; vkms_state->num_active_planes = i; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 183cda50094cb..e8e49f13cfa2c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -51,11 +51,13 @@ static void vmw_bo_release(struct vmw_bo *vbo) mutex_lock(&res->dev_priv->cmdbuf_mutex); (void)vmw_resource_reserve(res, false, true); vmw_resource_mob_detach(res); + if (res->dirty) + res->func->dirty_free(res); if (res->coherent) vmw_bo_dirty_release(res->guest_memory_bo); res->guest_memory_bo = NULL; res->guest_memory_offset = 0; - vmw_resource_unreserve(res, false, false, false, NULL, + vmw_resource_unreserve(res, true, false, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -73,9 +75,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo) { struct vmw_bo *vbo = to_vmw_bo(&bo->base); - WARN_ON(vbo->dirty); WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); vmw_bo_release(vbo); + WARN_ON(vbo->dirty); kfree(vbo); } @@ -849,9 +851,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo) vmw_bo_placement_set(bo, domain, domain); } -void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) +int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) { - xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL); + return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL)); } void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index c21ba7ff77368..940c0a0b9c451 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -142,7 +142,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem); void vmw_bo_swap_notify(struct ttm_buffer_object *bo); -void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); +int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 2e52d73eba484..ea741bc4ac3fc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -4086,6 +4086,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, return 0; } +/* + * DMA fence callback to remove a seqno_waiter + */ +struct seqno_waiter_rm_context { + struct dma_fence_cb base; + struct vmw_private *dev_priv; +}; + +static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct seqno_waiter_rm_context *ctx = + container_of(cb, struct seqno_waiter_rm_context, base); + + vmw_seqno_waiter_remove(ctx->dev_priv); + kfree(ctx); +} + int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_private *dev_priv, void __user *user_commands, void *kernel_commands, @@ -4266,6 +4283,15 @@ int vmw_execbuf_process(struct drm_file *file_priv, } else { /* Link the fence with the FD created earlier */ fd_install(out_fence_fd, sync_file->file); + struct seqno_waiter_rm_context *ctx = + 
kmalloc(sizeof(*ctx), GFP_KERNEL); + if (ctx) { + ctx->dev_priv = dev_priv; + vmw_seqno_waiter_add(dev_priv); + if (dma_fence_add_callback(&fence->base, &ctx->base, + seqno_waiter_rm_cb) < 0) { + vmw_seqno_waiter_remove(dev_priv); + kfree(ctx); + } + } } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index a73af8a355fbf..c4d5fe5f330f9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, goto out_bad_resource; res = converter->base_obj_to_res(base); - kref_get(&res->kref); + vmw_resource_reference(res); *p_res = res; ret = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 5721c74da3e0b..d7a8070330ba5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -658,7 +658,7 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_user_surface *user_srf = container_of(srf, struct vmw_user_surface, srf); - WARN_ON_ONCE(res->dirty); + WARN_ON(res->dirty); if (user_srf->master) drm_master_put(&user_srf->master); kfree(srf->offsets); @@ -689,8 +689,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) * Dumb buffers own the resource and they'll unref the * resource themselves */ - if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb) - return; + WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb); vmw_resource_unreference(&res); } @@ -871,7 +870,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, vmw_resource_unreference(&res); goto out_unlock; } - vmw_bo_add_detached_resource(res->guest_memory_bo, res); + + ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } } tmp = vmw_resource_reference(&srf->res); @@ -1670,6 +1674,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } + if (res->guest_memory_bo) { + ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + } + tmp = vmw_resource_reference(res); ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, VMW_RES_SURFACE, @@ -1684,7 +1696,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, rep->handle = user_srf->prime.base.handle; rep->backup_size = res->guest_memory_size; if (res->guest_memory_bo) { - vmw_bo_add_detached_resource(res->guest_memory_bo, res); rep->buffer_map_handle = drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); rep->buffer_size = res->guest_memory_bo->tbo.base.size; @@ -2358,12 +2369,19 @@ int vmw_dumb_create(struct drm_file *file_priv, vbo = res->guest_memory_bo; vbo->is_dumb = true; vbo->dumb_surface = vmw_res_to_srf(res); - + drm_gem_object_put(&vbo->tbo.base); + /* + * Unset the user surface dtor since this is not actually exposed + * to userspace. 
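Leaving the dtor in place would let a base-object release tear down a surface that the dumb buffer still owns.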
The surface is owned via the dumb_buffer's GEM handle + */ + struct vmw_user_surface *usurf = container_of(vbo->dumb_surface, + struct vmw_user_surface, srf); + usurf->prime.base.refcount_release = NULL; err: if (res) vmw_resource_unreference(&res); - if (ret) - ttm_ref_object_base_unref(tfile, arg.rep.handle); + + ttm_ref_object_base_unref(tfile, arg.rep.handle); return ret; } diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 7bbe46a98ff1f..93e742c1f21e7 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_XE tristate "Intel Xe Graphics" - depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) + depends on DRM && PCI && MMU + depends on KUNIT || !KUNIT select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular # the shmem_readpage() which depends upon tmpfs diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h index 8f86a16dc5777..f58198cf2cf63 100644 --- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h @@ -52,6 +52,7 @@ struct guc_ct_buffer_desc { #define GUC_CTB_STATUS_OVERFLOW (1 << 0) #define GUC_CTB_STATUS_UNDERFLOW (1 << 1) #define GUC_CTB_STATUS_MISMATCH (1 << 2) +#define GUC_CTB_STATUS_DISABLED (1 << 3) u32 reserved[13]; } __packed; static_assert(sizeof(struct guc_ct_buffer_desc) == 64); diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h index cb6c7598824be..9c4cf050059ac 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h @@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe), NULL, size, start, end, - ttm_bo_type_kernel, flags); + ttm_bo_type_kernel, flags, 0); if (IS_ERR(bo)) { err = PTR_ERR(bo); bo = NULL; diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index c6e0c8d77a70f..e164e2d71e115 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -96,6 +96,8 @@ int xe_display_create(struct xe_device *xe) spin_lock_init(&xe->display.fb_tracking.lock); xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0); + if (!xe->display.hotplug.dp_wq) + return -ENOMEM; return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL); } @@ -352,6 +354,36 @@ void xe_display_pm_suspend(struct xe_device *xe) __xe_display_pm_suspend(xe, false); } +void xe_display_pm_shutdown(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_power_domains_disable(xe); + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (has_display(xe)) { + drm_kms_helper_poll_disable(&xe->drm); + intel_display_driver_disable_user_access(xe); + intel_display_driver_suspend(xe); + } + + xe_display_flush_cleanup_work(xe); + intel_dp_mst_suspend(xe); + intel_hpd_cancel_work(xe); + + if (has_display(xe)) + intel_display_driver_suspend_access(xe); + + intel_encoder_suspend_all(display); + intel_encoder_shutdown_all(display); + + intel_opregion_suspend(display, PCI_D3cold); + + intel_dmc_suspend(xe); +} + void xe_display_pm_runtime_suspend(struct xe_device *xe) { if (!xe->info.probe_display) @@ -376,6 +408,19 @@ 
void xe_display_pm_suspend_late(struct xe_device *xe) intel_display_power_suspend_late(xe); } +void xe_display_pm_shutdown_late(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + /* + * The only requirement is to reboot with display DC states disabled, + * for now leaving all display power wells in the INIT power domain + * enabled. + */ + intel_power_domains_driver_remove(xe); +} + void xe_display_pm_resume_early(struct xe_device *xe) { if (!xe->info.probe_display) diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h index bed55fd26f304..17afa537aee50 100644 --- a/drivers/gpu/drm/xe/display/xe_display.h +++ b/drivers/gpu/drm/xe/display/xe_display.h @@ -35,7 +35,9 @@ void xe_display_irq_reset(struct xe_device *xe); void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); void xe_display_pm_suspend(struct xe_device *xe); +void xe_display_pm_shutdown(struct xe_device *xe); void xe_display_pm_suspend_late(struct xe_device *xe); +void xe_display_pm_shutdown_late(struct xe_device *xe); void xe_display_pm_resume_early(struct xe_device *xe); void xe_display_pm_resume(struct xe_device *xe); void xe_display_pm_runtime_suspend(struct xe_device *xe); @@ -66,7 +68,9 @@ static inline void xe_display_irq_reset(struct xe_device *xe) {} static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} static inline void xe_display_pm_suspend(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown(struct xe_device *xe) {} static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {} static inline void xe_display_pm_resume_early(struct xe_device *xe) {} static inline void xe_display_pm_resume(struct xe_device *xe) {} static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {} diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index f99d901a3214f..9f941fc2e36bb 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val) { - struct xe_device *xe = dsb_buf->vma->bo->tile->xe; - iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val); - xe_device_l2_flush(xe); } u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) @@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size) { - struct xe_device *xe = dsb_buf->vma->bo->tile->xe; - WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf)); iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size); - xe_device_l2_flush(xe); } bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size) @@ -48,11 +42,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d if (!vma) return false; + /* Set scanout flag for WC mapping */ obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_ALIGN(size), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | - XE_BO_FLAG_GGTT); + XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT); if (IS_ERR(obj)) { kfree(vma); return false; @@ -73,5 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf) void 
intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf) { - /* TODO: add xe specific flush_map() for dsb buffer object. */ + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; + + /* + * The memory barrier here is to ensure coherency of DSB vs MMIO, + * both for weak ordering archs and discrete cards. + */ + xe_device_wmb(xe); + xe_device_l2_flush(xe); } diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index b58fc4ba2aacb..0558b106f8b60 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -153,7 +153,10 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, } vma->dpt = dpt; - vma->node = dpt->ggtt_node; + vma->node = dpt->ggtt_node[tile0->id]; + + /* Ensure DPT writes are flushed */ + xe_device_l2_flush(xe); return 0; } @@ -203,8 +206,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) align = max_t(u32, align, SZ_64K); - if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) { - vma->node = bo->ggtt_node; + if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) { + vma->node = bo->ggtt_node[ggtt->tile->id]; } else if (view->type == I915_GTT_VIEW_NORMAL) { u32 x, size = bo->ttm.base.size; @@ -318,8 +321,6 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, if (ret) goto err_unpin; - /* Ensure DPT writes are flushed */ - xe_device_l2_flush(xe); return vma; err_unpin: @@ -333,10 +334,12 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, static void __xe_unpin_fb_vma(struct i915_vma *vma) { + u8 tile_id = vma->node->ggtt->tile->id; + if (vma->dpt) xe_bo_unpin_map_no_vm(vma->dpt); - else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) || - vma->bo->ggtt_node->base.start != vma->node->base.start) + else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) || + vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start) xe_ggtt_node_remove(vma->node, false); ttm_bo_reserve(&vma->bo->ttm, false, false, NULL); diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 5404de2aea545..c160b015d178a 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -157,6 +157,7 @@ #define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108) #define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED) +#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12) #define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6) #define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED) diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h index 23f7dc5bbe995..51fd40ffafcb9 100644 --- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h +++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h @@ -128,7 +128,7 @@ struct xe_reg_mcr { * options. */ #define XE_REG_MCR(r_, ...) 
((const struct xe_reg_mcr){ \ - .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ + .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ }) static inline bool xe_reg_is_valid(struct xe_reg r) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 8acc4640f0a28..5f745d9ed6cc2 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -1130,6 +1130,8 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) { struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); + struct xe_tile *tile; + u8 id; if (bo->ttm.base.import_attach) drm_prime_gem_destroy(&bo->ttm.base, NULL); @@ -1137,8 +1139,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); - if (bo->ggtt_node && bo->ggtt_node->base.size) - xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo); + for_each_tile(tile, xe, id) + if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size) + xe_ggtt_remove_bo(tile->mem.ggtt, bo); #ifdef CONFIG_PROC_FS if (bo->client) @@ -1308,6 +1311,10 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, return ERR_PTR(-EINVAL); } + /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */ + if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT)) + return ERR_PTR(-EINVAL); + if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) && !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) && ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || @@ -1454,7 +1461,8 @@ static struct xe_bo * __xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - u16 cpu_caching, enum ttm_bo_type type, u32 flags) + u16 cpu_caching, enum ttm_bo_type type, u32 flags, + u64 alignment) { struct xe_bo *bo = NULL; int err; @@ -1483,6 +1491,8 @@ __xe_bo_create_locked(struct xe_device *xe, if (IS_ERR(bo)) return bo; + bo->min_align = alignment; + /* * Note that instead of taking a reference no the drm_gpuvm_resv_bo(), * to ensure the shared resv doesn't disappear under the bo, the bo @@ -1495,19 +1505,29 @@ __xe_bo_create_locked(struct xe_device *xe, bo->vm = vm; if (bo->flags & XE_BO_FLAG_GGTT) { - if (!tile && flags & XE_BO_FLAG_STOLEN) - tile = xe_device_get_root_tile(xe); + struct xe_tile *t; + u8 id; - xe_assert(xe, tile); + if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) { + if (!tile && flags & XE_BO_FLAG_STOLEN) + tile = xe_device_get_root_tile(xe); - if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { - err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, - start + bo->size, U64_MAX); - } else { - err = xe_ggtt_insert_bo(tile->mem.ggtt, bo); + xe_assert(xe, tile); + } + + for_each_tile(t, xe, id) { + if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t))) + continue; + + if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { + err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, + start + bo->size, U64_MAX); + } else { + err = xe_ggtt_insert_bo(t->mem.ggtt, bo); + } + if (err) + goto err_unlock_put_bo; } - if (err) - goto err_unlock_put_bo; } return bo; @@ -1523,16 +1543,18 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags) + enum ttm_bo_type type, u32 flags, u64 alignment) { - return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, + flags, alignment); } struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, 
struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags) { - return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, + flags, 0); } struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, @@ -1542,7 +1564,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, { struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, cpu_caching, ttm_bo_type_device, - flags | XE_BO_FLAG_USER); + flags | XE_BO_FLAG_USER, 0); if (!IS_ERR(bo)) xe_bo_unlock_vm_held(bo); @@ -1565,6 +1587,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile struct xe_vm *vm, size_t size, u64 offset, enum ttm_bo_type type, u32 flags) +{ + return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, + type, flags, 0); +} + +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment) { struct xe_bo *bo; int err; @@ -1576,7 +1609,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile flags |= XE_BO_FLAG_GGTT; bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, - flags | XE_BO_FLAG_NEEDS_CPU_ACCESS); + flags | XE_BO_FLAG_NEEDS_CPU_ACCESS, + alignment); if (IS_ERR(bo)) return bo; @@ -2355,14 +2389,18 @@ void xe_bo_put_commit(struct llist_head *deferred) void xe_bo_put(struct xe_bo *bo) { + struct xe_tile *tile; + u8 id; + might_sleep(); if (bo) { #ifdef CONFIG_PROC_FS if (bo->client) might_lock(&bo->client->bos_lock); #endif - if (bo->ggtt_node && bo->ggtt_node->ggtt) - might_lock(&bo->ggtt_node->ggtt->lock); + for_each_tile(tile, xe_bo_device(bo), id) + if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt) + might_lock(&bo->ggtt_node[id]->ggtt->lock); drm_gem_object_put(&bo->ttm.base); } } diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index d22269a230aa1..d04159c598465 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -39,10 +39,22 @@ #define XE_BO_FLAG_NEEDS_64K BIT(15) #define XE_BO_FLAG_NEEDS_2M BIT(16) #define XE_BO_FLAG_GGTT_INVALIDATE BIT(17) +#define XE_BO_FLAG_GGTT0 BIT(18) +#define XE_BO_FLAG_GGTT1 BIT(19) +#define XE_BO_FLAG_GGTT2 BIT(20) +#define XE_BO_FLAG_GGTT3 BIT(21) +#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \ + XE_BO_FLAG_GGTT1 | \ + XE_BO_FLAG_GGTT2 | \ + XE_BO_FLAG_GGTT3) + /* this one is trigger internally only */ #define XE_BO_FLAG_INTERNAL_TEST BIT(30) #define XE_BO_FLAG_INTERNAL_64K BIT(31) +#define XE_BO_FLAG_GGTTx(tile) \ + (XE_BO_FLAG_GGTT0 << (tile)->id) + #define XE_PTE_SHIFT 12 #define XE_PAGE_SIZE (1 << XE_PTE_SHIFT) #define XE_PTE_MASK (XE_PAGE_SIZE - 1) @@ -77,7 +89,7 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags); + enum ttm_bo_type type, u32 flags, u64 alignment); struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags); @@ -94,6 +106,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 offset, enum ttm_bo_type type, u32 flags); +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + 
struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment); struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, const void *data, size_t size, enum ttm_bo_type type, u32 flags); @@ -188,14 +206,24 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size) } static inline u32 -xe_bo_ggtt_addr(struct xe_bo *bo) +__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id) { - if (XE_WARN_ON(!bo->ggtt_node)) + struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id]; + + if (XE_WARN_ON(!ggtt_node)) return 0; - XE_WARN_ON(bo->ggtt_node->base.size > bo->size); - XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32)); - return bo->ggtt_node->base.start; + XE_WARN_ON(ggtt_node->base.size > bo->size); + XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32)); + return ggtt_node->base.start; +} + +static inline u32 +xe_bo_ggtt_addr(struct xe_bo *bo) +{ + xe_assert(xe_bo_device(bo), bo->tile); + + return __xe_bo_ggtt_addr(bo, bo->tile->id); } int xe_bo_vmap(struct xe_bo *bo); diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index 8fb2be0610035..6a40eedd9db10 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -152,11 +152,17 @@ int xe_bo_restore_kernel(struct xe_device *xe) } if (bo->flags & XE_BO_FLAG_GGTT) { - struct xe_tile *tile = bo->tile; + struct xe_tile *tile; + u8 id; - mutex_lock(&tile->mem.ggtt->lock); - xe_ggtt_map_bo(tile->mem.ggtt, bo); - mutex_unlock(&tile->mem.ggtt->lock); + for_each_tile(tile, xe, id) { + if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile))) + continue; + + mutex_lock(&tile->mem.ggtt->lock); + xe_ggtt_map_bo(tile->mem.ggtt, bo); + mutex_unlock(&tile->mem.ggtt->lock); + } } /* diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 2ed558ac2264a..aa298d33c2508 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -13,6 +13,7 @@ #include #include +#include "xe_device_types.h" #include "xe_ggtt_types.h" struct xe_device; @@ -39,8 +40,8 @@ struct xe_bo { struct ttm_place placements[XE_BO_MAX_PLACEMENTS]; /** @placement: current placement for this BO */ struct ttm_placement placement; - /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */ - struct xe_ggtt_node *ggtt_node; + /** @ggtt_node: Array of GGTT nodes if this BO is mapped in the GGTTs */ + struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE]; /** @vmap: iosys map of this buffer */ struct iosys_map vmap; /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. 
*/ @@ -76,6 +77,11 @@ struct xe_bo { /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */ struct list_head vram_userfault_link; + + /** @min_align: minimum alignment needed for this BO if different + * from default + */ + u64 min_align; }; #define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 23e02372a49db..82da51a6616a1 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -37,6 +37,7 @@ #include "xe_gt_printk.h" #include "xe_gt_sriov_vf.h" #include "xe_guc.h" +#include "xe_guc_pc.h" #include "xe_hw_engine_group.h" #include "xe_hwmon.h" #include "xe_irq.h" @@ -374,6 +375,11 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, return ERR_PTR(err); } +static bool xe_driver_flr_disabled(struct xe_device *xe) +{ + return xe_mmio_read32(xe_root_mmio_gt(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS; +} + /* * The driver-initiated FLR is the highest level of reset that we can trigger * from within the driver. It is different from the PCI FLR in that it doesn't @@ -387,17 +393,12 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, * if/when a new instance of i915 is bound to the device it will do a full * re-init anyway. */ -static void xe_driver_flr(struct xe_device *xe) +static void __xe_driver_flr(struct xe_device *xe) { const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */ struct xe_gt *gt = xe_root_mmio_gt(xe); int ret; - if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { - drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); - return; - } - drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); /* @@ -438,6 +439,16 @@ static void xe_driver_flr(struct xe_device *xe) xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); } +static void xe_driver_flr(struct xe_device *xe) +{ + if (xe_driver_flr_disabled(xe)) { + drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); + return; + } + + __xe_driver_flr(xe); +} + static void xe_driver_flr_fini(void *arg) { struct xe_device *xe = arg; @@ -797,6 +808,24 @@ void xe_device_remove(struct xe_device *xe) void xe_device_shutdown(struct xe_device *xe) { + struct xe_gt *gt; + u8 id; + + drm_dbg(&xe->drm, "Shutting down device\n"); + + if (xe_driver_flr_disabled(xe)) { + xe_display_pm_shutdown(xe); + + xe_irq_suspend(xe); + + for_each_gt(gt, xe, id) + xe_gt_shutdown(gt); + + xe_display_pm_shutdown_late(xe); + } else { + /* BOOM! */ + __xe_driver_flr(xe); + } } /** @@ -843,31 +872,37 @@ void xe_device_td_flush(struct xe_device *xe) if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) return; - if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) { + gt = xe_root_mmio_gt(xe); + if (XE_WA(gt, 16023588340)) { + /* A transient flush is not sufficient: flush the L2 */ xe_device_l2_flush(xe); - return; - } - - for_each_gt(gt, xe, id) { - if (xe_gt_is_media_type(gt)) - continue; - - if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)) - return; - - xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); - /* - * FIXME: We can likely do better here with our choice of - * timeout. Currently we just assume the worst case, i.e. 150us, - * which is believed to be sufficient to cover the worst case - * scenario on current platforms if all cache entries are - * transient and need to be flushed.. 
- */ - if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, - 150, NULL, false)) - xe_gt_err_once(gt, "TD flush timeout\n"); - - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + } else { + xe_guc_pc_apply_flush_freq_limit(&gt->uc.guc.pc); + + /* Execute TDF flush on all graphics GTs */ + for_each_gt(gt, xe, id) { + if (xe_gt_is_media_type(gt)) + continue; + + if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)) + return; + + xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); + /* + * FIXME: We can likely do better here with our choice of + * timeout. Currently we just assume the worst case, i.e. 150us, + * which is believed to be sufficient to cover the worst case + * scenario on current platforms if all cache entries are + * transient and need to be flushed.. + */ + if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, + 150, NULL, false)) + xe_gt_err_once(gt, "TD flush timeout\n"); + + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + } + + xe_guc_pc_remove_flush_freq_limit(&xe_root_mmio_gt(xe)->uc.guc.pc); } } diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index ff19eca5d358b..76e1092f51d92 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -198,6 +198,13 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = { .ggtt_set_pte = xe_ggtt_set_pte_and_flush, }; +static void dev_fini_ggtt(void *arg) +{ + struct xe_ggtt *ggtt = arg; + + drain_workqueue(ggtt->wq); +} + /** * xe_ggtt_init_early - Early GGTT initialization * @ggtt: the &xe_ggtt to be initialized @@ -254,6 +261,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) if (err) return err; + err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt); + if (err) + return err; + if (IS_SRIOV_VF(xe)) { err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0)); if (err) @@ -594,10 +605,10 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) u64 start; u64 offset, pte; - if (XE_WARN_ON(!bo->ggtt_node)) + if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id])) return; - start = bo->ggtt_node->base.start; + start = bo->ggtt_node[ggtt->tile->id]->base.start; for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) { pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index); @@ -608,15 +619,16 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 start, u64 end) { + u64 alignment = bo->min_align > 0 ? 
bo->min_align : XE_PAGE_SIZE; + u8 tile_id = ggtt->tile->id; int err; - u64 alignment = XE_PAGE_SIZE; if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) alignment = SZ_64K; - if (XE_WARN_ON(bo->ggtt_node)) { + if (XE_WARN_ON(bo->ggtt_node[tile_id])) { /* Someone's already inserted this BO in the GGTT */ - xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size); + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size); return 0; } @@ -626,19 +638,19 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile)); - bo->ggtt_node = xe_ggtt_node_init(ggtt); - if (IS_ERR(bo->ggtt_node)) { - err = PTR_ERR(bo->ggtt_node); - bo->ggtt_node = NULL; + bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt); + if (IS_ERR(bo->ggtt_node[tile_id])) { + err = PTR_ERR(bo->ggtt_node[tile_id]); + bo->ggtt_node[tile_id] = NULL; goto out; } mutex_lock(&ggtt->lock); - err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size, - alignment, 0, start, end, 0); + err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base, + bo->size, alignment, 0, start, end, 0); if (err) { - xe_ggtt_node_fini(bo->ggtt_node); - bo->ggtt_node = NULL; + xe_ggtt_node_fini(bo->ggtt_node[tile_id]); + bo->ggtt_node[tile_id] = NULL; } else { xe_ggtt_map_bo(ggtt, bo); } @@ -687,13 +699,15 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) */ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) { - if (XE_WARN_ON(!bo->ggtt_node)) + u8 tile_id = ggtt->tile->id; + + if (XE_WARN_ON(!bo->ggtt_node[tile_id])) return; /* This BO is not currently in the GGTT */ - xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size); + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size); - xe_ggtt_node_remove(bo->ggtt_node, + xe_ggtt_node_remove(bo->ggtt_node[tile_id], bo->flags & XE_BO_FLAG_GGTT_INVALIDATE); } diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h index 64b2ae6839db2..400b2e9e89ab9 100644 --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h @@ -51,7 +51,15 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched) static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched) { - drm_sched_resubmit_jobs(&sched->base); + struct drm_sched_job *s_job; + + list_for_each_entry(s_job, &sched->base.pending_list, list) { + struct drm_sched_fence *s_fence = s_job->s_fence; + struct dma_fence *hw_fence = s_fence->parent; + + if (hw_fence && !dma_fence_is_signaled(hw_fence)) + sched->base.ops->run_job(s_job); + } } static inline bool diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 3a7628fb5ad32..231ed53cf907c 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -98,14 +98,14 @@ void xe_gt_sanitize(struct xe_gt *gt) static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; if (!xe_gt_is_media_type(gt)) { @@ -114,14 +114,14 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); } - xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF); + 
xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; @@ -129,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) if (xe_gt_is_media_type(gt)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg &= ~CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -405,11 +405,14 @@ static void dump_pat_on_error(struct xe_gt *gt) static int gt_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) { + err = -ETIMEDOUT; goto err_hw_fence_irq; + } if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); @@ -444,14 +447,12 @@ static int gt_fw_domain_init(struct xe_gt *gt) */ gt->info.gmdid = xe_mmio_read32(gt, GMD_ID); - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - XE_WARN_ON(err); - + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: dump_pat_on_error(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); err_hw_fence_irq: for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(&gt->fence_irq[i]); @@ -461,11 +462,14 @@ static int gt_fw_domain_init(struct xe_gt *gt) static int all_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_hw_fence_irq; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = -ETIMEDOUT; + goto err_force_wake; + } xe_gt_mcr_set_implicit_defaults(gt); xe_wa_process_gt(gt); @@ -531,14 +535,12 @@ static int all_fw_domain_init(struct xe_gt *gt) if (IS_SRIOV_PF(gt_to_xe(gt))) xe_gt_sriov_pf_init_hw(gt); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); -err_hw_fence_irq: + xe_force_wake_put(gt_to_fw(gt), fw_ref); for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(&gt->fence_irq[i]); @@ -551,11 +553,12 @@ static int all_fw_domain_init(struct xe_gt *gt) */ int xe_gt_init_hwconfig(struct xe_gt *gt) { + unsigned int fw_ref; int err; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto out; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; xe_gt_mcr_init_early(gt); xe_pat_init(gt); @@ -573,8 +576,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt) xe_gt_enable_host_l2_vram(gt); out_fw: - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -out: + xe_force_wake_put(gt_to_fw(gt), fw_ref); return err; } @@ -744,6 +746,7 @@ static int do_gt_restart(struct xe_gt *gt) static int gt_reset(struct xe_gt *gt) { + unsigned int fw_ref; int err; if (xe_device_wedged(gt_to_xe(gt))) return -ECANCELED; @@ -764,9 +767,11 @@ static int gt_reset(struct xe_gt *gt) xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_msg; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = 
-ETIMEDOUT; + goto err_out; + } xe_uc_gucrc_disable(&gt->uc); xe_uc_stop_prepare(&gt->uc); @@ -784,8 +789,7 @@ static int gt_reset(struct xe_gt *gt) if (err) goto err_out; - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(gt_to_xe(gt)); xe_gt_info(gt, "reset done\n"); @@ -793,8 +797,7 @@ static int gt_reset(struct xe_gt *gt) return 0; err_out: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); -err_msg: + xe_force_wake_put(gt_to_fw(gt), fw_ref); XE_WARN_ON(xe_uc_start(&gt->uc)); err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); @@ -826,22 +829,25 @@ void xe_gt_reset_async(struct xe_gt *gt) void xe_gt_suspend_prepare(struct xe_gt *gt) { - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_uc_suspend_prepare(&gt->uc); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } int xe_gt_suspend(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "suspending\n"); xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = xe_uc_suspend(&gt->uc); @@ -852,19 +858,29 @@ int xe_gt_suspend(struct xe_gt *gt) xe_gt_disable_host_l2_vram(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "suspended\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err)); return err; } +void xe_gt_shutdown(struct xe_gt *gt) +{ + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + do_gt_reset(gt); + xe_force_wake_put(gt_to_fw(gt), fw_ref); +} + /** * xe_gt_sanitize_freq() - Restore saved frequencies if necessary. 
* @gt: the GT object @@ -887,11 +903,12 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int xe_gt_resume(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "resuming\n"); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = do_gt_restart(gt); @@ -900,14 +917,15 @@ int xe_gt_resume(struct xe_gt *gt) xe_gt_idle_enable_pg(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "resumed\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err)); return err; diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index ee138e9768a23..881f1cbc2c491 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -48,6 +48,7 @@ void xe_gt_record_user_engines(struct xe_gt *gt); void xe_gt_suspend_prepare(struct xe_gt *gt); int xe_gt_suspend(struct xe_gt *gt); +void xe_gt_shutdown(struct xe_gt *gt); int xe_gt_resume(struct xe_gt *gt); void xe_gt_reset_async(struct xe_gt *gt); void xe_gt_sanitize(struct xe_gt *gt); diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index ab76973f3e1e6..a05fde2c7b122 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -32,6 +32,7 @@ * Xe's Freq provides a sysfs API for frequency management: * * device/tile#/gt#/freq0/_freq *read-only* files: + * * - act_freq: The actual resolved frequency decided by PCODE. * - cur_freq: The current one requested by GuC PC to the PCODE. * - rpn_freq: The Render Performance (RP) N level, which is the minimal one. @@ -39,6 +40,7 @@ * - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one. * * device/tile#/gt#/freq0/_freq *read-write* files: + * * - min_freq: Min frequency request. * - max_freq: Max frequency request. * If max <= min, then freq_min becomes a fixed frequency request. diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index db540c8be6c7c..656c2ab6ca9f3 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -432,6 +432,7 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) #define PF_MULTIPLIER 8 pf_queue->num_dw = (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; + pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw); #undef PF_MULTIPLIER pf_queue->gt = gt; diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 3155825fa46ad..9deb9b44c3c3e 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -137,6 +137,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt) struct xe_gt_tlb_invalidation_fence *fence, *next; int pending_seqno; + /* + * we can get here before the CTs are even initialized if we're wedging + * very early, in which case there are not going to be any pending + * fences so we can bail immediately. + */ + if (!xe_guc_ct_initialized(&gt->uc.guc.ct)) + return; + /* * CT channel is already disabled at this point. No new TLB requests can * appear. 
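The xe_gt.c hunks above all apply one conversion: xe_force_wake_get() now returns a reference mask of the domains it actually woke (0 when nothing was acquired) instead of an error code, xe_force_wake_ref_has_domain() tests that mask, and xe_force_wake_put() releases exactly what get() returned. A minimal user-space sketch of the pattern, with names and the partial-failure behaviour modeled on the hunks rather than taken from the driver:

#include <stdio.h>

#define FW_GT    (1u << 0)	/* stand-ins for the driver's domain bits */
#define FW_MEDIA (1u << 1)
#define FW_ALL   (FW_GT | FW_MEDIA)

/* Returns the mask of domains actually acquired; FW_MEDIA is pretended
 * to have timed out here, so only FW_GT comes back. */
static unsigned int force_wake_get(unsigned int domains)
{
	return domains & FW_GT;
}

static int force_wake_ref_has_domain(unsigned int ref, unsigned int domains)
{
	return (ref & domains) == domains;
}

static void force_wake_put(unsigned int ref)
{
	printf("releasing domain mask 0x%x\n", ref);
}

int main(void)
{
	unsigned int fw_ref = force_wake_get(FW_ALL);

	if (!force_wake_ref_has_domain(fw_ref, FW_ALL)) {
		/* Partial acquisition: still release whatever was woken,
		 * mirroring the err_force_wake paths in the hunks above. */
		force_wake_put(fw_ref);
		return 1; /* the driver returns -ETIMEDOUT at this point */
	}

	/* ... safe to touch registers in the woken domains here ... */

	force_wake_put(fw_ref);
	return 0;
}

Because put() consumes the mask get() produced, every error path can release unconditionally: a partial wake is neither leaked nor double-warned, which is what lets the hunks drop the XE_WARN_ON() wrappers.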
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 52df28032a6ff..96373cdb366be 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -985,7 +985,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS); BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1); - ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask, + ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask, 1000000, &header, false); if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != @@ -1175,7 +1175,7 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - xe_guc_ct_print(&guc->ct, p, false); + xe_guc_ct_print(&guc->ct, p); xe_guc_submit_print(guc, p); } diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index cd6a5f09d631e..f1ce4e14dcb5f 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -25,12 +25,53 @@ #include "xe_gt_sriov_pf_monitor.h" #include "xe_gt_tlb_invalidation.h" #include "xe_guc.h" +#include "xe_guc_log.h" #include "xe_guc_relay.h" #include "xe_guc_submit.h" #include "xe_map.h" #include "xe_pm.h" #include "xe_trace_guc.h" +static void receive_g2h(struct xe_guc_ct *ct); +static void g2h_worker_func(struct work_struct *w); +static void safe_mode_worker_func(struct work_struct *w); +static void ct_exit_safe_mode(struct xe_guc_ct *ct); + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +enum { + /* Internal states, not error conditions */ + CT_DEAD_STATE_REARM, /* 0x0001 */ + CT_DEAD_STATE_CAPTURE, /* 0x0002 */ + + /* Error conditions */ + CT_DEAD_SETUP, /* 0x0004 */ + CT_DEAD_H2G_WRITE, /* 0x0008 */ + CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */ + CT_DEAD_G2H_READ, /* 0x0020 */ + CT_DEAD_G2H_RECV, /* 0x0040 */ + CT_DEAD_G2H_RELEASE, /* 0x0080 */ + CT_DEAD_DEADLOCK, /* 0x0100 */ + CT_DEAD_PROCESS_FAILED, /* 0x0200 */ + CT_DEAD_FAST_G2H, /* 0x0400 */ + CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */ + CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */ + CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */ + CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */ +}; + +static void ct_dead_worker_func(struct work_struct *w); +static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code); + +#define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code) +#else +#define CT_DEAD(ct, ctb, reason) \ + do { \ + struct guc_ctb *_ctb = (ctb); \ + if (_ctb) \ + _ctb->info.broken = true; \ + } while (0) +#endif + /* Used when a CT send wants to block and / or receive data */ struct g2h_fence { u32 *response_buffer; @@ -147,14 +188,11 @@ static void guc_ct_fini(struct drm_device *drm, void *arg) { struct xe_guc_ct *ct = arg; + ct_exit_safe_mode(ct); destroy_workqueue(ct->g2h_wq); xa_destroy(&ct->fence_lookup); } -static void receive_g2h(struct xe_guc_ct *ct); -static void g2h_worker_func(struct work_struct *w); -static void safe_mode_worker_func(struct work_struct *w); - static void primelockdep(struct xe_guc_ct *ct) { if (!IS_ENABLED(CONFIG_LOCKDEP)) @@ -182,7 +220,11 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) spin_lock_init(&ct->fast_lock); xa_init(&ct->fence_lookup); INIT_WORK(&ct->g2h_worker, g2h_worker_func); - INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func); + INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + spin_lock_init(&ct->dead.lock); + INIT_WORK(&ct->dead.worker, 
ct_dead_worker_func); +#endif init_waitqueue_head(&ct->wq); init_waitqueue_head(&ct->g2h_fence_wq); @@ -419,10 +461,22 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) if (ct_needs_safe_mode(ct)) ct_enter_safe_mode(ct); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + /* + * The CT has now been reset so the dumper can be re-armed + * after any existing dead state has been dumped. + */ + spin_lock_irq(&ct->dead.lock); + if (ct->dead.reason) + ct->dead.reason |= (1 << CT_DEAD_STATE_REARM); + spin_unlock_irq(&ct->dead.lock); +#endif + return 0; err_out: xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err)); + CT_DEAD(ct, NULL, SETUP); return err; } @@ -454,6 +508,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct) */ void xe_guc_ct_stop(struct xe_guc_ct *ct) { + if (!xe_guc_ct_initialized(ct)) + return; + xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED); stop_g2h_handler(ct); } @@ -466,6 +523,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len) if (cmd_len > h2g->info.space) { h2g->info.head = desc_read(ct_to_xe(ct), h2g, head); + + if (h2g->info.head > h2g->info.size) { + struct xe_device *xe = ct_to_xe(ct); + u32 desc_status = desc_read(xe, h2g, status); + + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + + xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n", + h2g->info.head, h2g->info.size); + CT_DEAD(ct, h2g, H2G_HAS_ROOM); + return false; + } + h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head, h2g->info.size) - h2g->info.resv_space; @@ -521,10 +591,24 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) { + bool bad = false; + lockdep_assert_held(&ct->fast_lock); - xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <= - ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); - xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding); + + bad = ct->ctbs.g2h.info.space + g2h_len > + ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space; + bad |= !ct->g2h_outstanding; + + if (bad) { + xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n", + ct->ctbs.g2h.info.space, g2h_len, + ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space, + ct->ctbs.g2h.info.space + g2h_len, + ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space, + ct->g2h_outstanding); + CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE); + return; + } ct->ctbs.g2h.info.space += g2h_len; if (!--ct->g2h_outstanding) @@ -551,12 +635,43 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 full_len; struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds, tail * sizeof(u32)); + u32 desc_status; full_len = len + GUC_CTB_HDR_LEN; lockdep_assert_held(&ct->lock); xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN); - xe_gt_assert(gt, tail <= h2g->info.size); + + desc_status = desc_read(xe, h2g, status); + if (desc_status) { + xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status); + goto corrupted; + } + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + u32 desc_tail = desc_read(xe, h2g, tail); + u32 desc_head = desc_read(xe, h2g, head); + + if (tail != desc_tail) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH); + xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail); + goto corrupted; + } + + if (tail > h2g->info.size) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n", + tail, h2g->info.size); + goto corrupted; + } + 
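/*
 * A worked example of the ring math the surrounding range checks protect,
 * assuming the kernel's usual CIRC_SPACE() definition (which requires a
 * power-of-two ring size): with size = 1024, writer tail = 90 and reader
 * head = 100, CIRC_SPACE(tail, head, size) = (100 - 90 - 1) & 1023 = 9
 * entries writable, as used by h2g_has_room() above. A head or tail at or
 * beyond size would make that masked subtraction wrap silently, which is
 * why out-of-range offsets are flagged with GUC_CTB_STATUS_OVERFLOW
 * instead of being fed into the space calculation.
 */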
+ if (desc_head >= h2g->info.size) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n", + desc_head, h2g->info.size); + goto corrupted; + } + } /* Command will wrap, zero fill (NOPs), return and check credits again */ if (tail + full_len > h2g->info.size) { @@ -609,6 +724,10 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, desc_read(xe, h2g, head), h2g->info.tail); return 0; + +corrupted: + CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE); + return -EPIPE; } /* @@ -638,7 +757,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u16 seqno; int ret; - xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); + xe_gt_assert(gt, xe_guc_ct_initialized(ct)); xe_gt_assert(gt, !g2h_len || !g2h_fence); xe_gt_assert(gt, !num_g2h || !g2h_fence); xe_gt_assert(gt, !g2h_len || num_g2h); @@ -716,7 +835,6 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, { struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); - struct drm_printer p = xe_gt_info_printer(gt); unsigned int sleep_period_ms = 1; int ret; @@ -769,8 +887,13 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, goto broken; #undef g2h_avail - if (dequeue_one_g2h(ct) < 0) + ret = dequeue_one_g2h(ct); + if (ret < 0) { + if (ret != -ECANCELED) + xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)", + ERR_PTR(ret)); goto broken; + } goto try_again; } @@ -779,8 +902,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, broken: xe_gt_err(gt, "No forward process on H2G, reset required\n"); - xe_guc_ct_print(ct, &p, true); - ct->ctbs.h2g.info.broken = true; + CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK); return -EDEADLK; } @@ -848,7 +970,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret) #define ct_alive(ct) \ (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \ !ct->ctbs.g2h.info.broken) - if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) + if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) return false; #undef ct_alive @@ -1046,6 +1168,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) else xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n", type, fence); + CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE); return -EPROTO; } @@ -1053,6 +1176,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) g2h_fence = xa_erase(&ct->fence_lookup, fence); if (unlikely(!g2h_fence)) { /* Don't tear down channel, as send could've timed out */ + /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */ xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence); g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); return 0; @@ -1097,7 +1221,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) { xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n", origin); - ct->ctbs.g2h.info.broken = true; + CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN); return -EPROTO; } @@ -1115,7 +1239,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) default: xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n", type); - ct->ctbs.g2h.info.broken = true; + CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE); ret = -EOPNOTSUPP; } @@ -1192,9 +1316,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action); } - if (ret) + if 
(ret) { xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", action, ERR_PTR(ret)); + CT_DEAD(ct, NULL, PROCESS_FAILED); + } return 0; } @@ -1204,12 +1330,12 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); struct guc_ctb *g2h = &ct->ctbs.g2h; - u32 tail, head, len; + u32 tail, head, len, desc_status; s32 avail; u32 action; u32 *hxg; - xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); + xe_gt_assert(gt, xe_guc_ct_initialized(ct)); lockdep_assert_held(&ct->fast_lock); if (ct->state == XE_GUC_CT_STATE_DISABLED) return 0; @@ -1223,6 +1349,63 @@ xe_gt_assert(gt, xe_guc_ct_enabled(ct)); + desc_status = desc_read(xe, g2h, status); + if (desc_status) { + if (desc_status & GUC_CTB_STATUS_DISABLED) { + /* + * Potentially valid if a CLIENT_RESET request resulted in + * contexts/engines being reset. But should never happen as + * no contexts should be active when CLIENT_RESET is sent. + */ + xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n"); + desc_status &= ~GUC_CTB_STATUS_DISABLED; + } + + if (desc_status) { + xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status); + goto corrupted; + } + } + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + u32 desc_tail = desc_read(xe, g2h, tail); + /* + u32 desc_head = desc_read(xe, g2h, head); + + * info.head and desc_head are updated back-to-back at the end of + * this function and nowhere else. Hence, they cannot be different + * unless two g2h_read calls are running concurrently. Which is not + * possible because it is guarded by ct->fast_lock. And yet, some + * discrete platforms are regularly hitting this error :(. + * + * desc_head rolling backwards shouldn't cause any noticeable + * problems - just a delay in GuC being allowed to proceed past that + * point in the queue. So for now, just disable the error until it + * can be root caused. 
+ * + if (g2h->info.head != desc_head) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH); + xe_gt_err(gt, "CT read: head was modified %u != %u\n", + desc_head, g2h->info.head); + goto corrupted; + } + */ + + if (g2h->info.head > g2h->info.size) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT read: head out of range: %u vs %u\n", + g2h->info.head, g2h->info.size); + goto corrupted; + } + + if (desc_tail >= g2h->info.size) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n", + desc_tail, g2h->info.size); + goto corrupted; + } + } + /* Calculate DW available to read */ tail = desc_read(xe, g2h, tail); avail = tail - g2h->info.head; @@ -1239,9 +1422,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) if (len > avail) { xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n", avail, len); - g2h->info.broken = true; - - return -EPROTO; + goto corrupted; } head = (g2h->info.head + 1) % g2h->info.size; @@ -1287,6 +1468,10 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) action, len, g2h->info.head, tail); return len; + +corrupted: + CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ); + return -EPROTO; } static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) @@ -1313,9 +1498,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) xe_gt_warn(gt, "NOT_POSSIBLE"); } - if (ret) + if (ret) { xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", action, ERR_PTR(ret)); + CT_DEAD(ct, NULL, FAST_G2H); + } } /** @@ -1375,7 +1562,6 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct) static void receive_g2h(struct xe_guc_ct *ct) { - struct xe_gt *gt = ct_to_gt(ct); bool ongoing; int ret; @@ -1412,9 +1598,8 @@ static void receive_g2h(struct xe_guc_ct *ct) mutex_unlock(&ct->lock); if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) { - struct drm_printer p = xe_gt_info_printer(gt); - - xe_guc_ct_print(ct, &p, false); + xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret); + CT_DEAD(ct, NULL, G2H_RECV); kick_reset(ct); } } while (ret == 1); @@ -1442,9 +1627,8 @@ static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb, snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32), atomic ? GFP_ATOMIC : GFP_KERNEL); - if (!snapshot->cmds) { - drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n"); + drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CT info will be available.\n"); return; } @@ -1525,7 +1709,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, atomic ? GFP_ATOMIC : GFP_KERNEL); if (!snapshot) { - drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n"); + xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n"); return NULL; } @@ -1589,16 +1773,119 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) * xe_guc_ct_print - GuC CT Print. * @ct: GuC CT. * @p: drm_printer where it will be printed out. - * @atomic: Boolean to indicate if this is called from atomic context like - * reset or CTB handler or from some regular path like debugfs. * * This function quickly capture a snapshot and immediately print it out. 
*/ -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic) +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p) { struct xe_guc_ct_snapshot *snapshot; - snapshot = xe_guc_ct_snapshot_capture(ct, atomic); + snapshot = xe_guc_ct_snapshot_capture(ct, false); xe_guc_ct_snapshot_print(snapshot, p); xe_guc_ct_snapshot_free(snapshot); } + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code) +{ + struct xe_guc_log_snapshot *snapshot_log; + struct xe_guc_ct_snapshot *snapshot_ct; + struct xe_guc *guc = ct_to_guc(ct); + unsigned long flags; + bool have_capture; + + if (ctb) + ctb->info.broken = true; + + /* Ignore further errors after the first dump until a reset */ + if (ct->dead.reported) + return; + + spin_lock_irqsave(&ct->dead.lock, flags); + + /* And only capture one dump at a time */ + have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE); + ct->dead.reason |= (1 << reason_code) | + (1 << CT_DEAD_STATE_CAPTURE); + + spin_unlock_irqrestore(&ct->dead.lock, flags); + + if (have_capture) + return; + + snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true); + snapshot_ct = xe_guc_ct_snapshot_capture((ct), true); + + spin_lock_irqsave(&ct->dead.lock, flags); + + if (ct->dead.snapshot_log || ct->dead.snapshot_ct) { + xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n"); + xe_guc_log_snapshot_free(snapshot_log); + xe_guc_ct_snapshot_free(snapshot_ct); + } else { + ct->dead.snapshot_log = snapshot_log; + ct->dead.snapshot_ct = snapshot_ct; + } + + spin_unlock_irqrestore(&ct->dead.lock, flags); + + queue_work(system_unbound_wq, &(ct)->dead.worker); +} + +static void ct_dead_print(struct xe_dead_ct *dead) +{ + struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead); + struct xe_device *xe = ct_to_xe(ct); + struct xe_gt *gt = ct_to_gt(ct); + static int g_count; + struct drm_printer ip = xe_gt_info_printer(gt); + struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count); + + if (!dead->reason) { + xe_gt_err(gt, "CTB is dead for no reason!?\n"); + return; + } + + drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason); + + /* Can't generate a genuine core dump at this point, so just do the good bits */ + drm_puts(&lp, "**** Xe Device Coredump ****\n"); + xe_device_snapshot_print(xe, &lp); + + drm_printf(&lp, "**** GT #%d ****\n", gt->info.id); + drm_printf(&lp, "\tTile: %d\n", gt->tile->id); + + drm_puts(&lp, "**** GuC Log ****\n"); + xe_guc_log_snapshot_print(dead->snapshot_log, &lp); + + drm_puts(&lp, "**** GuC CT ****\n"); + xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp); + + drm_puts(&lp, "Done.\n"); +} + +static void ct_dead_worker_func(struct work_struct *w) +{ + struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker); + + if (!ct->dead.reported) { + ct->dead.reported = true; + ct_dead_print(&ct->dead); + } + + spin_lock_irq(&ct->dead.lock); + + xe_guc_log_snapshot_free(ct->dead.snapshot_log); + ct->dead.snapshot_log = NULL; + xe_guc_ct_snapshot_free(ct->dead.snapshot_ct); + ct->dead.snapshot_ct = NULL; + + if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) { + /* A reset has occurred so re-arm the error reporting */ + ct->dead.reason = 0; + ct->dead.reported = false; + } + + spin_unlock_irq(&ct->dead.lock); +} +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index 190202fce2d04..c7ac9407b861e 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -21,7 +21,12 @@ 
xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic); void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_printer *p); void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot); -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic); +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p); + +static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct) +{ + return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED; +} static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct) { diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h index 761cb90312984..85e127ec91d7a 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h @@ -86,6 +86,24 @@ enum xe_guc_ct_state { XE_GUC_CT_STATE_ENABLED, }; +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +/** struct xe_dead_ct - Information for debugging a dead CT */ +struct xe_dead_ct { + /** @lock: protects memory allocation/free operations, and @reason updates */ + spinlock_t lock; + /** @reason: bit mask of CT_DEAD_* reason codes */ + unsigned int reason; + /** @reported: for preventing multiple dumps per error sequence */ + bool reported; + /** @worker: worker thread to get out of interrupt context before dumping */ + struct work_struct worker; + /** snapshot_ct: copy of CT state and CTB content at point of error */ + struct xe_guc_ct_snapshot *snapshot_ct; + /** snapshot_log: copy of GuC log at point of error */ + struct xe_guc_log_snapshot *snapshot_log; +}; +#endif + /** * struct xe_guc_ct - GuC command transport (CT) layer * @@ -128,6 +146,11 @@ struct xe_guc_ct { u32 msg[GUC_CTB_MSG_MAX_LEN]; /** @fast_msg: Message buffer */ u32 fast_msg[GUC_CTB_MSG_MAX_LEN]; + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + /** @dead: information for debugging dead CTs */ + struct xe_dead_ct dead; +#endif }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 034b29984d5ed..af02803c145bf 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -6,6 +6,9 @@ #include "xe_guc_pc.h" #include +#include +#include +#include #include #include @@ -47,6 +50,12 @@ #define LNL_MERT_FREQ_CAP 800 #define BMG_MERT_FREQ_CAP 2133 +#define BMG_MIN_FREQ 1200 +#define BMG_MERT_FLUSH_FREQ_CAP 2600 + +#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */ +#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */ +#define SLPC_ACT_FREQ_TIMEOUT_MS 100 /** * DOC: GuC Power Conservation (PC) @@ -133,6 +142,36 @@ static int wait_for_pc_state(struct xe_guc_pc *pc, return -ETIMEDOUT; } +static int wait_for_flush_complete(struct xe_guc_pc *pc) +{ + const unsigned long timeout = msecs_to_jiffies(30); + + if (!wait_var_event_timeout(&pc->flush_freq_limit, + !atomic_read(&pc->flush_freq_limit), + timeout)) + return -ETIMEDOUT; + + return 0; +} + +static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq) +{ + int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC; + int slept, wait = 10; + + for (slept = 0; slept < timeout_us;) { + if (xe_guc_pc_get_act_freq(pc) <= freq) + return 0; + + usleep_range(wait, wait << 1); + slept += wait; + wait <<= 1; + if (slept + wait > timeout_us) + wait = timeout_us - slept; + } + + return -ETIMEDOUT; +} static int pc_action_reset(struct xe_guc_pc *pc) { struct xe_guc_ct *ct = pc_to_ct(pc); @@ -584,6 +623,11 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) { int ret; + if (XE_WA(pc_to_gt(pc), 22019338487)) { 
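/*
 * Assumed pairing, based on the helpers elsewhere in this patch:
 * xe_guc_pc_apply_flush_freq_limit() sets flush_freq_limit and clamps the
 * max to BMG_MERT_FLUSH_FREQ_CAP while an L2 flush runs, and
 * xe_guc_pc_remove_flush_freq_limit() clears the flag and calls
 * wake_up_var(). Parking user requests here until then keeps a sysfs
 * write from overwriting the temporary clamp mid-flush; on timeout the
 * caller gets -EAGAIN and can simply retry.
 */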
+ if (wait_for_flush_complete(pc) != 0) + return -EAGAIN; + } + mutex_lock(&pc->freq_lock); if (!pc->freq_ready) { /* Might be in the middle of a gt reset */ @@ -793,6 +837,106 @@ static int pc_adjust_requested_freq(struct xe_guc_pc *pc) return ret; } +static bool needs_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + + return XE_WA(gt, 22019338487) && + pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP; +} + +/** + * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush + * @pc: the xe_guc_pc object + * + * As per the WA, reduce max GT frequency during L2 cache flush + */ +void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + u32 max_freq; + int ret; + + if (!needs_flush_freq_limit(pc)) + return; + + mutex_lock(&pc->freq_lock); + + if (!pc->freq_ready) { + mutex_unlock(&pc->freq_lock); + return; + } + + ret = pc_action_query_task_state(pc); + if (ret) { + mutex_unlock(&pc->freq_lock); + return; + } + + max_freq = pc_get_max_freq(pc); + if (max_freq > BMG_MERT_FLUSH_FREQ_CAP) { + ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP); + if (ret) { + xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n", + BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret)); + mutex_unlock(&pc->freq_lock); + return; + } + + atomic_set(&pc->flush_freq_limit, 1); + + /* + * If user has previously changed max freq, stash that value to + * restore later, otherwise use the current max. New user + * requests wait on flush. + */ + if (pc->user_requested_max != 0) + pc->stashed_max_freq = pc->user_requested_max; + else + pc->stashed_max_freq = max_freq; + } + + mutex_unlock(&pc->freq_lock); + + /* + * Wait for actual freq to go below the flush cap: even if the previous + * max was below cap, the current one might still be above it + */ + ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP); + if (ret) + xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n", + BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret)); +} + +/** + * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes. + * @pc: the xe_guc_pc object + * + * Retrieve the previous GT max frequency value. 
+ */ +void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + int ret = 0; + + if (!needs_flush_freq_limit(pc)) + return; + + if (!atomic_read(&pc->flush_freq_limit)) + return; + + mutex_lock(&pc->freq_lock); + + ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq); + if (ret) + xe_gt_err_once(gt, "Failed to restore max freq %u:%d", + pc->stashed_max_freq, ret); + + atomic_set(&pc->flush_freq_limit, 0); + mutex_unlock(&pc->freq_lock); + wake_up_var(&pc->flush_freq_limit); +} + static int pc_set_mert_freq_cap(struct xe_guc_pc *pc) { int ret = 0; @@ -975,7 +1119,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) goto out; } - memset(pc->bo->vmap.vaddr, 0, size); + xe_map_memset(xe, &pc->bo->vmap, 0, 0, size); slpc_shared_data_write(pc, header.size, size); ret = pc_action_reset(pc); diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h index efda432fadfc8..7154b3aab0d84 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.h +++ b/drivers/gpu/drm/xe/xe_guc_pc.h @@ -34,5 +34,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc); void xe_guc_pc_init_early(struct xe_guc_pc *pc); int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc); void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc); +void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc); +void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc); #endif /* _XE_GUC_PC_H_ */ diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h index 13810be015db5..5b86d91296cb9 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h @@ -15,6 +15,8 @@ struct xe_guc_pc { /** @bo: GGTT buffer object that is shared with GuC PC */ struct xe_bo *bo; + /** @flush_freq_limit: 1 when max freq changes are limited by driver */ + atomic_t flush_freq_limit; /** @rp0_freq: HW RP0 frequency - The Maximum one */ u32 rp0_freq; /** @rpe_freq: HW RPe frequency - The Efficient one */ diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 20d05efdd406e..0e17820a35e2c 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -227,6 +227,17 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) static void guc_submit_fini(struct drm_device *drm, void *arg) { struct xe_guc *guc = arg; + struct xe_device *xe = guc_to_xe(guc); + struct xe_gt *gt = guc_to_gt(guc); + int ret; + + ret = wait_event_timeout(guc->submission_state.fini_wq, + xa_empty(&guc->submission_state.exec_queue_lookup), + HZ * 5); + + drain_workqueue(xe->destroy_wq); + + xe_gt_assert(gt, ret); xa_destroy(&guc->submission_state.exec_queue_lookup); } @@ -298,6 +309,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) primelockdep(guc); + guc->submission_state.initialized = true; + return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); } @@ -826,6 +839,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc) xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); + /* + * If device is being wedged even before submission_state is + * initialized, there's nothing to do here. 
+ */ + if (!guc->submission_state.initialized) + return; + err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, guc_submit_wedged_fini, guc); if (err) { @@ -1702,6 +1722,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc) { int ret; + if (!guc->submission_state.initialized) + return 0; + /* * Using an atomic here rather than submission_state.lock as this * function can be called while holding the CT lock (engine reset diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index ed150fc09ad04..7842b71e68beb 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -74,6 +74,11 @@ struct xe_guc { struct mutex lock; /** @submission_state.enabled: submission is enabled */ bool enabled; + /** + * @submission_state.initialized: mark when submission state is + * even initialized - before that not even the lock is valid + */ + bool initialized; /** @submission_state.fini_wq: submit fini wait queue */ wait_queue_head_t fini_wq; } submission_state; diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 5f2c368c35adb..14c3a476597a7 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -173,7 +173,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt) if (ccs_mask & (BIT(0)|BIT(1))) xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask); + xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask); } if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { @@ -504,7 +504,7 @@ static void gt_irq_reset(struct xe_tile *tile) if (ccs_mask & (BIT(0)|BIT(1))) xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); if ((tile->media_gt && xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) || diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 8999ac511555f..485658f69fba8 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level } lmtt_assert(lmtt, xe_bo_is_vram(bo)); + lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE)); + + xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size); pt->level = level; pt->bo = bo; @@ -91,6 +94,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level static void lmtt_pt_free(struct xe_lmtt_pt *pt) { + lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n", + pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE)); + xe_bo_unpin_map_no_vm(pt->bo); kfree(pt); } @@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt, switch (lmtt->ops->lmtt_pte_size(level)) { case sizeof(u32): + lmtt_assert(lmtt, !overflows_type(pte, u32)); + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte); break; case sizeof(u64): + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte); break; default: diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 2d4e38b3bab19..ce6d2167b94ad 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -874,7 +874,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe) 
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm) { - u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile); + u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt)); xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc)); xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc)); @@ -905,6 +905,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, int err; kref_init(&lrc->refcount); + lrc->gt = gt; lrc->flags = 0; lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class); if (xe_gt_has_indirect_ring_state(gt)) @@ -923,7 +924,6 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, return PTR_ERR(lrc->bo); lrc->size = lrc_size; - lrc->tile = gt_to_tile(hwe->gt); lrc->ring.size = ring_size; lrc->ring.tail = 0; lrc->ctx_timestamp = 0; diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h index 71ecb453f811a..cd38586ae9893 100644 --- a/drivers/gpu/drm/xe/xe_lrc_types.h +++ b/drivers/gpu/drm/xe/xe_lrc_types.h @@ -25,8 +25,8 @@ struct xe_lrc { /** @size: size of lrc including any indirect ring state page */ u32 size; - /** @tile: tile which this LRC belongs to */ - struct xe_tile *tile; + /** @gt: gt which this LRC belongs to */ + struct xe_gt *gt; /** @flags: LRC flags */ #define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1 diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 6431697c61693..c2da2691fd2b9 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -860,7 +860,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it)) xe_res_next(&src_it, src_L0); else - emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, + emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat, &src_it, src_L0, src); if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 025d649434673..da09c26249f5f 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -164,7 +164,6 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_asid = 1, \ .has_atomic_enable_pte_bit = 1, \ .has_flat_ccs = 1, \ - .has_indirect_ring_state = 1, \ .has_range_tlb_invalidation = 1, \ .has_usm = 1, \ .va_bits = 48, \ @@ -910,6 +909,7 @@ static int xe_pci_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3cold); return 0; } diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 06f50aa313267..46c73ff10c747 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -682,11 +682,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe) } /** - * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold + * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold * @xe: xe device instance - * @threshold: VRAM size in bites for the D3cold threshold + * @threshold: VRAM size in MiB for the D3cold threshold * - * Returns 0 for success, negative error code otherwise. 
+ * Return: + * * 0 - success + * * -EINVAL - invalid argument */ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold) { diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h index ba0f61e7d2d6b..4ff023b5d040d 100644 --- a/drivers/gpu/drm/xe/xe_trace_bo.h +++ b/drivers/gpu/drm/xe/xe_trace_bo.h @@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(xe_vm, ), TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev), - __entry->vm, __entry->asid) + __entry->vm, __entry->asid) ); DEFINE_EVENT(xe_vm, xe_vm_kill, diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index ef84fa757b26f..34e38bb167bac 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -57,12 +57,35 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe) return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe); } +static u32 get_wopcm_size(struct xe_device *xe) +{ + u32 wopcm_size; + u64 val; + + val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED); + val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val); + + switch (val) { + case 0x5 ... 0x6: + val--; + fallthrough; + case 0x0 ... 0x3: + wopcm_size = (1U << val) * SZ_1M; + break; + default: + WARN(1, "Missing case wopcm_size=%llx\n", val); + wopcm_size = 0; + } + + return wopcm_size; +} + static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) { struct xe_tile *tile = xe_device_get_root_tile(xe); struct xe_gt *mmio = xe_root_mmio_gt(xe); struct pci_dev *pdev = to_pci_dev(xe->drm.dev); - u64 stolen_size; + u64 stolen_size, wopcm_size; u64 tile_offset; u64 tile_size; @@ -74,7 +97,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base)) return 0; + /* Carve out the top of DSM as it contains the reserved WOPCM region */ + wopcm_size = get_wopcm_size(xe); + if (drm_WARN_ON(&xe->drm, !wopcm_size)) + return 0; + stolen_size = tile_size - mgr->stolen_base; + stolen_size -= wopcm_size; /* Verify usage fits in the actual resource available */ if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR)) @@ -89,29 +118,6 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) return ALIGN_DOWN(stolen_size, SZ_1M); } -static u32 get_wopcm_size(struct xe_device *xe) -{ - u32 wopcm_size; - u64 val; - - val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED); - val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val); - - switch (val) { - case 0x5 ... 0x6: - val--; - fallthrough; - case 0x0 ... 0x3: - wopcm_size = (1U << val) * SZ_1M; - break; - default: - WARN(1, "Missing case wopcm_size=%llx\n", val); - wopcm_size = 0; - } - - return wopcm_size; -} - static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index de257a032225f..15fd497c920c8 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1477,8 +1477,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) * scheduler drops all the references of it, hence protecting the VM * for this case is necessary. 
*/ - if (flags & XE_VM_FLAG_LR_MODE) + if (flags & XE_VM_FLAG_LR_MODE) { + INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); xe_pm_runtime_get_noresume(xe); + } vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm); if (!vm_resv_obj) { @@ -1523,10 +1525,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) vm->batch_invalidate_tlb = true; } - if (vm->flags & XE_VM_FLAG_LR_MODE) { - INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); + if (vm->flags & XE_VM_FLAG_LR_MODE) vm->batch_invalidate_tlb = false; - } /* Fill pt_root after allocating scratch tables */ for_each_tile(tile, xe, id) { diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 0a1905f8d380a..aea6034a81079 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -783,6 +783,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) }, + { XE_RTP_NAME("22021007897"), + XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE)) + }, /* Xe3_LPG */ { XE_RTP_NAME("14021490052"), diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index bcdd168cdc6d7..c5bdf0f1b32f7 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -52,6 +52,10 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad"); #define FEATURE_KBD_LED_REPORT_ID1 0x5d #define FEATURE_KBD_LED_REPORT_ID2 0x5e +#define ROG_ALLY_REPORT_SIZE 64 +#define ROG_ALLY_X_MIN_MCU 313 +#define ROG_ALLY_MIN_MCU 319 + #define SUPPORT_KBD_BACKLIGHT BIT(0) #define MAX_TOUCH_MAJOR 8 @@ -84,6 +88,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad"); #define QUIRK_MEDION_E1239T BIT(10) #define QUIRK_ROG_NKEY_KEYBOARD BIT(11) #define QUIRK_ROG_CLAYMORE_II_KEYBOARD BIT(12) +#define QUIRK_ROG_ALLY_XPAD BIT(13) #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ QUIRK_NO_INIT_REPORTS | \ @@ -534,9 +539,99 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev) return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT); } +/* + * We don't care about any other part of the string except the version section. 
+ * Example strings: FGA80100.RC72LA.312_T01, FGA80100.RC71LS.318_T01 + * The bytes "5a 05 03 31 00 1a 13" and possibly more come before the version + * string, and there may be additional bytes after the version string such as + * "75 00 74 00 65 00" or a postfix such as "_T01" + */ +static int mcu_parse_version_string(const u8 *response, size_t response_size) +{ + const u8 *end = response + response_size; + const u8 *p = response; + int dots, err, version; + char buf[4]; + + dots = 0; + while (p < end && dots < 2) { + if (*p++ == '.') + dots++; + } + + if (dots != 2 || p >= end || (p + 3) >= end) + return -EINVAL; + + memcpy(buf, p, 3); + buf[3] = '\0'; + + err = kstrtoint(buf, 10, &version); + if (err || version < 0) + return -EINVAL; + + return version; +} + +static int mcu_request_version(struct hid_device *hdev) +{ + u8 *response __free(kfree) = kzalloc(ROG_ALLY_REPORT_SIZE, GFP_KERNEL); + const u8 request[] = { 0x5a, 0x05, 0x03, 0x31, 0x00, 0x20 }; + int ret; + + if (!response) + return -ENOMEM; + + ret = asus_kbd_set_report(hdev, request, sizeof(request)); + if (ret < 0) + return ret; + + ret = hid_hw_raw_request(hdev, FEATURE_REPORT_ID, response, + ROG_ALLY_REPORT_SIZE, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); + if (ret < 0) + return ret; + + ret = mcu_parse_version_string(response, ROG_ALLY_REPORT_SIZE); + if (ret < 0) { + pr_err("Failed to parse MCU version: %d\n", ret); + print_hex_dump(KERN_ERR, "MCU: ", DUMP_PREFIX_NONE, + 16, 1, response, ROG_ALLY_REPORT_SIZE, false); + } + + return ret; +} + +static void validate_mcu_fw_version(struct hid_device *hdev, int idProduct) +{ + int min_version, version; + + version = mcu_request_version(hdev); + if (version < 0) + return; + + switch (idProduct) { + case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY: + min_version = ROG_ALLY_MIN_MCU; + break; + case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X: + min_version = ROG_ALLY_X_MIN_MCU; + break; + default: + min_version = 0; + } + + if (version < min_version) { + hid_warn(hdev, + "The MCU firmware version must be %d or greater to avoid issues with suspend.\n", + min_version); + } +} + static int asus_kbd_register_leds(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); + struct usb_interface *intf; + struct usb_device *udev; unsigned char kbd_func; int ret; @@ -560,6 +655,14 @@ static int asus_kbd_register_leds(struct hid_device *hdev) if (ret < 0) return ret; } + + if (drvdata->quirks & QUIRK_ROG_ALLY_XPAD) { + intf = to_usb_interface(hdev->dev.parent); + udev = interface_to_usbdev(intf); + validate_mcu_fw_version(hdev, + le16_to_cpu(udev->descriptor.idProduct)); + } + } else { /* Initialize keyboard */ ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID); @@ -1280,10 +1383,10 @@ static const struct hid_device_id asus_devices[] = { QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY), - QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, + QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD}, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X), - QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD }, + QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD), QUIRK_ROG_CLAYMORE_II_KEYBOARD }, diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 0fb210e40a412..9eafff0b6ea4c 100644 --- a/drivers/hid/hid-hyperv.c +++ 
b/drivers/hid/hid-hyperv.c @@ -192,7 +192,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, goto cleanup; input_device->report_desc_size = le16_to_cpu( - desc->desc[0].wDescriptorLength); + desc->rpt_desc.wDescriptorLength); if (input_device->report_desc_size == 0) { input_device->dev_info_status = -EINVAL; goto cleanup; @@ -210,7 +210,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, memcpy(input_device->report_desc, ((unsigned char *)desc) + desc->bLength, - le16_to_cpu(desc->desc[0].wDescriptorLength)); + le16_to_cpu(desc->rpt_desc.wDescriptorLength)); /* Send the ack */ memset(&ack, 0, sizeof(struct mousevsc_prt_msg)); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 92baa34f42f28..b472140421f5a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -41,6 +41,10 @@ #define USB_VENDOR_ID_ACTIONSTAR 0x2101 #define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011 +#define USB_VENDOR_ID_ADATA_XPG 0x125f +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505 +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506 + #define USB_VENDOR_ID_ADS_TECH 0x06e1 #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 @@ -307,6 +311,8 @@ #define USB_DEVICE_ID_ASUS_AK1D 0x1125 #define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408 #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421 +#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA 0xb824 +#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2 0xb82c #define USB_VENDOR_ID_CHUNGHWAT 0x2247 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 @@ -810,6 +816,7 @@ #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 +#define USB_DEVICE_ID_LENOVO_X1_TAB2 0x60a4 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 #define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe #define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae @@ -1514,4 +1521,7 @@ #define USB_VENDOR_ID_SIGNOTEC 0x2133 #define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018 +#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a +#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155 + #endif diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index f66194fde8912..8482852c662dd 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev, return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max); default: @@ -529,11 +530,14 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev) /* * Tell the keyboard a driver understands it, and turn F7, F9, F11 into - * regular keys + * regular keys (Compact only) */ - ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03); - if (ret) - hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret); + if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD || + hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) { + ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03); + if (ret) + hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret); + } /* Switch middle button to native mode */ ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01); @@ -584,6 +588,7 @@ static ssize_t attr_fn_lock_store(struct device *dev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = 
lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value); if (ret) @@ -778,6 +783,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field, return lenovo_event_cptkbd(hdev, field, usage, value); case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_event_tp10ubkbd(hdev, field, usage, value); default: @@ -1059,6 +1065,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value); break; @@ -1290,6 +1297,7 @@ static int lenovo_probe(struct hid_device *hdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_probe_tp10ubkbd(hdev); break; @@ -1377,6 +1385,7 @@ static void lenovo_remove(struct hid_device *hdev) break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: lenovo_remove_tp10ubkbd(hdev); break; @@ -1427,6 +1436,8 @@ static const struct hid_device_id lenovo_devices[] = { */ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB2) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) }, { } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 93b5c648ef82c..641292cfdaa6f 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2116,12 +2116,18 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, - /* Lenovo X1 TAB Gen 2 */ + /* Lenovo X1 TAB Gen 1 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) }, + /* Lenovo X1 TAB Gen 2 */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LENOVO, + USB_DEVICE_ID_LENOVO_X1_TAB2) }, + /* Lenovo X1 TAB Gen 3 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index 55153a2f79886..2a3ae1068739d 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -308,6 +308,7 @@ enum joycon_ctlr_state { JOYCON_CTLR_STATE_INIT, JOYCON_CTLR_STATE_READ, JOYCON_CTLR_STATE_REMOVED, + JOYCON_CTLR_STATE_SUSPENDED, }; /* Controller type received as part of device info */ @@ -2754,14 +2755,46 @@ static void nintendo_hid_remove(struct hid_device *hdev) static int nintendo_hid_resume(struct hid_device *hdev) { - int ret = joycon_init(hdev); + struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); + int ret; + + hid_dbg(hdev, "resume\n"); + if (!joycon_using_usb(ctlr)) { + hid_dbg(hdev, "no-op resume for bt ctlr\n"); + ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; + return 0; + } + ret = joycon_init(hdev); if (ret) - hid_err(hdev, "Failed to restore controller after resume"); + hid_err(hdev, + "Failed to restore controller after resume: %d\n", + ret); + else + ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; return ret; } +static int 
nintendo_hid_suspend(struct hid_device *hdev, pm_message_t message) +{ + struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); + + hid_dbg(hdev, "suspend: %d\n", message.event); + /* + * Avoid any blocking loops in suspend/resume transitions. + * + * joycon_enforce_subcmd_rate() can result in repeated retries if for + * whatever reason the controller stops providing input reports. + * + * This has been observed with bluetooth controllers which lose + * connectivity prior to suspend (but not long enough to result in + * complete disconnection). + */ + ctlr->ctlr_state = JOYCON_CTLR_STATE_SUSPENDED; + return 0; +} + #endif static const struct hid_device_id nintendo_hid_devices[] = { @@ -2800,6 +2833,7 @@ static struct hid_driver nintendo_hid_driver = { #ifdef CONFIG_PM .resume = nintendo_hid_resume, + .suspend = nintendo_hid_suspend, #endif }; static int __init nintendo_init(void) diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 5d7a418ccdbec..80372342c176a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -27,6 +27,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL }, { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS }, @@ -745,6 +747,8 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) }, @@ -892,6 +896,7 @@ static const struct hid_device_id hid_ignore_list[] = { #endif { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) }, { } }; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index bf0f51ef0149f..01625dbb28e8d 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid) struct usb_host_interface *interface = intf->cur_altsetting; struct usb_device *dev = interface_to_usbdev (intf); struct hid_descriptor *hdesc; + struct hid_class_descriptor *hcdesc; u32 quirks = 0; unsigned int rsize = 0; char *rdesc; - int ret, n; - int num_descriptors; - size_t offset = offsetof(struct hid_descriptor, desc); + int ret; quirks = hid_lookup_quirk(hid); @@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid) 
return -ENODEV; } - if (hdesc->bLength < sizeof(struct hid_descriptor)) { - dbg_hid("hid descriptor is too short\n"); + if (!hdesc->bNumDescriptors || + hdesc->bLength != sizeof(*hdesc) + + (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) { + dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n", + hdesc->bLength, hdesc->bNumDescriptors); return -EINVAL; } hid->version = le16_to_cpu(hdesc->bcdHID); hid->country = hdesc->bCountryCode; - num_descriptors = min_t(int, hdesc->bNumDescriptors, - (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); - - for (n = 0; n < num_descriptors; n++) - if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) - rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); + if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT) + rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength); if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) { dbg_hid("weird size of report descriptor (%u)\n", rsize); @@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid) goto err; } + if (hdesc->bNumDescriptors > 1) + hid_warn(intf, + "%u unsupported optional hid class descriptors\n", + (int)(hdesc->bNumDescriptors - 1)); + hid->quirks |= quirks; return 0; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 34428349fa311..1b1112772777c 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2021,14 +2021,18 @@ static int wacom_initialize_remotes(struct wacom *wacom) remote->remote_dir = kobject_create_and_add("wacom_remote", &wacom->hdev->dev.kobj); - if (!remote->remote_dir) + if (!remote->remote_dir) { + kfifo_free(&remote->remote_fifo); return -ENOMEM; + } error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs); if (error) { hid_err(wacom->hdev, "cannot create sysfs group err: %d\n", error); + kfifo_free(&remote->remote_fifo); + kobject_put(remote->remote_dir); return error; } @@ -2874,6 +2878,7 @@ static void wacom_remove(struct hid_device *hdev) hid_hw_stop(hdev); cancel_delayed_work_sync(&wacom->init_work); + cancel_delayed_work_sync(&wacom->aes_battery_work); cancel_work_sync(&wacom->wireless_work); cancel_work_sync(&wacom->battery_work); cancel_work_sync(&wacom->remote_work); diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index f001ae880e1db..27306c17b0c4e 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -206,11 +206,20 @@ int vmbus_connect(void) INIT_LIST_HEAD(&vmbus_connection.chn_list); mutex_init(&vmbus_connection.channel_mutex); + /* + * The following Hyper-V interrupt and monitor pages can be used by + * UIO for mapping to user-space, so they should always be allocated on + * system page boundaries. The system page size must be >= the Hyper-V + * page size. + */ + BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE); + /* * Setup the vmbus event connection for channel interrupt * abstraction stuff */ - vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page(); + vmbus_connection.int_page = + (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); if (vmbus_connection.int_page == NULL) { ret = -ENOMEM; goto cleanup; @@ -225,8 +234,8 @@ int vmbus_connect(void) * Setup the monitor notification facility. 
The 1st page for * parent->child and the 2nd page for child->parent */ - vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page(); - vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page(); + vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL); + vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL); if ((vmbus_connection.monitor_pages[0] == NULL) || (vmbus_connection.monitor_pages[1] == NULL)) { ret = -ENOMEM; @@ -342,21 +351,23 @@ void vmbus_disconnect(void) destroy_workqueue(vmbus_connection.work_queue); if (vmbus_connection.int_page) { - hv_free_hyperv_page(vmbus_connection.int_page); + free_page((unsigned long)vmbus_connection.int_page); vmbus_connection.int_page = NULL; } if (vmbus_connection.monitor_pages[0]) { if (!set_memory_encrypted( (unsigned long)vmbus_connection.monitor_pages[0], 1)) - hv_free_hyperv_page(vmbus_connection.monitor_pages[0]); + free_page((unsigned long) + vmbus_connection.monitor_pages[0]); vmbus_connection.monitor_pages[0] = NULL; } if (vmbus_connection.monitor_pages[1]) { if (!set_memory_encrypted( (unsigned long)vmbus_connection.monitor_pages[1], 1)) - hv_free_hyperv_page(vmbus_connection.monitor_pages[1]); + free_page((unsigned long) + vmbus_connection.monitor_pages[1]); vmbus_connection.monitor_pages[1] = NULL; } } diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c index 9555366aeaf0d..fdc157c7394d9 100644 --- a/drivers/hwmon/asus-ec-sensors.c +++ b/drivers/hwmon/asus-ec-sensors.c @@ -910,6 +910,10 @@ static int asus_ec_hwmon_read_string(struct device *dev, { struct ec_sensors_data *state = dev_get_drvdata(dev); int sensor_index = find_ec_sensor_index(state, type, channel); + + if (sensor_index < 0) + return sensor_index; + *str = get_sensor_info(state, sensor_index)->label; return 0; diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c index a3a07662e4917..8aeec16a7a905 100644 --- a/drivers/hwmon/ftsteutates.c +++ b/drivers/hwmon/ftsteutates.c @@ -423,13 +423,16 @@ static int fts_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, break; case hwmon_pwm: switch (attr) { - case hwmon_pwm_auto_channels_temp: - if (data->fan_source[channel] == FTS_FAN_SOURCE_INVALID) + case hwmon_pwm_auto_channels_temp: { + u8 fan_source = data->fan_source[channel]; + + if (fan_source == FTS_FAN_SOURCE_INVALID || fan_source >= BITS_PER_LONG) *val = 0; else - *val = BIT(data->fan_source[channel]); + *val = BIT(fan_source); return 0; + } default: break; } diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c index 4f608a3790fb7..953dfe2bd166c 100644 --- a/drivers/hwmon/ltc4282.c +++ b/drivers/hwmon/ltc4282.c @@ -1511,13 +1511,6 @@ static int ltc4282_setup(struct ltc4282_state *st, struct device *dev) return ret; } - if (device_property_read_bool(dev, "adi,fault-log-enable")) { - ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, - LTC4282_FAULT_LOG_EN_MASK); - if (ret) - return ret; - } - if (device_property_read_bool(dev, "adi,fault-log-enable")) { ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_FAULT_LOG_EN_MASK); if (ret) diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 9486db249c64f..b3694a4209b97 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -459,12 +459,10 @@ static ssize_t occ_show_power_1(struct device *dev, return sysfs_emit(buf, "%llu\n", val); } -static u64 occ_get_powr_avg(u64 *accum, u32 *samples) +static u64 occ_get_powr_avg(u64 accum, u32 samples) { - u64 divisor = 
get_unaligned_be32(samples); - - return (divisor == 0) ? 0 : - div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor); + return (samples == 0) ? 0 : + mul_u64_u32_div(accum, 1000000UL, samples); } static ssize_t occ_show_power_2(struct device *dev, @@ -489,8 +487,8 @@ static ssize_t occ_show_power_2(struct device *dev, get_unaligned_be32(&power->sensor_id), power->function_id, power->apss_channel); case 1: - val = occ_get_powr_avg(&power->accumulator, - &power->update_tag); + val = occ_get_powr_avg(get_unaligned_be64(&power->accumulator), + get_unaligned_be32(&power->update_tag)); break; case 2: val = (u64)get_unaligned_be32(&power->update_tag) * @@ -527,8 +525,8 @@ static ssize_t occ_show_power_a0(struct device *dev, return sysfs_emit(buf, "%u_system\n", get_unaligned_be32(&power->sensor_id)); case 1: - val = occ_get_powr_avg(&power->system.accumulator, - &power->system.update_tag); + val = occ_get_powr_avg(get_unaligned_be64(&power->system.accumulator), + get_unaligned_be32(&power->system.update_tag)); break; case 2: val = (u64)get_unaligned_be32(&power->system.update_tag) * @@ -541,8 +539,8 @@ static ssize_t occ_show_power_a0(struct device *dev, return sysfs_emit(buf, "%u_proc\n", get_unaligned_be32(&power->sensor_id)); case 5: - val = occ_get_powr_avg(&power->proc.accumulator, - &power->proc.update_tag); + val = occ_get_powr_avg(get_unaligned_be64(&power->proc.accumulator), + get_unaligned_be32(&power->proc.update_tag)); break; case 6: val = (u64)get_unaligned_be32(&power->proc.update_tag) * @@ -555,8 +553,8 @@ static ssize_t occ_show_power_a0(struct device *dev, return sysfs_emit(buf, "%u_vdd\n", get_unaligned_be32(&power->sensor_id)); case 9: - val = occ_get_powr_avg(&power->vdd.accumulator, - &power->vdd.update_tag); + val = occ_get_powr_avg(get_unaligned_be64(&power->vdd.accumulator), + get_unaligned_be32(&power->vdd.update_tag)); break; case 10: val = (u64)get_unaligned_be32(&power->vdd.update_tag) * @@ -569,8 +567,8 @@ static ssize_t occ_show_power_a0(struct device *dev, return sysfs_emit(buf, "%u_vdn\n", get_unaligned_be32(&power->sensor_id)); case 13: - val = occ_get_powr_avg(&power->vdn.accumulator, - &power->vdn.update_tag); + val = occ_get_powr_avg(get_unaligned_be64(&power->vdn.accumulator), + get_unaligned_be32(&power->vdn.update_tag)); break; case 14: val = (u64)get_unaligned_be32(&power->vdn.update_tag) * @@ -747,29 +745,30 @@ static ssize_t occ_show_extended(struct device *dev, } /* - * Some helper macros to make it easier to define an occ_attribute. Since these - * are dynamically allocated, we shouldn't use the existing kernel macros which + * A helper to make it easier to define an occ_attribute. Since these + * are dynamically allocated, we cannot use the existing kernel macros which * stringify the name argument. */ -#define ATTR_OCC(_name, _mode, _show, _store) { \ - .attr = { \ - .name = _name, \ - .mode = VERIFY_OCTAL_PERMISSIONS(_mode), \ - }, \ - .show = _show, \ - .store = _store, \ -} - -#define SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index) { \ - .dev_attr = ATTR_OCC(_name, _mode, _show, _store), \ - .index = _index, \ - .nr = _nr, \ +static void occ_init_attribute(struct occ_attribute *attr, int mode, + ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf), + ssize_t (*store)(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count), + int nr, int index, const char *fmt, ...) 
+{ + va_list args; + + va_start(args, fmt); + vsnprintf(attr->name, sizeof(attr->name), fmt, args); + va_end(args); + + attr->sensor.dev_attr.attr.name = attr->name; + attr->sensor.dev_attr.attr.mode = mode; + attr->sensor.dev_attr.show = show; + attr->sensor.dev_attr.store = store; + attr->sensor.index = index; + attr->sensor.nr = nr; } -#define OCC_INIT_ATTR(_name, _mode, _show, _store, _nr, _index) \ - ((struct sensor_device_attribute_2) \ - SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index)) - /* * Allocate and instatiate sensor_device_attribute_2s. It's most efficient to * use our own instead of the built-in hwmon attribute types. @@ -855,14 +854,15 @@ static int occ_setup_sensor_attrs(struct occ *occ) sensors->extended.num_sensors = 0; } - occ->attrs = devm_kzalloc(dev, sizeof(*occ->attrs) * num_attrs, + occ->attrs = devm_kcalloc(dev, num_attrs, sizeof(*occ->attrs), GFP_KERNEL); if (!occ->attrs) return -ENOMEM; /* null-terminated list */ - occ->group.attrs = devm_kzalloc(dev, sizeof(*occ->group.attrs) * - num_attrs + 1, GFP_KERNEL); + occ->group.attrs = devm_kcalloc(dev, num_attrs + 1, + sizeof(*occ->group.attrs), + GFP_KERNEL); if (!occ->group.attrs) return -ENOMEM; @@ -872,43 +872,33 @@ static int occ_setup_sensor_attrs(struct occ *occ) s = i + 1; temp = ((struct temp_sensor_2 *)sensors->temp.data) + i; - snprintf(attr->name, sizeof(attr->name), "temp%d_label", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL, - 0, i); + occ_init_attribute(attr, 0444, show_temp, NULL, + 0, i, "temp%d_label", s); attr++; if (sensors->temp.version == 2 && temp->fru_type == OCC_FRU_TYPE_VRM) { - snprintf(attr->name, sizeof(attr->name), - "temp%d_alarm", s); + occ_init_attribute(attr, 0444, show_temp, NULL, + 1, i, "temp%d_alarm", s); } else { - snprintf(attr->name, sizeof(attr->name), - "temp%d_input", s); + occ_init_attribute(attr, 0444, show_temp, NULL, + 1, i, "temp%d_input", s); } - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL, - 1, i); attr++; if (sensors->temp.version > 1) { - snprintf(attr->name, sizeof(attr->name), - "temp%d_fru_type", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_temp, NULL, 2, i); + occ_init_attribute(attr, 0444, show_temp, NULL, + 2, i, "temp%d_fru_type", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "temp%d_fault", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_temp, NULL, 3, i); + occ_init_attribute(attr, 0444, show_temp, NULL, + 3, i, "temp%d_fault", s); attr++; if (sensors->temp.version == 0x10) { - snprintf(attr->name, sizeof(attr->name), - "temp%d_max", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_temp, NULL, - 4, i); + occ_init_attribute(attr, 0444, show_temp, NULL, + 4, i, "temp%d_max", s); attr++; } } @@ -917,14 +907,12 @@ static int occ_setup_sensor_attrs(struct occ *occ) for (i = 0; i < sensors->freq.num_sensors; ++i) { s = i + 1; - snprintf(attr->name, sizeof(attr->name), "freq%d_label", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL, - 0, i); + occ_init_attribute(attr, 0444, show_freq, NULL, + 0, i, "freq%d_label", s); attr++; - snprintf(attr->name, sizeof(attr->name), "freq%d_input", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL, - 1, i); + occ_init_attribute(attr, 0444, show_freq, NULL, + 1, i, "freq%d_input", s); attr++; } @@ -940,32 +928,24 @@ static int occ_setup_sensor_attrs(struct occ *occ) s = (i * 4) + 1; for (j = 0; j < 4; ++j) { - snprintf(attr->name, sizeof(attr->name), - "power%d_label", s); - attr->sensor = 
OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, - nr++, i); + occ_init_attribute(attr, 0444, show_power, + NULL, nr++, i, + "power%d_label", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "power%d_average", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, - nr++, i); + occ_init_attribute(attr, 0444, show_power, + NULL, nr++, i, + "power%d_average", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "power%d_average_interval", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, - nr++, i); + occ_init_attribute(attr, 0444, show_power, + NULL, nr++, i, + "power%d_average_interval", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "power%d_input", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, - nr++, i); + occ_init_attribute(attr, 0444, show_power, + NULL, nr++, i, + "power%d_input", s); attr++; s++; @@ -977,28 +957,20 @@ static int occ_setup_sensor_attrs(struct occ *occ) for (i = 0; i < sensors->power.num_sensors; ++i) { s = i + 1; - snprintf(attr->name, sizeof(attr->name), - "power%d_label", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, 0, i); + occ_init_attribute(attr, 0444, show_power, NULL, + 0, i, "power%d_label", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "power%d_average", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, 1, i); + occ_init_attribute(attr, 0444, show_power, NULL, + 1, i, "power%d_average", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "power%d_average_interval", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, 2, i); + occ_init_attribute(attr, 0444, show_power, NULL, + 2, i, "power%d_average_interval", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "power%d_input", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_power, NULL, 3, i); + occ_init_attribute(attr, 0444, show_power, NULL, + 3, i, "power%d_input", s); attr++; } @@ -1006,56 +978,43 @@ static int occ_setup_sensor_attrs(struct occ *occ) } if (sensors->caps.num_sensors >= 1) { - snprintf(attr->name, sizeof(attr->name), "power%d_label", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, - 0, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 0, 0, "power%d_label", s); attr++; - snprintf(attr->name, sizeof(attr->name), "power%d_cap", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, - 1, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 1, 0, "power%d_cap", s); attr++; - snprintf(attr->name, sizeof(attr->name), "power%d_input", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, - 2, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 2, 0, "power%d_input", s); attr++; - snprintf(attr->name, sizeof(attr->name), - "power%d_cap_not_redundant", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, - 3, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 3, 0, "power%d_cap_not_redundant", s); attr++; - snprintf(attr->name, sizeof(attr->name), "power%d_cap_max", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, - 4, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 4, 0, "power%d_cap_max", s); attr++; - snprintf(attr->name, sizeof(attr->name), "power%d_cap_min", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, - 5, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 5, 0, "power%d_cap_min", s); attr++; - snprintf(attr->name, sizeof(attr->name), "power%d_cap_user", 
- s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0644, show_caps, - occ_store_caps_user, 6, 0); + occ_init_attribute(attr, 0644, show_caps, occ_store_caps_user, + 6, 0, "power%d_cap_user", s); attr++; if (sensors->caps.version > 1) { - snprintf(attr->name, sizeof(attr->name), - "power%d_cap_user_source", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_caps, NULL, 7, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 7, 0, "power%d_cap_user_source", s); attr++; if (sensors->caps.version > 2) { - snprintf(attr->name, sizeof(attr->name), - "power%d_cap_min_soft", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - show_caps, NULL, - 8, 0); + occ_init_attribute(attr, 0444, show_caps, NULL, + 8, 0, + "power%d_cap_min_soft", s); attr++; } } @@ -1064,19 +1023,16 @@ static int occ_setup_sensor_attrs(struct occ *occ) for (i = 0; i < sensors->extended.num_sensors; ++i) { s = i + 1; - snprintf(attr->name, sizeof(attr->name), "extn%d_label", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - occ_show_extended, NULL, 0, i); + occ_init_attribute(attr, 0444, occ_show_extended, NULL, + 0, i, "extn%d_label", s); attr++; - snprintf(attr->name, sizeof(attr->name), "extn%d_flags", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - occ_show_extended, NULL, 1, i); + occ_init_attribute(attr, 0444, occ_show_extended, NULL, + 1, i, "extn%d_flags", s); attr++; - snprintf(attr->name, sizeof(attr->name), "extn%d_input", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, - occ_show_extended, NULL, 2, i); + occ_init_attribute(attr, 0444, occ_show_extended, NULL, + 2, i, "extn%d_input", s); attr++; } diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c index fe7f6b1b09851..e14be8ebaad30 100644 --- a/drivers/hwmon/pmbus/max34440.c +++ b/drivers/hwmon/pmbus/max34440.c @@ -34,16 +34,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 }; /* * The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT * swapped from the standard pmbus spec addresses. + * For max34451, version MAX34451ETNA6+ and later has this issue fixed. 
*/ #define MAX34440_IOUT_OC_WARN_LIMIT 0x46 #define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A +#define MAX34451ETNA6_MFR_REV 0x0012 + #define MAX34451_MFR_CHANNEL_CONFIG 0xe4 #define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f struct max34440_data { int id; struct pmbus_driver_info info; + u8 iout_oc_warn_limit; + u8 iout_oc_fault_limit; }; #define to_max34440_data(x) container_of(x, struct max34440_data, info) @@ -60,11 +65,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page, switch (reg) { case PMBUS_IOUT_OC_FAULT_LIMIT: ret = pmbus_read_word_data(client, page, phase, - MAX34440_IOUT_OC_FAULT_LIMIT); + data->iout_oc_fault_limit); break; case PMBUS_IOUT_OC_WARN_LIMIT: ret = pmbus_read_word_data(client, page, phase, - MAX34440_IOUT_OC_WARN_LIMIT); + data->iout_oc_warn_limit); break; case PMBUS_VIRT_READ_VOUT_MIN: ret = pmbus_read_word_data(client, page, phase, @@ -133,11 +138,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page, switch (reg) { case PMBUS_IOUT_OC_FAULT_LIMIT: - ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT, + ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit, word); break; case PMBUS_IOUT_OC_WARN_LIMIT: - ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT, + ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit, word); break; case PMBUS_VIRT_RESET_POUT_HISTORY: @@ -235,6 +240,25 @@ static int max34451_set_supported_funcs(struct i2c_client *client, */ int page, rv; + bool max34451_na6 = false; + + rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION); + if (rv < 0) + return rv; + + if (rv >= MAX34451ETNA6_MFR_REV) { + max34451_na6 = true; + data->info.format[PSC_VOLTAGE_IN] = direct; + data->info.format[PSC_CURRENT_IN] = direct; + data->info.m[PSC_VOLTAGE_IN] = 1; + data->info.b[PSC_VOLTAGE_IN] = 0; + data->info.R[PSC_VOLTAGE_IN] = 3; + data->info.m[PSC_CURRENT_IN] = 1; + data->info.b[PSC_CURRENT_IN] = 0; + data->info.R[PSC_CURRENT_IN] = 2; + data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT; + data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT; + } for (page = 0; page < 16; page++) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); @@ -251,16 +275,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client, case 0x20: data->info.func[page] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT; + + if (max34451_na6) + data->info.func[page] |= PMBUS_HAVE_VIN | + PMBUS_HAVE_STATUS_INPUT; break; case 0x21: data->info.func[page] = PMBUS_HAVE_VOUT; + + if (max34451_na6) + data->info.func[page] |= PMBUS_HAVE_VIN; break; case 0x22: data->info.func[page] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT; + + if (max34451_na6) + data->info.func[page] |= PMBUS_HAVE_IIN | + PMBUS_HAVE_STATUS_INPUT; break; case 0x23: data->info.func[page] = PMBUS_HAVE_IOUT; + + if (max34451_na6) + data->info.func[page] |= PMBUS_HAVE_IIN; break; default: break; @@ -494,6 +532,8 @@ static int max34440_probe(struct i2c_client *client) return -ENOMEM; data->id = i2c_match_id(max34440_id, client)->driver_data; data->info = max34440_info[data->id]; + data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT; + data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT; if (data->id == max34451) { rv = max34451_set_supported_funcs(client, data); diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index d8ad64ea81f11..25fd02955c38d 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c 
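/*
 * Editor's note on the CATU hunks below: enable/disable become refcounted
 * so that only the first enable programs the hardware and only the last
 * disable quiesces it, serialized by a raw spinlock taken through the
 * cleanup.h guard() helper. A minimal sketch of that pattern under
 * hypothetical demo_* names (the stubs stand in for catu_enable_hw() /
 * catu_disable_hw(); in the driver the count lives in csdev->refcnt):
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo_hw {
	raw_spinlock_t lock;	/* protects refcnt and the hardware state */
	int refcnt;
};

static int demo_hw_program(struct demo_hw *hw)  { return 0; }	/* stub */
static void demo_hw_quiesce(struct demo_hw *hw) { }		/* stub */

static int demo_enable(struct demo_hw *hw)
{
	int rc = 0;

	/* guard() drops the lock automatically on every return path */
	guard(raw_spinlock_irqsave)(&hw->lock);
	if (hw->refcnt == 0)
		rc = demo_hw_program(hw);	/* first user touches the HW */
	if (!rc)
		hw->refcnt++;
	return rc;
}

static void demo_disable(struct demo_hw *hw)
{
	guard(raw_spinlock_irqsave)(&hw->lock);
	if (--hw->refcnt == 0)
		demo_hw_quiesce(hw);		/* last user touches the HW */
}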
@@ -458,12 +458,17 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode, static int catu_enable(struct coresight_device *csdev, enum cs_mode mode, void *data) { - int rc; + int rc = 0; struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev); - CS_UNLOCK(catu_drvdata->base); - rc = catu_enable_hw(catu_drvdata, mode, data); - CS_LOCK(catu_drvdata->base); + guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock); + if (csdev->refcnt == 0) { + CS_UNLOCK(catu_drvdata->base); + rc = catu_enable_hw(catu_drvdata, mode, data); + CS_LOCK(catu_drvdata->base); + } + if (!rc) + csdev->refcnt++; return rc; } @@ -486,12 +491,15 @@ static int catu_disable_hw(struct catu_drvdata *drvdata) static int catu_disable(struct coresight_device *csdev, void *__unused) { - int rc; + int rc = 0; struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev); - CS_UNLOCK(catu_drvdata->base); - rc = catu_disable_hw(catu_drvdata); - CS_LOCK(catu_drvdata->base); + guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock); + if (--csdev->refcnt == 0) { + CS_UNLOCK(catu_drvdata->base); + rc = catu_disable_hw(catu_drvdata); + CS_LOCK(catu_drvdata->base); + } return rc; } @@ -550,6 +558,7 @@ static int __catu_probe(struct device *dev, struct resource *res) dev->platform_data = pdata; drvdata->base = base; + raw_spin_lock_init(&drvdata->spinlock); catu_desc.access = CSDEV_ACCESS_IOMEM(base); catu_desc.pdata = pdata; catu_desc.dev = dev; @@ -702,7 +711,7 @@ static int __init catu_init(void) { int ret; - ret = coresight_init_driver("catu", &catu_driver, &catu_platform_driver); + ret = coresight_init_driver("catu", &catu_driver, &catu_platform_driver, THIS_MODULE); tmc_etr_set_catu_ops(&etr_catu_buf_ops); return ret; } diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h index 141feac1c14b0..755776cd19c5b 100644 --- a/drivers/hwtracing/coresight/coresight-catu.h +++ b/drivers/hwtracing/coresight/coresight-catu.h @@ -65,6 +65,7 @@ struct catu_drvdata { void __iomem *base; struct coresight_device *csdev; int irq; + raw_spinlock_t spinlock; }; #define CATU_REG32(name, offset) \ diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h index 6ba0139757418..84cdde6f0e4db 100644 --- a/drivers/hwtracing/coresight/coresight-config.h +++ b/drivers/hwtracing/coresight/coresight-config.h @@ -228,7 +228,7 @@ struct cscfg_feature_csdev { * @feats_csdev:references to the device features to enable. 
*/ struct cscfg_config_csdev { - const struct cscfg_config_desc *config_desc; + struct cscfg_config_desc *config_desc; struct coresight_device *csdev; bool enabled; struct list_head node; diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index c42aa9fddab9b..b7941d8abbfe7 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -97,7 +97,8 @@ coresight_find_out_connection(struct coresight_device *src_dev, static inline u32 coresight_read_claim_tags(struct coresight_device *csdev) { - return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR); + return FIELD_GET(CORESIGHT_CLAIM_MASK, + csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR)); } static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev) @@ -1422,17 +1423,17 @@ module_init(coresight_init); module_exit(coresight_exit); int coresight_init_driver(const char *drv, struct amba_driver *amba_drv, - struct platform_driver *pdev_drv) + struct platform_driver *pdev_drv, struct module *owner) { int ret; - ret = amba_driver_register(amba_drv); + ret = __amba_driver_register(amba_drv, owner); if (ret) { pr_err("%s: error registering AMBA driver\n", drv); return ret; } - ret = platform_driver_register(pdev_drv); + ret = __platform_driver_register(pdev_drv, owner); if (!ret) return 0; diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 75962dae9aa18..cc599c5ef4b22 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -774,7 +774,8 @@ static struct platform_driver debug_platform_driver = { static int __init debug_init(void) { - return coresight_init_driver("debug", &debug_driver, &debug_platform_driver); + return coresight_init_driver("debug", &debug_driver, &debug_platform_driver, + THIS_MODULE); } static void __exit debug_exit(void) diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 5a819c8970fbf..8f451b051ddc3 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -433,7 +433,8 @@ static struct amba_driver dynamic_funnel_driver = { static int __init funnel_init(void) { - return coresight_init_driver("funnel", &dynamic_funnel_driver, &funnel_driver); + return coresight_init_driver("funnel", &dynamic_funnel_driver, &funnel_driver, + THIS_MODULE); } static void __exit funnel_exit(void) diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 05f891ca6b5c9..cc7ff1e36ef42 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -35,6 +35,7 @@ extern const struct device_type coresight_dev_type[]; * Coresight device CLAIM protocol. * See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore. 
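/*
 * Editor's note on the two helpers added above: cscfg_config_desc_get()
 * pairs the active-count increment with a reference on the module that
 * loaded the configuration, so the 0 -> 1 transition pins the provider and
 * the 1 -> 0 transition in cscfg_config_desc_put() releases it. A minimal
 * sketch of the pattern, assuming cscfg_owner_get() behaves like
 * try_module_get() (hypothetical demo_* names, shape only):
 */
#include <linux/atomic.h>
#include <linux/module.h>

struct demo_desc {
	atomic_t active_cnt;
	struct module *owner;	/* module providing the descriptor */
};

static bool demo_desc_get(struct demo_desc *d)
{
	/* 0 -> 1: pin the providing module so it cannot be unloaded */
	if (!atomic_fetch_inc(&d->active_cnt)) {
		if (!try_module_get(d->owner)) {
			atomic_dec(&d->active_cnt);
			return false;
		}
	}
	return true;
}

static void demo_desc_put(struct demo_desc *d)
{
	/* 1 -> 0: the last active user releases the module reference */
	if (!atomic_dec_return(&d->active_cnt))
		module_put(d->owner);
}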
*/ +#define CORESIGHT_CLAIM_MASK GENMASK(1, 0) #define CORESIGHT_CLAIM_SELF_HOSTED BIT(1) #define TIMEOUT_US 100 diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 3e55be9c84186..f7607c72857c5 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -438,7 +438,8 @@ static struct amba_driver dynamic_replicator_driver = { static int __init replicator_init(void) { - return coresight_init_driver("replicator", &dynamic_replicator_driver, &replicator_driver); + return coresight_init_driver("replicator", &dynamic_replicator_driver, &replicator_driver, + THIS_MODULE); } static void __exit replicator_exit(void) diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index cb3e04755c992..65bc50a6d3e9a 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -1047,7 +1047,7 @@ static struct platform_driver stm_platform_driver = { static int __init stm_init(void) { - return coresight_init_driver("stm", &stm_driver, &stm_platform_driver); + return coresight_init_driver("stm", &stm_driver, &stm_platform_driver, THIS_MODULE); } static void __exit stm_exit(void) diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c index 11138a9762b01..30a561d874819 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg.c +++ b/drivers/hwtracing/coresight/coresight-syscfg.c @@ -867,6 +867,25 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev) } EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats); +static bool cscfg_config_desc_get(struct cscfg_config_desc *config_desc) +{ + if (!atomic_fetch_inc(&config_desc->active_cnt)) { + /* must ensure that config cannot be unloaded in use */ + if (unlikely(cscfg_owner_get(config_desc->load_owner))) { + atomic_dec(&config_desc->active_cnt); + return false; + } + } + + return true; +} + +static void cscfg_config_desc_put(struct cscfg_config_desc *config_desc) +{ + if (!atomic_dec_return(&config_desc->active_cnt)) + cscfg_owner_put(config_desc->load_owner); +} + /* * This activate configuration for either perf or sysfs. Perf can have multiple * active configs, selected per event, sysfs is limited to one. 
@@ -890,22 +909,17 @@ static int _cscfg_activate_config(unsigned long cfg_hash) if (config_desc->available == false) return -EBUSY; - /* must ensure that config cannot be unloaded in use */ - err = cscfg_owner_get(config_desc->load_owner); - if (err) + if (!cscfg_config_desc_get(config_desc)) { + err = -EINVAL; break; + } + /* * increment the global active count - control changes to * active configurations */ atomic_inc(&cscfg_mgr->sys_active_cnt); - /* - * mark the descriptor as active so enable config on a - * device instance will use it - */ - atomic_inc(&config_desc->active_cnt); - err = 0; dev_dbg(cscfg_device(), "Activate config %s.\n", config_desc->name); break; @@ -920,9 +934,8 @@ static void _cscfg_deactivate_config(unsigned long cfg_hash) list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) { if ((unsigned long)config_desc->event_ea->var == cfg_hash) { - atomic_dec(&config_desc->active_cnt); atomic_dec(&cscfg_mgr->sys_active_cnt); - cscfg_owner_put(config_desc->load_owner); + cscfg_config_desc_put(config_desc); dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name); break; } @@ -1047,7 +1060,7 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev, unsigned long cfg_hash, int preset) { struct cscfg_config_csdev *config_csdev_active = NULL, *config_csdev_item; - const struct cscfg_config_desc *config_desc; + struct cscfg_config_desc *config_desc; unsigned long flags; int err = 0; @@ -1062,8 +1075,8 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev, spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) { config_desc = config_csdev_item->config_desc; - if ((atomic_read(&config_desc->active_cnt)) && - ((unsigned long)config_desc->event_ea->var == cfg_hash)) { + if (((unsigned long)config_desc->event_ea->var == cfg_hash) && + cscfg_config_desc_get(config_desc)) { config_csdev_active = config_csdev_item; csdev->active_cscfg_ctxt = (void *)config_csdev_active; break; @@ -1097,7 +1110,11 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev, err = -EBUSY; spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); } + + if (err) + cscfg_config_desc_put(config_desc); } + return err; } EXPORT_SYMBOL_GPL(cscfg_csdev_enable_active_config); @@ -1136,8 +1153,10 @@ void cscfg_csdev_disable_active_config(struct coresight_device *csdev) spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); /* true if there was an enabled active config */ - if (config_csdev) + if (config_csdev) { cscfg_csdev_disable_config(config_csdev); + cscfg_config_desc_put(config_csdev->config_desc); + } } EXPORT_SYMBOL_GPL(cscfg_csdev_disable_active_config); diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index 3a482fd2cb225..475fa4bb6813b 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -741,7 +741,7 @@ static struct platform_driver tmc_platform_driver = { static int __init tmc_init(void) { - return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver); + return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver, THIS_MODULE); } static void __exit tmc_exit(void) diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index b048e146fbb10..f9ecd05cbe5c5 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -318,7 
+318,7 @@ static struct platform_driver tpiu_platform_driver = { static int __init tpiu_init(void) { - return coresight_init_driver("tpiu", &tpiu_driver, &tpiu_platform_driver); + return coresight_init_driver("tpiu", &tpiu_driver, &tpiu_platform_driver, THIS_MODULE); } static void __exit tpiu_exit(void) diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 28188c6d0555e..52dc666c3ef42 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -346,6 +346,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, dev->msgs = msgs; dev->msgs_num = num_msgs; + dev->msg_write_idx = 0; i2c_dw_xfer_init(dev); /* Initiate messages read/write transaction */ diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index f0f0f1f2131d0..602e98e61cc01 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -94,7 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) i2c_dw_disable(dev); synchronize_irq(dev->irq); dev->slave = NULL; - pm_runtime_put(dev->dev); + pm_runtime_put_sync_suspend(dev->dev); return 0; } diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index a693ebb64edf4..7b6eb2bfb412e 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -1969,10 +1969,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, /* Check HW is OK: SDA and SCL should be high at this point. */ if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) { - dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num); - dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap), - npcm_i2c_get_SCL(&bus->adap)); - return -ENXIO; + dev_warn(bus->dev, " I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num, + npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap)); + if (npcm_i2c_recovery_tgclk(&bus->adap)) { + dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n", + bus->num, npcm_i2c_get_SDA(&bus->adap), + npcm_i2c_get_SCL(&bus->adap)); + return -ENXIO; + } } npcm_i2c_int_enable(bus, true); diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c index 80d45079b763c..e0a76fb5bc31f 100644 --- a/drivers/i2c/busses/i2c-robotfuzz-osif.c +++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c @@ -111,6 +111,11 @@ static u32 osif_func(struct i2c_adapter *adapter) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } +/* prevent invalid 0-length usb_control_msg */ +static const struct i2c_adapter_quirks osif_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN_READ, +}; + static const struct i2c_algorithm osif_algorithm = { .xfer = osif_xfer, .functionality = osif_func, @@ -143,6 +148,7 @@ static int osif_probe(struct usb_interface *interface, priv->adapter.owner = THIS_MODULE; priv->adapter.class = I2C_CLASS_HWMON; + priv->adapter.quirks = &osif_quirks; priv->adapter.algo = &osif_algorithm; priv->adapter.algo_data = priv; snprintf(priv->adapter.name, sizeof(priv->adapter.name), diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 1df5b42041427..89ce8a62b37c6 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1395,6 +1395,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE); if (ret) break; + + /* Validate message 
length before proceeding */ + if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX) + break; + /* Set the msg length from first byte */ msgs[i].len += msgs[i].buf[0]; dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len); diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index 0f2ed181b2665..0cc7c0a816fc0 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -138,6 +138,11 @@ static u32 usb_func(struct i2c_adapter *adapter) return ret; } +/* prevent invalid 0-length usb_control_msg */ +static const struct i2c_adapter_quirks usb_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN_READ, +}; + /* This is the actual algorithm we define */ static const struct i2c_algorithm usb_algorithm = { .xfer = usb_xfer, @@ -246,6 +251,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, /* setup i2c adapter description */ dev->adapter.owner = THIS_MODULE; dev->adapter.class = I2C_CLASS_HWMON; + dev->adapter.quirks = &usb_quirks; dev->adapter.algo = &usb_algorithm; dev->adapter.algo_data = dev; snprintf(dev->adapter.name, sizeof(dev->adapter.name), diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index acadabec4df7a..5e17c1e6d2c71 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -436,8 +437,16 @@ static int fxls8962af_read_raw(struct iio_dev *indio_dev, *val = FXLS8962AF_TEMP_CENTER_VAL; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: - *val = 0; - return fxls8962af_read_full_scale(data, val2); + switch (chan->type) { + case IIO_TEMP: + *val = MILLIDEGREE_PER_DEGREE; + return IIO_VAL_INT; + case IIO_ACCEL: + *val = 0; + return fxls8962af_read_full_scale(data, val2); + default: + return -EINVAL; + } case IIO_CHAN_INFO_SAMP_FREQ: return fxls8962af_read_samp_freq(data, val, val2); default: @@ -736,9 +745,11 @@ static const struct iio_event_spec fxls8962af_event[] = { .type = IIO_TEMP, \ .address = FXLS8962AF_TEMP_OUT, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_OFFSET),\ .scan_index = -1, \ .scan_type = { \ + .sign = 's', \ .realbits = 8, \ .storagebits = 8, \ }, \ diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 6c4e74420fd25..216f3c9ce183e 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -1452,6 +1452,7 @@ config TI_ADS1298 tristate "Texas Instruments ADS1298" depends on SPI select IIO_BUFFER + select IIO_KFIFO_BUF help If you say yes here you get support for Texas Instruments ADS1298 medical ADC chips diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 30a7392c4f8b9..9c9e0c950b427 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -300,9 +300,9 @@ static int ad7124_get_3db_filter_freq(struct ad7124_state *st, switch (st->channels[channel].cfg.filter_type) { case AD7124_SINC3_FILTER: - return DIV_ROUND_CLOSEST(fadc * 230, 1000); + return DIV_ROUND_CLOSEST(fadc * 272, 1000); case AD7124_SINC4_FILTER: - return DIV_ROUND_CLOSEST(fadc * 262, 1000); + return DIV_ROUND_CLOSEST(fadc * 230, 1000); default: return -EINVAL; } diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c index 32a5448116a13..42112f97fac1a 100644 --- a/drivers/iio/adc/ad7606_spi.c +++ b/drivers/iio/adc/ad7606_spi.c @@ -151,7 +151,7 @@ static int ad7606_spi_reg_write(struct ad7606_state *st, struct spi_device *spi = to_spi_device(st->dev); st->d16[0] = 
cpu_to_be16((st->bops->rd_wr_cmd(addr, 1) << 8) | - (val & 0x1FF)); + (val & 0xFF)); return spi_write(spi, &st->d16[0], sizeof(st->d16[0])); } diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c index 58a25792cec37..1e2cf512c2f5f 100644 --- a/drivers/iio/adc/ad7944.c +++ b/drivers/iio/adc/ad7944.c @@ -290,6 +290,8 @@ static int ad7944_single_conversion(struct ad7944_adc *adc, if (chan->scan_type.sign == 's') *val = sign_extend32(*val, chan->scan_type.realbits - 1); + else + *val &= GENMASK(chan->scan_type.realbits - 1, 0); return IIO_VAL_INT; } diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index ea4aabd3960a0..3df1d4f6bc959 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -477,6 +477,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p) * byte set to zero. */ ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]); break; + + default: + dev_err_ratelimited(&indio_dev->dev, "Unsupported reg_size: %u\n", reg_size); + goto irq_handled; } /* diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index b097f04172c80..4bd6b5aac4fe8 100644 --- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -6,7 +6,7 @@ * Copyright (C) 2018 Kent Gustavsson */ #include -#include +#include #include #include #include @@ -79,6 +79,8 @@ #define MCP3910_CONFIG1_CLKEXT BIT(6) #define MCP3910_CONFIG1_VREFEXT BIT(7) +#define MCP3910_CHANNEL(ch) (MCP3911_REG_CHANNEL0 + (ch)) + #define MCP3910_REG_OFFCAL_CH0 0x0f #define MCP3910_OFFCAL(ch) (MCP3910_REG_OFFCAL_CH0 + (ch) * 6) @@ -110,6 +112,7 @@ struct mcp3911_chip_info { int (*get_offset)(struct mcp3911 *adc, int channel, int *val); int (*set_offset)(struct mcp3911 *adc, int channel, int val); int (*set_scale)(struct mcp3911 *adc, int channel, u32 val); + int (*get_raw)(struct mcp3911 *adc, int channel, int *val); }; struct mcp3911 { @@ -170,6 +173,18 @@ static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask, u32 val, u8 len return mcp3911_write(adc, reg, val, len); } +static int mcp3911_read_s24(struct mcp3911 *const adc, u8 const reg, s32 *const val) +{ + u32 uval; + int const ret = mcp3911_read(adc, reg, &uval, 3); + + if (ret) + return ret; + + *val = sign_extend32(uval, 23); + return ret; +} + static int mcp3910_enable_offset(struct mcp3911 *adc, bool enable) { unsigned int mask = MCP3910_CONFIG0_EN_OFFCAL; @@ -194,6 +209,11 @@ static int mcp3910_set_offset(struct mcp3911 *adc, int channel, int val) return adc->chip->enable_offset(adc, 1); } +static int mcp3910_get_raw(struct mcp3911 *adc, int channel, s32 *val) +{ + return mcp3911_read_s24(adc, MCP3910_CHANNEL(channel), val); +} + static int mcp3911_enable_offset(struct mcp3911 *adc, bool enable) { unsigned int mask = MCP3911_STATUSCOM_EN_OFFCAL; @@ -218,6 +238,11 @@ static int mcp3911_set_offset(struct mcp3911 *adc, int channel, int val) return adc->chip->enable_offset(adc, 1); } +static int mcp3911_get_raw(struct mcp3911 *adc, int channel, s32 *val) +{ + return mcp3911_read_s24(adc, MCP3911_CHANNEL(channel), val); +} + static int mcp3910_get_osr(struct mcp3911 *adc, u32 *val) { int ret; @@ -321,12 +346,9 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev, guard(mutex)(&adc->lock); switch (mask) { case IIO_CHAN_INFO_RAW: - ret = mcp3911_read(adc, - MCP3911_CHANNEL(channel->channel), val, 3); + ret = adc->chip->get_raw(adc, channel->channel, val); if (ret) return ret; - - *val = sign_extend32(*val, 23); return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: ret = 
adc->chip->get_offset(adc, channel->channel, val); @@ -799,6 +821,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = { .get_offset = mcp3910_get_offset, .set_offset = mcp3910_set_offset, .set_scale = mcp3910_set_scale, + .get_raw = mcp3910_get_raw, }, [MCP3911] = { .channels = mcp3911_channels, @@ -810,6 +833,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = { .get_offset = mcp3911_get_offset, .set_offset = mcp3911_set_offset, .set_scale = mcp3911_set_scale, + .get_raw = mcp3911_get_raw, }, [MCP3912] = { .channels = mcp3912_channels, @@ -821,6 +845,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = { .get_offset = mcp3910_get_offset, .set_offset = mcp3910_set_offset, .set_scale = mcp3910_set_scale, + .get_raw = mcp3910_get_raw, }, [MCP3913] = { .channels = mcp3913_channels, @@ -832,6 +857,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = { .get_offset = mcp3910_get_offset, .set_offset = mcp3910_set_offset, .set_scale = mcp3910_set_scale, + .get_raw = mcp3910_get_raw, }, [MCP3914] = { .channels = mcp3914_channels, @@ -843,6 +869,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = { .get_offset = mcp3910_get_offset, .set_offset = mcp3910_set_offset, .set_scale = mcp3910_set_scale, + .get_raw = mcp3910_get_raw, }, [MCP3918] = { .channels = mcp3918_channels, @@ -854,6 +881,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = { .get_offset = mcp3910_get_offset, .set_offset = mcp3910_set_offset, .set_scale = mcp3910_set_scale, + .get_raw = mcp3910_get_raw, }, [MCP3919] = { .channels = mcp3919_channels, @@ -865,6 +893,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = { .get_offset = mcp3910_get_offset, .set_offset = mcp3910_set_offset, .set_scale = mcp3910_set_scale, + .get_raw = mcp3910_get_raw, }, }; static const struct of_device_id mcp3911_dt_ids[] = { diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c index 7ef249d832866..c3f9fa307b84c 100644 --- a/drivers/iio/adc/pac1934.c +++ b/drivers/iio/adc/pac1934.c @@ -1081,7 +1081,7 @@ static int pac1934_chip_identify(struct pac1934_chip_info *info) /* * documentation related to the ACPI device definition - * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC1934-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf + * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC193X-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf */ static int pac1934_acpi_parse_channel_config(struct i2c_client *client, struct pac1934_chip_info *info) diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 2cf148f16306d..56a125f56284f 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -4,7 +4,7 @@ # # When adding new entries keep the list in alphabetical order -obj-$(CONFIG_AD3552R) += ad3552r.o +obj-$(CONFIG_AD3552R) += ad3552r.o ad3552r-common.o obj-$(CONFIG_AD5360) += ad5360.o obj-$(CONFIG_AD5380) += ad5380.o obj-$(CONFIG_AD5421) += ad5421.o diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c new file mode 100644 index 0000000000000..94869ad15c27e --- /dev/null +++ b/drivers/iio/dac/ad3552r-common.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright (c) 2010-2024 Analog Devices Inc. 
+// Copyright (c) 2024 Baylibre, SAS + +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> + +#include "ad3552r.h" + +const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2] = { + [AD3552R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 }, + [AD3552R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 }, + [AD3552R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 }, + [AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 }, + [AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = { -10000, 10000 } +}; +EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, IIO_AD3552R); + +const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = { + [AD3542R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 }, + [AD3542R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 }, + [AD3542R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 }, + [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 }, + [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 } +}; +EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, IIO_AD3552R); + +/* Gain * AD3552R_GAIN_SCALE */ +static const s32 gains_scaling_table[] = { + [AD3552R_CH_GAIN_SCALING_1] = 1000, + [AD3552R_CH_GAIN_SCALING_0_5] = 500, + [AD3552R_CH_GAIN_SCALING_0_25] = 250, + [AD3552R_CH_GAIN_SCALING_0_125] = 125 +}; + +u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs) +{ + return FIELD_PREP(AD3552R_MASK_CH_RANGE_OVERRIDE, 1) | + FIELD_PREP(AD3552R_MASK_CH_GAIN_SCALING_P, p) | + FIELD_PREP(AD3552R_MASK_CH_GAIN_SCALING_N, n) | + FIELD_PREP(AD3552R_MASK_CH_OFFSET_BIT_8, abs(goffs)) | + FIELD_PREP(AD3552R_MASK_CH_OFFSET_POLARITY, goffs < 0); +} +EXPORT_SYMBOL_NS_GPL(ad3552r_calc_custom_gain, IIO_AD3552R); + +static void ad3552r_get_custom_range(struct ad3552r_ch_data *ch_data, + s32 *v_min, s32 *v_max) +{ + s64 vref, tmp, common, offset, gn, gp; + /* + * From datasheet formula (In Volts): + * Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03] + * Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03] + * Calculations are converted to millivolts + */ + vref = 2500; + /* 2.5 * 1.03 * 1000 (To mV) */ + common = 2575 * ch_data->rfb; + offset = ch_data->gain_offset; + + gn = gains_scaling_table[ch_data->n]; + tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common; + tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE); + *v_max = vref + tmp; + + gp = gains_scaling_table[ch_data->p]; + tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common; + tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE); + *v_min = vref - tmp; +} + +void ad3552r_calc_gain_and_offset(struct ad3552r_ch_data *ch_data, + const struct ad3552r_model_data *model_data) +{ + s32 idx, v_max, v_min, span, rem; + s64 tmp; + + if (ch_data->range_override) { + ad3552r_get_custom_range(ch_data, &v_min, &v_max); + } else { + /* Normal range */ + idx = ch_data->range; + v_min = model_data->ranges_table[idx][0]; + v_max = model_data->ranges_table[idx][1]; + } + + /* + * From datasheet formula: + * Vout = Span * (D / 65536) + Vmin + * Converted to scale and offset: + * Scale = Span / 65536 + * Offset = 65536 * Vmin / Span + * + * Remainders are in micros in order to be printed as + * IIO_VAL_INT_PLUS_MICRO + */ + span = v_max - v_min; + ch_data->scale_int = div_s64_rem(span, 65536, &rem); + /* Do operations in microvolts */ + ch_data->scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000, 65536); + + ch_data->offset_int = div_s64_rem(v_min * 65536, span, &rem); + tmp = (s64)rem * 1000000; + ch_data->offset_dec = div_s64(tmp, span); +} +EXPORT_SYMBOL_NS_GPL(ad3552r_calc_gain_and_offset, IIO_AD3552R); + +int ad3552r_get_ref_voltage(struct device *dev, u32 *val) +{ + int voltage; + int delta = 100000; + + voltage = devm_regulator_get_enable_read_voltage(dev,
"vref"); + if (voltage < 0 && voltage != -ENODEV) + return dev_err_probe(dev, voltage, + "Error getting vref voltage\n"); + + if (voltage == -ENODEV) { + if (device_property_read_bool(dev, "adi,vref-out-en")) + *val = AD3552R_INTERNAL_VREF_PIN_2P5V; + else + *val = AD3552R_INTERNAL_VREF_PIN_FLOATING; + + return 0; + } + + if (voltage > 2500000 + delta || voltage < 2500000 - delta) { + dev_warn(dev, "vref-supply must be 2.5V"); + return -EINVAL; + } + + *val = AD3552R_EXTERNAL_VREF_PIN_INPUT; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ad3552r_get_ref_voltage, IIO_AD3552R); + +int ad3552r_get_drive_strength(struct device *dev, u32 *val) +{ + int err; + u32 drive_strength; + + err = device_property_read_u32(dev, "adi,sdo-drive-strength", + &drive_strength); + if (err) + return err; + + if (drive_strength > 3) { + dev_err_probe(dev, -EINVAL, + "adi,sdo-drive-strength must be less than 4\n"); + return -EINVAL; + } + + *val = drive_strength; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ad3552r_get_drive_strength, IIO_AD3552R); + +int ad3552r_get_custom_gain(struct device *dev, struct fwnode_handle *child, + u8 *gs_p, u8 *gs_n, u16 *rfb, s16 *goffs) +{ + int err; + u32 val; + struct fwnode_handle *gain_child __free(fwnode_handle) = + fwnode_get_named_child_node(child, + "custom-output-range-config"); + + if (!gain_child) + return dev_err_probe(dev, -EINVAL, + "custom-output-range-config mandatory\n"); + + err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val); + if (err) + return dev_err_probe(dev, err, + "adi,gain-scaling-p mandatory\n"); + *gs_p = val; + + err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val); + if (err) + return dev_err_probe(dev, err, + "adi,gain-scaling-n property mandatory\n"); + *gs_n = val; + + err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val); + if (err) + return dev_err_probe(dev, err, + "adi,rfb-ohms mandatory\n"); + *rfb = val; + + err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val); + if (err) + return dev_err_probe(dev, err, + "adi,gain-offset mandatory\n"); + *goffs = val; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ad3552r_get_custom_gain, IIO_AD3552R); + +static int ad3552r_find_range(const struct ad3552r_model_data *model_info, + s32 *vals) +{ + int i; + + for (i = 0; i < model_info->num_ranges; i++) + if (vals[0] == model_info->ranges_table[i][0] * 1000 && + vals[1] == model_info->ranges_table[i][1] * 1000) + return i; + + return -EINVAL; +} + +int ad3552r_get_output_range(struct device *dev, + const struct ad3552r_model_data *model_info, + struct fwnode_handle *child, u32 *val) +{ + int ret; + s32 vals[2]; + + /* This property is optional, so returning -ENOENT if missing */ + if (!fwnode_property_present(child, "adi,output-range-microvolt")) + return -ENOENT; + + ret = fwnode_property_read_u32_array(child, + "adi,output-range-microvolt", + vals, 2); + if (ret) + return dev_err_probe(dev, ret, + "invalid adi,output-range-microvolt\n"); + + ret = ad3552r_find_range(model_info, vals); + if (ret < 0) + return dev_err_probe(dev, ret, + "invalid adi,output-range-microvolt value\n"); + + *val = ret; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(ad3552r_get_output_range, IIO_AD3552R); + +MODULE_DESCRIPTION("ad3552r common functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c index 390d3fab21478..5b2ce2aa67a47 100644 --- a/drivers/iio/dac/ad3552r.c +++ b/drivers/iio/dac/ad3552r.c @@ -6,271 +6,15 @@ * Copyright 2021 Analog Devices Inc. 
*/ #include +#include #include #include #include #include #include -#include #include -/* Register addresses */ -/* Primary address space */ -#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A 0x00 -#define AD3552R_MASK_SOFTWARE_RESET (BIT(7) | BIT(0)) -#define AD3552R_MASK_ADDR_ASCENSION BIT(5) -#define AD3552R_MASK_SDO_ACTIVE BIT(4) -#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B 0x01 -#define AD3552R_MASK_SINGLE_INST BIT(7) -#define AD3552R_MASK_SHORT_INSTRUCTION BIT(3) -#define AD3552R_REG_ADDR_DEVICE_CONFIG 0x02 -#define AD3552R_MASK_DEVICE_STATUS(n) BIT(4 + (n)) -#define AD3552R_MASK_CUSTOM_MODES GENMASK(3, 2) -#define AD3552R_MASK_OPERATING_MODES GENMASK(1, 0) -#define AD3552R_REG_ADDR_CHIP_TYPE 0x03 -#define AD3552R_MASK_CLASS GENMASK(7, 0) -#define AD3552R_REG_ADDR_PRODUCT_ID_L 0x04 -#define AD3552R_REG_ADDR_PRODUCT_ID_H 0x05 -#define AD3552R_REG_ADDR_CHIP_GRADE 0x06 -#define AD3552R_MASK_GRADE GENMASK(7, 4) -#define AD3552R_MASK_DEVICE_REVISION GENMASK(3, 0) -#define AD3552R_REG_ADDR_SCRATCH_PAD 0x0A -#define AD3552R_REG_ADDR_SPI_REVISION 0x0B -#define AD3552R_REG_ADDR_VENDOR_L 0x0C -#define AD3552R_REG_ADDR_VENDOR_H 0x0D -#define AD3552R_REG_ADDR_STREAM_MODE 0x0E -#define AD3552R_MASK_LENGTH GENMASK(7, 0) -#define AD3552R_REG_ADDR_TRANSFER_REGISTER 0x0F -#define AD3552R_MASK_MULTI_IO_MODE GENMASK(7, 6) -#define AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE BIT(2) -#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C 0x10 -#define AD3552R_MASK_CRC_ENABLE (GENMASK(7, 6) |\ - GENMASK(1, 0)) -#define AD3552R_MASK_STRICT_REGISTER_ACCESS BIT(5) -#define AD3552R_REG_ADDR_INTERFACE_STATUS_A 0x11 -#define AD3552R_MASK_INTERFACE_NOT_READY BIT(7) -#define AD3552R_MASK_CLOCK_COUNTING_ERROR BIT(5) -#define AD3552R_MASK_INVALID_OR_NO_CRC BIT(3) -#define AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER BIT(2) -#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS BIT(1) -#define AD3552R_MASK_REGISTER_ADDRESS_INVALID BIT(0) -#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D 0x14 -#define AD3552R_MASK_ALERT_ENABLE_PULLUP BIT(6) -#define AD3552R_MASK_MEM_CRC_EN BIT(4) -#define AD3552R_MASK_SDO_DRIVE_STRENGTH GENMASK(3, 2) -#define AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN BIT(1) -#define AD3552R_MASK_SPI_CONFIG_DDR BIT(0) -#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG 0x15 -#define AD3552R_MASK_IDUMP_FAST_MODE BIT(6) -#define AD3552R_MASK_SAMPLE_HOLD_DIFFERENTIAL_USER_EN BIT(5) -#define AD3552R_MASK_SAMPLE_HOLD_USER_TRIM GENMASK(4, 3) -#define AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE BIT(2) -#define AD3552R_MASK_REFERENCE_VOLTAGE_SEL GENMASK(1, 0) -#define AD3552R_REG_ADDR_ERR_ALARM_MASK 0x16 -#define AD3552R_MASK_REF_RANGE_ALARM BIT(6) -#define AD3552R_MASK_CLOCK_COUNT_ERR_ALARM BIT(5) -#define AD3552R_MASK_MEM_CRC_ERR_ALARM BIT(4) -#define AD3552R_MASK_SPI_CRC_ERR_ALARM BIT(3) -#define AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM BIT(2) -#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM BIT(1) -#define AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM BIT(0) -#define AD3552R_REG_ADDR_ERR_STATUS 0x17 -#define AD3552R_MASK_REF_RANGE_ERR_STATUS BIT(6) -#define AD3552R_MASK_DUAL_SPI_STREAM_EXCEEDS_DAC_ERR_STATUS BIT(5) -#define AD3552R_MASK_MEM_CRC_ERR_STATUS BIT(4) -#define AD3552R_MASK_RESET_STATUS BIT(0) -#define AD3552R_REG_ADDR_POWERDOWN_CONFIG 0x18 -#define AD3552R_MASK_CH_DAC_POWERDOWN(ch) BIT(4 + (ch)) -#define AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch) BIT(ch) -#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE 0x19 -#define AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) ((ch) ? 
GENMASK(7, 4) :\ - GENMASK(3, 0)) -#define AD3552R_REG_ADDR_CH_OFFSET(ch) (0x1B + (ch) * 2) -#define AD3552R_MASK_CH_OFFSET_BITS_0_7 GENMASK(7, 0) -#define AD3552R_REG_ADDR_CH_GAIN(ch) (0x1C + (ch) * 2) -#define AD3552R_MASK_CH_RANGE_OVERRIDE BIT(7) -#define AD3552R_MASK_CH_GAIN_SCALING_N GENMASK(6, 5) -#define AD3552R_MASK_CH_GAIN_SCALING_P GENMASK(4, 3) -#define AD3552R_MASK_CH_OFFSET_POLARITY BIT(2) -#define AD3552R_MASK_CH_OFFSET_BIT_8 BIT(0) -/* - * Secondary region - * For multibyte registers specify the highest address because the access is - * done in descending order - */ -#define AD3552R_SECONDARY_REGION_START 0x28 -#define AD3552R_REG_ADDR_HW_LDAC_16B 0x28 -#define AD3552R_REG_ADDR_CH_DAC_16B(ch) (0x2C - (1 - ch) * 2) -#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B 0x2E -#define AD3552R_REG_ADDR_CH_SELECT_16B 0x2F -#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B 0x31 -#define AD3552R_REG_ADDR_SW_LDAC_16B 0x32 -#define AD3552R_REG_ADDR_CH_INPUT_16B(ch) (0x36 - (1 - ch) * 2) -/* 3 bytes registers */ -#define AD3552R_REG_START_24B 0x37 -#define AD3552R_REG_ADDR_HW_LDAC_24B 0x37 -#define AD3552R_REG_ADDR_CH_DAC_24B(ch) (0x3D - (1 - ch) * 3) -#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B 0x40 -#define AD3552R_REG_ADDR_CH_SELECT_24B 0x41 -#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B 0x44 -#define AD3552R_REG_ADDR_SW_LDAC_24B 0x45 -#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - ch) * 3) - -/* Useful defines */ -#define AD3552R_MAX_CH 2 -#define AD3552R_MASK_CH(ch) BIT(ch) -#define AD3552R_MASK_ALL_CH GENMASK(1, 0) -#define AD3552R_MAX_REG_SIZE 3 -#define AD3552R_READ_BIT BIT(7) -#define AD3552R_ADDR_MASK GENMASK(6, 0) -#define AD3552R_MASK_DAC_12B 0xFFF0 -#define AD3552R_DEFAULT_CONFIG_B_VALUE 0x8 -#define AD3552R_SCRATCH_PAD_TEST_VAL1 0x34 -#define AD3552R_SCRATCH_PAD_TEST_VAL2 0xB2 -#define AD3552R_GAIN_SCALE 1000 -#define AD3552R_LDAC_PULSE_US 100 - -enum ad3552r_ch_vref_select { - /* Internal source with Vref I/O floating */ - AD3552R_INTERNAL_VREF_PIN_FLOATING, - /* Internal source with Vref I/O at 2.5V */ - AD3552R_INTERNAL_VREF_PIN_2P5V, - /* External source with Vref I/O as input */ - AD3552R_EXTERNAL_VREF_PIN_INPUT -}; - -enum ad3552r_id { - AD3541R_ID = 0x400b, - AD3542R_ID = 0x4009, - AD3551R_ID = 0x400a, - AD3552R_ID = 0x4008, -}; - -enum ad3552r_ch_output_range { - /* Range from 0 V to 2.5 V. Requires Rfb1x connection */ - AD3552R_CH_OUTPUT_RANGE_0__2P5V, - /* Range from 0 V to 5 V. Requires Rfb1x connection */ - AD3552R_CH_OUTPUT_RANGE_0__5V, - /* Range from 0 V to 10 V. Requires Rfb2x connection */ - AD3552R_CH_OUTPUT_RANGE_0__10V, - /* Range from -5 V to 5 V. Requires Rfb2x connection */ - AD3552R_CH_OUTPUT_RANGE_NEG_5__5V, - /* Range from -10 V to 10 V. Requires Rfb4x connection */ - AD3552R_CH_OUTPUT_RANGE_NEG_10__10V, -}; - -static const s32 ad3552r_ch_ranges[][2] = { - [AD3552R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500}, - [AD3552R_CH_OUTPUT_RANGE_0__5V] = {0, 5000}, - [AD3552R_CH_OUTPUT_RANGE_0__10V] = {0, 10000}, - [AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000}, - [AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = {-10000, 10000} -}; - -enum ad3542r_ch_output_range { - /* Range from 0 V to 2.5 V. Requires Rfb1x connection */ - AD3542R_CH_OUTPUT_RANGE_0__2P5V, - /* Range from 0 V to 3 V. Requires Rfb1x connection */ - AD3542R_CH_OUTPUT_RANGE_0__3V, - /* Range from 0 V to 5 V. Requires Rfb1x connection */ - AD3542R_CH_OUTPUT_RANGE_0__5V, - /* Range from 0 V to 10 V. Requires Rfb2x connection */ - AD3542R_CH_OUTPUT_RANGE_0__10V, - /* Range from -2.5 V to 7.5 V. 
Requires Rfb2x connection */ - AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V, - /* Range from -5 V to 5 V. Requires Rfb2x connection */ - AD3542R_CH_OUTPUT_RANGE_NEG_5__5V, -}; - -static const s32 ad3542r_ch_ranges[][2] = { - [AD3542R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500}, - [AD3542R_CH_OUTPUT_RANGE_0__3V] = {0, 3000}, - [AD3542R_CH_OUTPUT_RANGE_0__5V] = {0, 5000}, - [AD3542R_CH_OUTPUT_RANGE_0__10V] = {0, 10000}, - [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = {-2500, 7500}, - [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000} -}; - -enum ad3552r_ch_gain_scaling { - /* Gain scaling of 1 */ - AD3552R_CH_GAIN_SCALING_1, - /* Gain scaling of 0.5 */ - AD3552R_CH_GAIN_SCALING_0_5, - /* Gain scaling of 0.25 */ - AD3552R_CH_GAIN_SCALING_0_25, - /* Gain scaling of 0.125 */ - AD3552R_CH_GAIN_SCALING_0_125, -}; - -/* Gain * AD3552R_GAIN_SCALE */ -static const s32 gains_scaling_table[] = { - [AD3552R_CH_GAIN_SCALING_1] = 1000, - [AD3552R_CH_GAIN_SCALING_0_5] = 500, - [AD3552R_CH_GAIN_SCALING_0_25] = 250, - [AD3552R_CH_GAIN_SCALING_0_125] = 125 -}; - -enum ad3552r_dev_attributes { - /* - Direct register values */ - /* From 0-3 */ - AD3552R_SDO_DRIVE_STRENGTH, - /* - * 0 -> Internal Vref, vref_io pin floating (default) - * 1 -> Internal Vref, vref_io driven by internal vref - * 2 or 3 -> External Vref - */ - AD3552R_VREF_SELECT, - /* Read registers in ascending order if set. Else descending */ - AD3552R_ADDR_ASCENSION, -}; - -enum ad3552r_ch_attributes { - /* DAC powerdown */ - AD3552R_CH_DAC_POWERDOWN, - /* DAC amplifier powerdown */ - AD3552R_CH_AMPLIFIER_POWERDOWN, - /* Select the output range. Select from enum ad3552r_ch_output_range */ - AD3552R_CH_OUTPUT_RANGE_SEL, - /* - * Over-rider the range selector in order to manually set the output - * voltage range - */ - AD3552R_CH_RANGE_OVERRIDE, - /* Manually set the offset voltage */ - AD3552R_CH_GAIN_OFFSET, - /* Sets the polarity of the offset. */ - AD3552R_CH_GAIN_OFFSET_POLARITY, - /* PDAC gain scaling */ - AD3552R_CH_GAIN_SCALING_P, - /* NDAC gain scaling */ - AD3552R_CH_GAIN_SCALING_N, - /* Rfb value */ - AD3552R_CH_RFB, - /* Channel select. 
When set allow Input -> DAC and Mask -> DAC */ - AD3552R_CH_SELECT, -}; - -struct ad3552r_ch_data { - s32 scale_int; - s32 scale_dec; - s32 offset_int; - s32 offset_dec; - s16 gain_offset; - u16 rfb; - u8 n; - u8 p; - u8 range; - bool range_override; -}; - -struct ad3552r_model_data { - const char *model_name; - enum ad3552r_id chip_id; - unsigned int num_hw_channels; - const s32 (*ranges_table)[2]; - int num_ranges; - bool requires_output_range; -}; +#include "ad3552r.h" struct ad3552r_desc { const struct ad3552r_model_data *model_data; @@ -285,45 +29,6 @@ struct ad3552r_desc { unsigned int num_ch; }; -static const u16 addr_mask_map[][2] = { - [AD3552R_ADDR_ASCENSION] = { - AD3552R_REG_ADDR_INTERFACE_CONFIG_A, - AD3552R_MASK_ADDR_ASCENSION - }, - [AD3552R_SDO_DRIVE_STRENGTH] = { - AD3552R_REG_ADDR_INTERFACE_CONFIG_D, - AD3552R_MASK_SDO_DRIVE_STRENGTH - }, - [AD3552R_VREF_SELECT] = { - AD3552R_REG_ADDR_SH_REFERENCE_CONFIG, - AD3552R_MASK_REFERENCE_VOLTAGE_SEL - }, -}; - -/* 0 -> reg addr, 1->ch0 mask, 2->ch1 mask */ -static const u16 addr_mask_map_ch[][3] = { - [AD3552R_CH_DAC_POWERDOWN] = { - AD3552R_REG_ADDR_POWERDOWN_CONFIG, - AD3552R_MASK_CH_DAC_POWERDOWN(0), - AD3552R_MASK_CH_DAC_POWERDOWN(1) - }, - [AD3552R_CH_AMPLIFIER_POWERDOWN] = { - AD3552R_REG_ADDR_POWERDOWN_CONFIG, - AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0), - AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1) - }, - [AD3552R_CH_OUTPUT_RANGE_SEL] = { - AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE, - AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0), - AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1) - }, - [AD3552R_CH_SELECT] = { - AD3552R_REG_ADDR_CH_SELECT_16B, - AD3552R_MASK_CH(0), - AD3552R_MASK_CH(1) - } -}; - static u8 _ad3552r_reg_len(u8 addr) { switch (addr) { @@ -399,11 +104,6 @@ static int ad3552r_read_reg(struct ad3552r_desc *dac, u8 addr, u16 *val) return 0; } -static u16 ad3552r_field_prep(u16 val, u16 mask) -{ - return (val << __ffs(mask)) & mask; -} - /* Update field of a register, shift val if needed */ static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask, u16 val) @@ -416,21 +116,11 @@ static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask, return ret; reg &= ~mask; - reg |= ad3552r_field_prep(val, mask); + reg |= val; return ad3552r_write_reg(dac, addr, reg); } -static int ad3552r_set_ch_value(struct ad3552r_desc *dac, - enum ad3552r_ch_attributes attr, - u8 ch, - u16 val) -{ - /* Update register related to attributes in chip */ - return ad3552r_update_reg_field(dac, addr_mask_map_ch[attr][0], - addr_mask_map_ch[attr][ch + 1], val); -} - #define AD3552R_CH_DAC(_idx) ((struct iio_chan_spec) { \ .type = IIO_VOLTAGE, \ .output = true, \ @@ -510,8 +200,14 @@ static int ad3552r_write_raw(struct iio_dev *indio_dev, val); break; case IIO_CHAN_INFO_ENABLE: - err = ad3552r_set_ch_value(dac, AD3552R_CH_DAC_POWERDOWN, - chan->channel, !val); + if (chan->channel == 0) + val = FIELD_PREP(AD3552R_MASK_CH_DAC_POWERDOWN(0), !val); + else + val = FIELD_PREP(AD3552R_MASK_CH_DAC_POWERDOWN(1), !val); + + err = ad3552r_update_reg_field(dac, AD3552R_REG_ADDR_POWERDOWN_CONFIG, + AD3552R_MASK_CH_DAC_POWERDOWN(chan->channel), + val); break; default: err = -EINVAL; @@ -721,83 +417,9 @@ static int ad3552r_reset(struct ad3552r_desc *dac) return ret; return ad3552r_update_reg_field(dac, - addr_mask_map[AD3552R_ADDR_ASCENSION][0], - addr_mask_map[AD3552R_ADDR_ASCENSION][1], - val); -} - -static void ad3552r_get_custom_range(struct ad3552r_desc *dac, s32 i, s32 *v_min, - s32 *v_max) -{ - s64 vref, tmp, common, offset, gn, gp; - /* - * 
From datasheet formula (In Volts): - * Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03] - * Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03] - * Calculus are converted to milivolts - */ - vref = 2500; - /* 2.5 * 1.03 * 1000 (To mV) */ - common = 2575 * dac->ch_data[i].rfb; - offset = dac->ch_data[i].gain_offset; - - gn = gains_scaling_table[dac->ch_data[i].n]; - tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common; - tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE); - *v_max = vref + tmp; - - gp = gains_scaling_table[dac->ch_data[i].p]; - tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common; - tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE); - *v_min = vref - tmp; -} - -static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch) -{ - s32 idx, v_max, v_min, span, rem; - s64 tmp; - - if (dac->ch_data[ch].range_override) { - ad3552r_get_custom_range(dac, ch, &v_min, &v_max); - } else { - /* Normal range */ - idx = dac->ch_data[ch].range; - v_min = dac->model_data->ranges_table[idx][0]; - v_max = dac->model_data->ranges_table[idx][1]; - } - - /* - * From datasheet formula: - * Vout = Span * (D / 65536) + Vmin - * Converted to scale and offset: - * Scale = Span / 65536 - * Offset = 65536 * Vmin / Span - * - * Reminders are in micros in order to be printed as - * IIO_VAL_INT_PLUS_MICRO - */ - span = v_max - v_min; - dac->ch_data[ch].scale_int = div_s64_rem(span, 65536, &rem); - /* Do operations in microvolts */ - dac->ch_data[ch].scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000, - 65536); - - dac->ch_data[ch].offset_int = div_s64_rem(v_min * 65536, span, &rem); - tmp = (s64)rem * 1000000; - dac->ch_data[ch].offset_dec = div_s64(tmp, span); -} - -static int ad3552r_find_range(const struct ad3552r_model_data *model_data, - s32 *vals) -{ - int i; - - for (i = 0; i < model_data->num_ranges; i++) - if (vals[0] == model_data->ranges_table[i][0] * 1000 && - vals[1] == model_data->ranges_table[i][1] * 1000) - return i; - - return -EINVAL; + AD3552R_REG_ADDR_INTERFACE_CONFIG_A, + AD3552R_MASK_ADDR_ASCENSION, + FIELD_PREP(AD3552R_MASK_ADDR_ASCENSION, val)); } static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac, @@ -805,57 +427,30 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac, u32 ch) { struct device *dev = &dac->spi->dev; - u32 val; int err; u8 addr; - u16 reg = 0, offset; - - struct fwnode_handle *gain_child __free(fwnode_handle) - = fwnode_get_named_child_node(child, - "custom-output-range-config"); - if (!gain_child) - return dev_err_probe(dev, -EINVAL, - "mandatory custom-output-range-config property missing\n"); - - dac->ch_data[ch].range_override = 1; - reg |= ad3552r_field_prep(1, AD3552R_MASK_CH_RANGE_OVERRIDE); - - err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val); - if (err) - return dev_err_probe(dev, err, - "mandatory adi,gain-scaling-p property missing\n"); - reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_P); - dac->ch_data[ch].p = val; - - err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val); - if (err) - return dev_err_probe(dev, err, - "mandatory adi,gain-scaling-n property missing\n"); - reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_N); - dac->ch_data[ch].n = val; - - err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val); - if (err) - return dev_err_probe(dev, err, - "mandatory adi,rfb-ohms property missing\n"); - dac->ch_data[ch].rfb = val; + u16 reg; - err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val); + err = 
ad3552r_get_custom_gain(dev, child, + &dac->ch_data[ch].p, + &dac->ch_data[ch].n, + &dac->ch_data[ch].rfb, + &dac->ch_data[ch].gain_offset); if (err) - return dev_err_probe(dev, err, - "mandatory adi,gain-offset property missing\n"); - dac->ch_data[ch].gain_offset = val; + return err; - offset = abs((s32)val); - reg |= ad3552r_field_prep((offset >> 8), AD3552R_MASK_CH_OFFSET_BIT_8); + dac->ch_data[ch].range_override = 1; - reg |= ad3552r_field_prep((s32)val < 0, AD3552R_MASK_CH_OFFSET_POLARITY); addr = AD3552R_REG_ADDR_CH_GAIN(ch); err = ad3552r_write_reg(dac, addr, - offset & AD3552R_MASK_CH_OFFSET_BITS_0_7); + abs((s32)dac->ch_data[ch].gain_offset) & + AD3552R_MASK_CH_OFFSET_BITS_0_7); if (err) return dev_err_probe(dev, err, "Error writing register\n"); + reg = ad3552r_calc_custom_gain(dac->ch_data[ch].p, dac->ch_data[ch].n, + dac->ch_data[ch].gain_offset); + err = ad3552r_write_reg(dac, addr, reg); if (err) return dev_err_probe(dev, err, "Error writing register\n"); @@ -866,49 +461,31 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac, static int ad3552r_configure_device(struct ad3552r_desc *dac) { struct device *dev = &dac->spi->dev; - int err, cnt = 0, voltage, delta = 100000; - u32 vals[2], val, ch; + int err, cnt = 0; + u32 val, ch; dac->gpio_ldac = devm_gpiod_get_optional(dev, "ldac", GPIOD_OUT_HIGH); if (IS_ERR(dac->gpio_ldac)) return dev_err_probe(dev, PTR_ERR(dac->gpio_ldac), "Error getting gpio ldac"); - voltage = devm_regulator_get_enable_read_voltage(dev, "vref"); - if (voltage < 0 && voltage != -ENODEV) - return dev_err_probe(dev, voltage, "Error getting vref voltage\n"); - - if (voltage == -ENODEV) { - if (device_property_read_bool(dev, "adi,vref-out-en")) - val = AD3552R_INTERNAL_VREF_PIN_2P5V; - else - val = AD3552R_INTERNAL_VREF_PIN_FLOATING; - } else { - if (voltage > 2500000 + delta || voltage < 2500000 - delta) { - dev_warn(dev, "vref-supply must be 2.5V"); - return -EINVAL; - } - val = AD3552R_EXTERNAL_VREF_PIN_INPUT; - } + err = ad3552r_get_ref_voltage(dev, &val); + if (err < 0) + return err; err = ad3552r_update_reg_field(dac, - addr_mask_map[AD3552R_VREF_SELECT][0], - addr_mask_map[AD3552R_VREF_SELECT][1], - val); + AD3552R_REG_ADDR_SH_REFERENCE_CONFIG, + AD3552R_MASK_REFERENCE_VOLTAGE_SEL, + FIELD_PREP(AD3552R_MASK_REFERENCE_VOLTAGE_SEL, val)); if (err) return err; - err = device_property_read_u32(dev, "adi,sdo-drive-strength", &val); + err = ad3552r_get_drive_strength(dev, &val); if (!err) { - if (val > 3) { - dev_err(dev, "adi,sdo-drive-strength must be less than 4\n"); - return -EINVAL; - } - err = ad3552r_update_reg_field(dac, - addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][0], - addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][1], - val); + AD3552R_REG_ADDR_INTERFACE_CONFIG_D, + AD3552R_MASK_SDO_DRIVE_STRENGTH, + FIELD_PREP(AD3552R_MASK_SDO_DRIVE_STRENGTH, val)); if (err) return err; } @@ -929,24 +506,21 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac) "reg must be less than %d\n", dac->model_data->num_hw_channels); - if (fwnode_property_present(child, "adi,output-range-microvolt")) { - err = fwnode_property_read_u32_array(child, - "adi,output-range-microvolt", - vals, - 2); - if (err) - return dev_err_probe(dev, err, - "adi,output-range-microvolt property could not be parsed\n"); - - err = ad3552r_find_range(dac->model_data, vals); - if (err < 0) - return dev_err_probe(dev, err, - "Invalid adi,output-range-microvolt value\n"); - - val = err; - err = ad3552r_set_ch_value(dac, - AD3552R_CH_OUTPUT_RANGE_SEL, - ch, val); + err = 
ad3552r_get_output_range(dev, dac->model_data, + child, &val); + if (err && err != -ENOENT) + return err; + + if (!err) { + if (ch == 0) + val = FIELD_PREP(AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0), val); + else + val = FIELD_PREP(AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1), val); + + err = ad3552r_update_reg_field(dac, + AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE, + AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch), + val); if (err) return err; @@ -961,10 +535,17 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac) return err; } - ad3552r_calc_gain_and_offset(dac, ch); + ad3552r_calc_gain_and_offset(&dac->ch_data[ch], dac->model_data); dac->enabled_ch |= BIT(ch); - err = ad3552r_set_ch_value(dac, AD3552R_CH_SELECT, ch, 1); + if (ch == 0) + val = FIELD_PREP(AD3552R_MASK_CH(0), 1); + else + val = FIELD_PREP(AD3552R_MASK_CH(1), 1); + + err = ad3552r_update_reg_field(dac, + AD3552R_REG_ADDR_CH_SELECT_16B, + AD3552R_MASK_CH(ch), val); if (err < 0) return err; @@ -976,8 +557,15 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac) /* Disable unused channels */ for_each_clear_bit(ch, &dac->enabled_ch, dac->model_data->num_hw_channels) { - err = ad3552r_set_ch_value(dac, AD3552R_CH_AMPLIFIER_POWERDOWN, - ch, 1); + if (ch == 0) + val = FIELD_PREP(AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0), 1); + else + val = FIELD_PREP(AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1), 1); + + err = ad3552r_update_reg_field(dac, + AD3552R_REG_ADDR_POWERDOWN_CONFIG, + AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch), + val); if (err) return err; } @@ -1146,3 +734,4 @@ module_spi_driver(ad3552r_driver); MODULE_AUTHOR("Mihail Chindris "); MODULE_DESCRIPTION("Analog Device AD3552R DAC"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_AD3552R); diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h new file mode 100644 index 0000000000000..c20f64f80d5db --- /dev/null +++ b/drivers/iio/dac/ad3552r.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * AD3552R Digital <-> Analog converters common header + * + * Copyright 2021-2024 Analog Devices Inc. 
+ * Author: Angelo Dureghello + */ + +#ifndef __DRIVERS_IIO_DAC_AD3552R_H__ +#define __DRIVERS_IIO_DAC_AD3552R_H__ + +/* Register addresses */ +/* Primary address space */ +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A 0x00 +#define AD3552R_MASK_SOFTWARE_RESET (BIT(7) | BIT(0)) +#define AD3552R_MASK_ADDR_ASCENSION BIT(5) +#define AD3552R_MASK_SDO_ACTIVE BIT(4) +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B 0x01 +#define AD3552R_MASK_SINGLE_INST BIT(7) +#define AD3552R_MASK_SHORT_INSTRUCTION BIT(3) +#define AD3552R_REG_ADDR_DEVICE_CONFIG 0x02 +#define AD3552R_MASK_DEVICE_STATUS(n) BIT(4 + (n)) +#define AD3552R_MASK_CUSTOM_MODES GENMASK(3, 2) +#define AD3552R_MASK_OPERATING_MODES GENMASK(1, 0) +#define AD3552R_REG_ADDR_CHIP_TYPE 0x03 +#define AD3552R_MASK_CLASS GENMASK(7, 0) +#define AD3552R_REG_ADDR_PRODUCT_ID_L 0x04 +#define AD3552R_REG_ADDR_PRODUCT_ID_H 0x05 +#define AD3552R_REG_ADDR_CHIP_GRADE 0x06 +#define AD3552R_MASK_GRADE GENMASK(7, 4) +#define AD3552R_MASK_DEVICE_REVISION GENMASK(3, 0) +#define AD3552R_REG_ADDR_SCRATCH_PAD 0x0A +#define AD3552R_REG_ADDR_SPI_REVISION 0x0B +#define AD3552R_REG_ADDR_VENDOR_L 0x0C +#define AD3552R_REG_ADDR_VENDOR_H 0x0D +#define AD3552R_REG_ADDR_STREAM_MODE 0x0E +#define AD3552R_MASK_LENGTH GENMASK(7, 0) +#define AD3552R_REG_ADDR_TRANSFER_REGISTER 0x0F +#define AD3552R_MASK_MULTI_IO_MODE GENMASK(7, 6) +#define AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE BIT(2) +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C 0x10 +#define AD3552R_MASK_CRC_ENABLE \ + (GENMASK(7, 6) | GENMASK(1, 0)) +#define AD3552R_MASK_STRICT_REGISTER_ACCESS BIT(5) +#define AD3552R_REG_ADDR_INTERFACE_STATUS_A 0x11 +#define AD3552R_MASK_INTERFACE_NOT_READY BIT(7) +#define AD3552R_MASK_CLOCK_COUNTING_ERROR BIT(5) +#define AD3552R_MASK_INVALID_OR_NO_CRC BIT(3) +#define AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER BIT(2) +#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS BIT(1) +#define AD3552R_MASK_REGISTER_ADDRESS_INVALID BIT(0) +#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D 0x14 +#define AD3552R_MASK_ALERT_ENABLE_PULLUP BIT(6) +#define AD3552R_MASK_MEM_CRC_EN BIT(4) +#define AD3552R_MASK_SDO_DRIVE_STRENGTH GENMASK(3, 2) +#define AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN BIT(1) +#define AD3552R_MASK_SPI_CONFIG_DDR BIT(0) +#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG 0x15 +#define AD3552R_MASK_IDUMP_FAST_MODE BIT(6) +#define AD3552R_MASK_SAMPLE_HOLD_DIFF_USER_EN BIT(5) +#define AD3552R_MASK_SAMPLE_HOLD_USER_TRIM GENMASK(4, 3) +#define AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE BIT(2) +#define AD3552R_MASK_REFERENCE_VOLTAGE_SEL GENMASK(1, 0) +#define AD3552R_REG_ADDR_ERR_ALARM_MASK 0x16 +#define AD3552R_MASK_REF_RANGE_ALARM BIT(6) +#define AD3552R_MASK_CLOCK_COUNT_ERR_ALARM BIT(5) +#define AD3552R_MASK_MEM_CRC_ERR_ALARM BIT(4) +#define AD3552R_MASK_SPI_CRC_ERR_ALARM BIT(3) +#define AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM BIT(2) +#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM BIT(1) +#define AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM BIT(0) +#define AD3552R_REG_ADDR_ERR_STATUS 0x17 +#define AD3552R_MASK_REF_RANGE_ERR_STATUS BIT(6) +#define AD3552R_MASK_STREAM_EXCEEDS_DAC_ERR_STATUS BIT(5) +#define AD3552R_MASK_MEM_CRC_ERR_STATUS BIT(4) +#define AD3552R_MASK_RESET_STATUS BIT(0) +#define AD3552R_REG_ADDR_POWERDOWN_CONFIG 0x18 +#define AD3552R_MASK_CH_DAC_POWERDOWN(ch) BIT(4 + (ch)) +#define AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch) BIT(ch) +#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE 0x19 +#define AD3552R_MASK_CH0_RANGE GENMASK(2, 0) +#define AD3552R_MASK_CH1_RANGE GENMASK(6, 4) +#define AD3552R_MASK_CH_OUTPUT_RANGE 
GENMASK(7, 0) +#define AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) \ + ((ch) ? GENMASK(7, 4) : GENMASK(3, 0)) +#define AD3552R_REG_ADDR_CH_OFFSET(ch) (0x1B + (ch) * 2) +#define AD3552R_MASK_CH_OFFSET_BITS_0_7 GENMASK(7, 0) +#define AD3552R_REG_ADDR_CH_GAIN(ch) (0x1C + (ch) * 2) +#define AD3552R_MASK_CH_RANGE_OVERRIDE BIT(7) +#define AD3552R_MASK_CH_GAIN_SCALING_N GENMASK(6, 5) +#define AD3552R_MASK_CH_GAIN_SCALING_P GENMASK(4, 3) +#define AD3552R_MASK_CH_OFFSET_POLARITY BIT(2) +#define AD3552R_MASK_CH_OFFSET_BIT_8 BIT(8) +/* + * Secondary region + * For multibyte registers specify the highest address because the access is + * done in descending order + */ +#define AD3552R_SECONDARY_REGION_START 0x28 +#define AD3552R_REG_ADDR_HW_LDAC_16B 0x28 +#define AD3552R_REG_ADDR_CH_DAC_16B(ch) (0x2C - (1 - (ch)) * 2) +#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B 0x2E +#define AD3552R_REG_ADDR_CH_SELECT_16B 0x2F +#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B 0x31 +#define AD3552R_REG_ADDR_SW_LDAC_16B 0x32 +#define AD3552R_REG_ADDR_CH_INPUT_16B(ch) (0x36 - (1 - (ch)) * 2) +/* 3 bytes registers */ +#define AD3552R_REG_START_24B 0x37 +#define AD3552R_REG_ADDR_HW_LDAC_24B 0x37 +#define AD3552R_REG_ADDR_CH_DAC_24B(ch) (0x3D - (1 - (ch)) * 3) +#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B 0x40 +#define AD3552R_REG_ADDR_CH_SELECT_24B 0x41 +#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B 0x44 +#define AD3552R_REG_ADDR_SW_LDAC_24B 0x45 +#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - (ch)) * 3) + +#define AD3552R_MAX_CH 2 +#define AD3552R_MASK_CH(ch) BIT(ch) +#define AD3552R_MASK_ALL_CH GENMASK(1, 0) +#define AD3552R_MAX_REG_SIZE 3 +#define AD3552R_READ_BIT BIT(7) +#define AD3552R_ADDR_MASK GENMASK(6, 0) +#define AD3552R_MASK_DAC_12B GENMASK(15, 4) +#define AD3552R_DEFAULT_CONFIG_B_VALUE 0x8 +#define AD3552R_SCRATCH_PAD_TEST_VAL1 0x34 +#define AD3552R_SCRATCH_PAD_TEST_VAL2 0xB2 +#define AD3552R_GAIN_SCALE 1000 +#define AD3552R_LDAC_PULSE_US 100 + +#define AD3552R_MAX_RANGES 5 +#define AD3542R_MAX_RANGES 5 +#define AD3552R_QUAD_SPI 2 + +extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2]; +extern const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2]; + +enum ad3552r_id { + AD3541R_ID = 0x400b, + AD3542R_ID = 0x4009, + AD3551R_ID = 0x400a, + AD3552R_ID = 0x4008, +}; + +struct ad3552r_model_data { + const char *model_name; + enum ad3552r_id chip_id; + unsigned int num_hw_channels; + const s32 (*ranges_table)[2]; + int num_ranges; + bool requires_output_range; +}; + +struct ad3552r_ch_data { + s32 scale_int; + s32 scale_dec; + s32 offset_int; + s32 offset_dec; + s16 gain_offset; + u16 rfb; + u8 n; + u8 p; + u8 range; + bool range_override; +}; + +enum ad3552r_ch_gain_scaling { + /* Gain scaling of 1 */ + AD3552R_CH_GAIN_SCALING_1, + /* Gain scaling of 0.5 */ + AD3552R_CH_GAIN_SCALING_0_5, + /* Gain scaling of 0.25 */ + AD3552R_CH_GAIN_SCALING_0_25, + /* Gain scaling of 0.125 */ + AD3552R_CH_GAIN_SCALING_0_125, +}; + +enum ad3552r_ch_vref_select { + /* Internal source with Vref I/O floating */ + AD3552R_INTERNAL_VREF_PIN_FLOATING, + /* Internal source with Vref I/O at 2.5V */ + AD3552R_INTERNAL_VREF_PIN_2P5V, + /* External source with Vref I/O as input */ + AD3552R_EXTERNAL_VREF_PIN_INPUT +}; + +enum ad3542r_ch_output_range { + /* Range from 0 V to 2.5 V. Requires Rfb1x connection */ + AD3542R_CH_OUTPUT_RANGE_0__2P5V, + /* Range from 0 V to 5 V. Requires Rfb1x connection */ + AD3542R_CH_OUTPUT_RANGE_0__5V, + /* Range from 0 V to 10 V. 
Requires Rfb2x connection */ + AD3542R_CH_OUTPUT_RANGE_0__10V, + /* Range from -5 V to 5 V. Requires Rfb2x connection */ + AD3542R_CH_OUTPUT_RANGE_NEG_5__5V, + /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */ + AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V, +}; + +enum ad3552r_ch_output_range { + /* Range from 0 V to 2.5 V. Requires Rfb1x connection */ + AD3552R_CH_OUTPUT_RANGE_0__2P5V, + /* Range from 0 V to 5 V. Requires Rfb1x connection */ + AD3552R_CH_OUTPUT_RANGE_0__5V, + /* Range from 0 V to 10 V. Requires Rfb2x connection */ + AD3552R_CH_OUTPUT_RANGE_0__10V, + /* Range from -5 V to 5 V. Requires Rfb2x connection */ + AD3552R_CH_OUTPUT_RANGE_NEG_5__5V, + /* Range from -10 V to 10 V. Requires Rfb4x connection */ + AD3552R_CH_OUTPUT_RANGE_NEG_10__10V, +}; + +int ad3552r_get_output_range(struct device *dev, + const struct ad3552r_model_data *model_info, + struct fwnode_handle *child, u32 *val); +int ad3552r_get_custom_gain(struct device *dev, struct fwnode_handle *child, + u8 *gs_p, u8 *gs_n, u16 *rfb, s16 *goffs); +u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs); +int ad3552r_get_ref_voltage(struct device *dev, u32 *val); +int ad3552r_get_drive_strength(struct device *dev, u32 *val); +void ad3552r_calc_gain_and_offset(struct ad3552r_ch_data *ch_data, + const struct ad3552r_model_data *model_data); + +#endif /* __DRIVERS_IIO_DAC_AD3552R_H__ */ diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c index d85b7d3de8660..cc8ce0fe74e7c 100644 --- a/drivers/iio/filter/admv8818.c +++ b/drivers/iio/filter/admv8818.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -70,6 +71,16 @@ #define ADMV8818_HPF_WR0_MSK GENMASK(7, 4) #define ADMV8818_LPF_WR0_MSK GENMASK(3, 0) +#define ADMV8818_BAND_BYPASS 0 +#define ADMV8818_BAND_MIN 1 +#define ADMV8818_BAND_MAX 4 +#define ADMV8818_BAND_CORNER_LOW 0 +#define ADMV8818_BAND_CORNER_HIGH 1 + +#define ADMV8818_STATE_MIN 0 +#define ADMV8818_STATE_MAX 15 +#define ADMV8818_NUM_STATES 16 + enum { ADMV8818_BW_FREQ, ADMV8818_CENTER_FREQ @@ -90,20 +101,24 @@ struct admv8818_state { struct mutex lock; unsigned int filter_mode; u64 cf_hz; + u64 lpf_margin_hz; + u64 hpf_margin_hz; }; -static const unsigned long long freq_range_hpf[4][2] = { +static const unsigned long long freq_range_hpf[5][2] = { + {0ULL, 0ULL}, /* bypass */ {1750000000ULL, 3550000000ULL}, {3400000000ULL, 7250000000ULL}, {6600000000, 12000000000}, {12500000000, 19900000000} }; -static const unsigned long long freq_range_lpf[4][2] = { +static const unsigned long long freq_range_lpf[5][2] = { + {U64_MAX, U64_MAX}, /* bypass */ {2050000000ULL, 3850000000ULL}, {3350000000ULL, 7250000000ULL}, {7000000000, 13000000000}, - {12550000000, 18500000000} + {12550000000, 18850000000} }; static const struct regmap_config admv8818_regmap_config = { @@ -121,44 +136,59 @@ static const char * const admv8818_modes[] = { static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq) { - unsigned int hpf_step = 0, hpf_band = 0, i, j; - u64 freq_step; - int ret; + int band, state, ret; + unsigned int hpf_state = ADMV8818_STATE_MIN, hpf_band = ADMV8818_BAND_BYPASS; + u64 freq_error, min_freq_error, freq_corner, freq_step; - if (freq < freq_range_hpf[0][0]) + if (freq < freq_range_hpf[ADMV8818_BAND_MIN][ADMV8818_BAND_CORNER_LOW]) goto hpf_write; - if (freq > freq_range_hpf[3][1]) { - hpf_step = 15; - hpf_band = 4; - + if (freq >= freq_range_hpf[ADMV8818_BAND_MAX][ADMV8818_BAND_CORNER_HIGH]) { + hpf_state = ADMV8818_STATE_MAX; + hpf_band = ADMV8818_BAND_MAX; 
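	/*
	 * Corner frequencies within a band are evenly spaced: with the
	 * freq_range_hpf[] table above, HPF band 1 spans 1.75 GHz to
	 * 3.55 GHz, so the per-state step is (3550 - 1750) MHz / 15 =
	 * 120 MHz and, for example, state 5 sits at 1750 + 5 * 120 =
	 * 2350 MHz. Any target at or above the highest corner (19.9 GHz)
	 * clamps to band 4, state 15 here.
	 */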
goto hpf_write; } - for (i = 0; i < 4; i++) { - freq_step = div_u64((freq_range_hpf[i][1] - - freq_range_hpf[i][0]), 15); + /* Close HPF frequency gap between 12 and 12.5 GHz */ + if (freq >= 12000ULL * HZ_PER_MHZ && freq < 12500ULL * HZ_PER_MHZ) { + hpf_state = ADMV8818_STATE_MAX; + hpf_band = 3; + goto hpf_write; + } - if (freq > freq_range_hpf[i][0] && - (freq < freq_range_hpf[i][1] + freq_step)) { - hpf_band = i + 1; + min_freq_error = U64_MAX; + for (band = ADMV8818_BAND_MIN; band <= ADMV8818_BAND_MAX; band++) { + /* + * This (and therefore all other ranges) has a corner + * frequency higher than the target frequency. + */ + if (freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW] > freq) + break; - for (j = 1; j <= 16; j++) { - if (freq < (freq_range_hpf[i][0] + (freq_step * j))) { - hpf_step = j - 1; - break; - } + freq_step = freq_range_hpf[band][ADMV8818_BAND_CORNER_HIGH] - + freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW]; + freq_step = div_u64(freq_step, ADMV8818_NUM_STATES - 1); + + for (state = ADMV8818_STATE_MIN; state <= ADMV8818_STATE_MAX; state++) { + freq_corner = freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW] + + freq_step * state; + + /* + * This (and therefore all other states) has a corner + * frequency higher than the target frequency. + */ + if (freq_corner > freq) + break; + + freq_error = freq - freq_corner; + if (freq_error < min_freq_error) { + min_freq_error = freq_error; + hpf_state = state; + hpf_band = band; } - break; } } - /* Close HPF frequency gap between 12 and 12.5 GHz */ - if (freq >= 12000 * HZ_PER_MHZ && freq <= 12500 * HZ_PER_MHZ) { - hpf_band = 3; - hpf_step = 15; - } - hpf_write: ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW, ADMV8818_SW_IN_SET_WR0_MSK | @@ -170,7 +200,7 @@ static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq) return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER, ADMV8818_HPF_WR0_MSK, - FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_step)); + FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_state)); } static int admv8818_hpf_select(struct admv8818_state *st, u64 freq) @@ -186,31 +216,52 @@ static int admv8818_hpf_select(struct admv8818_state *st, u64 freq) static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq) { - unsigned int lpf_step = 0, lpf_band = 0, i, j; - u64 freq_step; - int ret; + int band, state, ret; + unsigned int lpf_state = ADMV8818_STATE_MIN, lpf_band = ADMV8818_BAND_BYPASS; + u64 freq_error, min_freq_error, freq_corner, freq_step; - if (freq > freq_range_lpf[3][1]) + if (freq > freq_range_lpf[ADMV8818_BAND_MAX][ADMV8818_BAND_CORNER_HIGH]) goto lpf_write; - if (freq < freq_range_lpf[0][0]) { - lpf_band = 1; - + if (freq < freq_range_lpf[ADMV8818_BAND_MIN][ADMV8818_BAND_CORNER_LOW]) { + lpf_state = ADMV8818_STATE_MIN; + lpf_band = ADMV8818_BAND_MIN; goto lpf_write; } - for (i = 0; i < 4; i++) { - if (freq > freq_range_lpf[i][0] && freq < freq_range_lpf[i][1]) { - lpf_band = i + 1; - freq_step = div_u64((freq_range_lpf[i][1] - freq_range_lpf[i][0]), 15); + min_freq_error = U64_MAX; + for (band = ADMV8818_BAND_MAX; band >= ADMV8818_BAND_MIN; --band) { + /* + * At this point the highest corner frequency of + * all remaining ranges is below the target. + * The LPF corner should be >= the target. + */ + if (freq > freq_range_lpf[band][ADMV8818_BAND_CORNER_HIGH]) + break; + + freq_step = freq_range_lpf[band][ADMV8818_BAND_CORNER_HIGH] - + freq_range_lpf[band][ADMV8818_BAND_CORNER_LOW]; + freq_step = div_u64(freq_step, ADMV8818_NUM_STATES - 1); + + for (state = ADMV8818_STATE_MAX; state >= ADMV8818_STATE_MIN; --state) { + + freq_corner = freq_range_lpf[band][ADMV8818_BAND_CORNER_LOW] + + state * freq_step; - for (j = 0; j <= 15; j++) { - if (freq < (freq_range_lpf[i][0] + (freq_step * j))) { - lpf_step = j; - break; - } + /* + * At this point all other states in range will + * place the corner frequency below the target. + * The LPF corner should be >= the target. + */ + if (freq > freq_corner) + break; + + freq_error = freq_corner - freq; + if (freq_error < min_freq_error) { + min_freq_error = freq_error; + lpf_state = state; + lpf_band = band; } - break; } } @@ -225,7 +276,7 @@ static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq) return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER, ADMV8818_LPF_WR0_MSK, - FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_step)); + FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_state)); } static int admv8818_lpf_select(struct admv8818_state *st, u64 freq) @@ -242,16 +293,28 @@ static int admv8818_lpf_select(struct admv8818_state *st, u64 freq) static int admv8818_rfin_band_select(struct admv8818_state *st) { int ret; + u64 hpf_corner_target, lpf_corner_target; st->cf_hz = clk_get_rate(st->clkin); + /* Check for underflow */ + if (st->cf_hz > st->hpf_margin_hz) + hpf_corner_target = st->cf_hz - st->hpf_margin_hz; + else + hpf_corner_target = 0; + + /* Check for overflow */ + lpf_corner_target = st->cf_hz + st->lpf_margin_hz; + if (lpf_corner_target < st->cf_hz) + lpf_corner_target = U64_MAX; + mutex_lock(&st->lock); - ret = __admv8818_hpf_select(st, st->cf_hz); + ret = __admv8818_hpf_select(st, hpf_corner_target); if (ret) goto exit; - ret = __admv8818_lpf_select(st, st->cf_hz); + ret = __admv8818_lpf_select(st, lpf_corner_target); exit: mutex_unlock(&st->lock); return ret; @@ -278,8 +341,11 @@ static int __admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq) hpf_state = FIELD_GET(ADMV8818_HPF_WR0_MSK, data); - *hpf_freq = div_u64(freq_range_hpf[hpf_band - 1][1] - freq_range_hpf[hpf_band - 1][0], 15); - *hpf_freq = freq_range_hpf[hpf_band - 1][0] + (*hpf_freq * hpf_state); + *hpf_freq = freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_HIGH] - + freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_LOW]; + *hpf_freq = div_u64(*hpf_freq, ADMV8818_NUM_STATES - 1); + *hpf_freq = freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_LOW] + + (*hpf_freq * hpf_state); return ret; } @@ -316,8 +382,11 @@ static int __admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq) lpf_state = FIELD_GET(ADMV8818_LPF_WR0_MSK, data); - *lpf_freq = div_u64(freq_range_lpf[lpf_band - 1][1] - freq_range_lpf[lpf_band - 1][0], 15); - *lpf_freq = freq_range_lpf[lpf_band - 1][0] + (*lpf_freq * lpf_state); + *lpf_freq = freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_HIGH] - + freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_LOW]; + *lpf_freq = div_u64(*lpf_freq, ADMV8818_NUM_STATES - 1); + *lpf_freq = freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_LOW] + + (*lpf_freq * lpf_state); return ret; } @@ -333,6 +402,19 @@ static int admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq) return ret; } +static int admv8818_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + switch (mask) { + case
IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY: + return IIO_VAL_INT_64; + default: + return -EINVAL; + } +} + static int admv8818_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long info) @@ -341,6 +423,9 @@ static int admv8818_write_raw(struct iio_dev *indio_dev, u64 freq = ((u64)val2 << 32 | (u32)val); + if ((s64)freq < 0) + return -EINVAL; + switch (info) { case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: return admv8818_lpf_select(st, freq); @@ -502,6 +587,7 @@ static int admv8818_set_mode(struct iio_dev *indio_dev, static const struct iio_info admv8818_info = { .write_raw = admv8818_write_raw, + .write_raw_get_fmt = admv8818_write_raw_get_fmt, .read_raw = admv8818_read_raw, .debugfs_reg_access = &admv8818_reg_access, }; @@ -641,6 +727,32 @@ static int admv8818_clk_setup(struct admv8818_state *st) return devm_add_action_or_reset(&spi->dev, admv8818_clk_notifier_unreg, st); } +static int admv8818_read_properties(struct admv8818_state *st) +{ + struct spi_device *spi = st->spi; + u32 mhz; + int ret; + + ret = device_property_read_u32(&spi->dev, "adi,lpf-margin-mhz", &mhz); + if (ret == 0) + st->lpf_margin_hz = (u64)mhz * HZ_PER_MHZ; + else if (ret == -EINVAL) + st->lpf_margin_hz = 0; + else + return ret; + + + ret = device_property_read_u32(&spi->dev, "adi,hpf-margin-mhz", &mhz); + if (ret == 0) + st->hpf_margin_hz = (u64)mhz * HZ_PER_MHZ; + else if (ret == -EINVAL) + st->hpf_margin_hz = 0; + else if (ret < 0) + return ret; + + return 0; +} + static int admv8818_probe(struct spi_device *spi) { struct iio_dev *indio_dev; @@ -672,6 +784,10 @@ static int admv8818_probe(struct spi_device *spi) mutex_init(&st->lock); + ret = admv8818_read_properties(st); + if (ret) + return ret; + ret = admv8818_init(st); if (ret) return ret; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c index 213cce1c31110..91f0f381082bd 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c @@ -67,16 +67,18 @@ int inv_icm42600_temp_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; /* * T°C = (temp / 132.48) + 25 - * Tm°C = 1000 * ((temp * 100 / 13248) + 25) + * Tm°C = 1000 * ((temp / 132.48) + 25) + * Tm°C = 7.548309 * temp + 25000 + * Tm°C = (temp + 3312) * 7.548309 * scale: 100000 / 13248 ~= 7.548309 - * offset: 25000 + * offset: 3312 */ case IIO_CHAN_INFO_SCALE: *val = 7; *val2 = 548309; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_OFFSET: - *val = 25000; + *val = 3312; return IIO_VAL_INT; default: return -EINVAL; diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index b4c6c7c472569..8fae58db1d639 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -582,7 +582,7 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev, struct { u32 pressure; u16 temperature; - u64 timestamp; + aligned_s64 timestamp; } sample; int err; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 07fb8d3c037f0..d45e3909dafe1 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -166,7 +166,7 @@ struct cm_port { struct cm_device { struct kref kref; struct list_head list; - spinlock_t mad_agent_lock; + rwlock_t mad_agent_lock; struct ib_device *ib_device; u8 ack_delay; int going_down; @@ -284,7 +284,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) if 
(!cm_id_priv->av.port) return ERR_PTR(-EINVAL); - spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); mad_agent = cm_id_priv->av.port->mad_agent; if (!mad_agent) { m = ERR_PTR(-EINVAL); @@ -315,7 +315,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) m->context[0] = cm_id_priv; out: - spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return m; } @@ -1294,10 +1294,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv) if (!cm_id_priv->av.port) return cpu_to_be64(low_tid); - spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); if (cm_id_priv->av.port->mad_agent) hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32; - spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return cpu_to_be64(hi_tid | low_tid); } @@ -4374,7 +4374,7 @@ static int cm_add_one(struct ib_device *ib_device) return -ENOMEM; kref_init(&cm_dev->kref); - spin_lock_init(&cm_dev->mad_agent_lock); + rwlock_init(&cm_dev->mad_agent_lock); cm_dev->ib_device = ib_device; cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; cm_dev->going_down = 0; @@ -4490,9 +4490,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) * The above ensures no call paths from the work are running, * the remaining paths all take the mad_agent_lock. */ - spin_lock(&cm_dev->mad_agent_lock); + write_lock(&cm_dev->mad_agent_lock); port->mad_agent = NULL; - spin_unlock(&cm_dev->mad_agent_lock); + write_unlock(&cm_dev->mad_agent_lock); ib_unregister_mad_agent(mad_agent); ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 176d0b3e44887..81bc24a346d37 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -5231,7 +5231,8 @@ static int cma_netevent_callback(struct notifier_block *self, neigh->ha, ETH_ALEN)) continue; cma_id_get(current_id); - queue_work(cma_wq, ¤t_id->id.net_work); + if (!queue_work(cma_wq, ¤t_id->id.net_work)) + cma_id_put(current_id); } out: spin_unlock_irqrestore(&id_table_lock, flags); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 7e3a55349e107..96a678250e553 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -366,12 +366,9 @@ EXPORT_SYMBOL(iw_cm_disconnect); /* * CM_ID <-- DESTROYING * - * Clean up all resources associated with the connection and release - * the initial reference taken by iw_create_cm_id. - * - * Returns true if and only if the last cm_id_priv reference has been dropped. + * Clean up all resources associated with the connection. */ -static bool destroy_cm_id(struct iw_cm_id *cm_id) +static void destroy_cm_id(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; struct ib_qp *qp; @@ -440,20 +437,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id) iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr); iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM); } - - return iwcm_deref_id(cm_id_priv); } /* - * This function is only called by the application thread and cannot - * be called by the event thread. The function will wait for all - * references to be released on the cm_id and then kfree the cm_id - * object. + * Destroy cm_id. 
If the cm_id still has other references, wait for all + * references to be released on the cm_id and then release the initial + * reference taken by iw_create_cm_id. */ void iw_destroy_cm_id(struct iw_cm_id *cm_id) { - if (!destroy_cm_id(cm_id)) + struct iwcm_id_private *cm_id_priv; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + destroy_cm_id(cm_id); + if (refcount_read(&cm_id_priv->refcount) > 1) flush_workqueue(iwcm_wq); + iwcm_deref_id(cm_id_priv); } EXPORT_SYMBOL(iw_destroy_cm_id); @@ -1033,8 +1032,10 @@ static void cm_work_handler(struct work_struct *_work) if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) { ret = process_event(cm_id_priv, &levent); - if (ret) - WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id)); + if (ret) { + destroy_cm_id(&cm_id_priv->id); + WARN_ON_ONCE(iwcm_deref_id(cm_id_priv)); + } } else pr_debug("dropping event %d\n", levent.event); if (iwcm_deref_id(cm_id_priv)) diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 4fc5b9d5fea87..307c35888b300 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -33,7 +33,6 @@ #include #include #include -#include "hnae3.h" #include "hns_roce_device.h" #include "hns_roce_hw_v2.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index f5c3e560df58d..81e44b7381229 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -43,7 +43,6 @@ #include #include -#include "hnae3.h" #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_cmd.h" @@ -943,7 +942,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx) static void update_srq_db(struct hns_roce_srq *srq) { struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); - struct hns_roce_v2_db db; + struct hns_roce_v2_db db = {}; hr_reg_write(&db, DB_TAG, srq->srqn); hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 91a5665465ffb..bc7466830eaf9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -34,6 +34,7 @@ #define _HNS_ROCE_HW_V2_H #include +#include "hnae3.h" #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 #define HNS_ROCE_V2_MTT_ENTRY_SZ 64 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 8d0b63d4b50a6..e7a497cc125cc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -37,7 +37,6 @@ #include #include #include -#include "hnae3.h" #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hem.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 356d988169497..f637b73b946e4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -4,7 +4,6 @@ #include #include #include -#include "hnae3.h" #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hw_v2.h" diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 81cfa74147a18..ad6c195d077bb 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -391,7 +391,7 @@ static int do_get_hw_stats(struct ib_device *ibdev, return ret; /* We don't expose device counters over Vports */ - if 
(is_mdev_switchdev_mode(dev->mdev) && port_num != 0) + if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0) goto done; if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { @@ -411,7 +411,7 @@ static int do_get_hw_stats(struct ib_device *ibdev, */ goto done; } - ret = mlx5_lag_query_cong_counters(dev->mdev, + ret = mlx5_lag_query_cong_counters(mdev, stats->value + cnts->num_q_counters, cnts->num_cong_counters, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 69999d8d24f37..f49f78b69ab9c 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1914,6 +1914,7 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, /* Level1 is valid for future use, no need to free */ return -ENOMEM; + INIT_LIST_HEAD(&obj_event->obj_sub_list); err = xa_insert(&event->object_ids, key_level2, obj_event, @@ -1922,7 +1923,6 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, kfree(obj_event); return err; } - INIT_LIST_HEAD(&obj_event->obj_sub_list); } return 0; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8c47cb4edd0a0..435c456a4fd5b 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1766,6 +1766,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev, context->devx_uid); } +static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + int err; + + err = mlx5_nic_vport_update_local_lb(master, true); + if (err) + return err; + + err = mlx5_nic_vport_update_local_lb(slave, true); + if (err) + goto out; + + return 0; + +out: + mlx5_nic_vport_update_local_lb(master, false); + return err; +} + +static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + mlx5_nic_vport_update_local_lb(slave, false); + mlx5_nic_vport_update_local_lb(master, false); +} + int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) { int err = 0; @@ -3448,6 +3475,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, lockdep_assert_held(&mlx5_ib_multiport_mutex); + mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev); + mlx5_core_mp_event_replay(ibdev->mdev, MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, NULL); @@ -3543,6 +3572,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, MLX5_DRIVER_EVENT_AFFILIATION_DONE, &key); + err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev); + if (err) + goto unbind; + return true; unbind: diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 068eac3bdb50b..726b81b6330c6 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1968,7 +1968,6 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev, if (mr->mmkey.cache_ent) { spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); - mr->mmkey.cache_ent->in_use--; goto end; } @@ -2029,32 +2028,62 @@ void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev) } } -static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) +static int mlx5_umr_revoke_mr_with_lock(struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); - struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; - bool is_odp = is_odp_mr(mr); bool is_odp_dma_buf = is_dmabuf_mr(mr) && - !to_ib_umem_dmabuf(mr->umem)->pinned; - int ret = 0; + !to_ib_umem_dmabuf(mr->umem)->pinned; + bool is_odp = is_odp_mr(mr); + int ret; if (is_odp) mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex); if 
(is_odp_dma_buf) - dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL); + dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, + NULL); + + ret = mlx5r_umr_revoke_mr(mr); - if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { + if (is_odp) { + if (!ret) + to_ib_umem_odp(mr->umem)->private = NULL; + mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex); + } + + if (is_odp_dma_buf) { + if (!ret) + to_ib_umem_dmabuf(mr->umem)->private = NULL; + dma_resv_unlock( + to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); + } + + return ret; +} + +static int mlx5r_handle_mkey_cleanup(struct mlx5_ib_mr *mr) +{ + bool is_odp_dma_buf = is_dmabuf_mr(mr) && + !to_ib_umem_dmabuf(mr->umem)->pinned; + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); + struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; + bool is_odp = is_odp_mr(mr); + bool from_cache = !!ent; + int ret; + + if (mr->mmkey.cacheable && !mlx5_umr_revoke_mr_with_lock(mr) && + !cache_ent_find_and_store(dev, mr)) { ent = mr->mmkey.cache_ent; /* upon storing to a clean temp entry - schedule its cleanup */ spin_lock_irq(&ent->mkeys_queue.lock); + if (from_cache) + ent->in_use--; if (ent->is_tmp && !ent->tmp_cleanup_scheduled) { mod_delayed_work(ent->dev->cache.wq, &ent->dwork, msecs_to_jiffies(30 * 1000)); ent->tmp_cleanup_scheduled = true; } spin_unlock_irq(&ent->mkeys_queue.lock); - goto out; + return 0; } if (ent) { @@ -2063,8 +2092,14 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) mr->mmkey.cache_ent = NULL; spin_unlock_irq(&ent->mkeys_queue.lock); } + + if (is_odp) + mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex); + + if (is_odp_dma_buf) + dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, + NULL); ret = destroy_mkey(dev, mr); -out: if (is_odp) { if (!ret) to_ib_umem_odp(mr->umem)->private = NULL; @@ -2074,9 +2109,9 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) if (is_odp_dma_buf) { if (!ret) to_ib_umem_dmabuf(mr->umem)->private = NULL; - dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); + dma_resv_unlock( + to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); } - return ret; } @@ -2125,7 +2160,7 @@ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr) } /* Stop DMA */ - rc = mlx5_revoke_mr(mr); + rc = mlx5r_handle_mkey_cleanup(mr); if (rc) return rc; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index e158d5b1ab17b..98a76c9db7aba 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -247,8 +247,8 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) } if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault)) - __xa_erase(&mr_to_mdev(mr)->odp_mkeys, - mlx5_base_mkey(mr->mmkey.key)); + xa_erase(&mr_to_mdev(mr)->odp_mkeys, + mlx5_base_mkey(mr->mmkey.key)); xa_unlock(&imr->implicit_children); /* Freeing a MR is a sleeping operation, so bounce to a work queue */ @@ -521,8 +521,8 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr, } if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) { - ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), - &mr->mmkey, GFP_KERNEL); + ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), + &mr->mmkey, GFP_KERNEL); if (xa_is_err(ret)) { ret = ERR_PTR(xa_err(ret)); __xa_erase(&imr->implicit_children, idx); diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index d3dcc272200af..146d03ae40bd9 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ 
b/drivers/infiniband/hw/mlx5/qpc.c @@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) spin_lock_irqsave(&table->lock, flags); common = radix_tree_lookup(&table->tree, rsn); - if (common) + if (common && !common->invalid) refcount_inc(&common->refcount); + else + common = NULL; spin_unlock_irqrestore(&table->lock, flags); @@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev, return 0; } +static void modify_resource_common_state(struct mlx5_ib_dev *dev, + struct mlx5_core_qp *qp, + bool invalid) +{ + struct mlx5_qp_table *table = &dev->qp_table; + unsigned long flags; + + spin_lock_irqsave(&table->lock, flags); + qp->common.invalid = invalid; + spin_unlock_irqrestore(&table->lock, flags); +} + static void destroy_resource_common(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) { @@ -609,8 +623,20 @@ int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *rq) { + int ret; + + /* The rq destruction can be called again in case it fails, hence we + * mark the common resource as invalid and only destroy the resources + * once FW destruction has completed successfully. + */ + modify_resource_common_state(dev, rq, true); + ret = destroy_rq_tracked(dev, rq->qpn, rq->uid); + if (ret) { + modify_resource_common_state(dev, rq, false); + return ret; + } destroy_resource_common(dev, rq); - return destroy_rq_tracked(dev, rq->qpn, rq->uid); + return 0; } static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid) diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 91d329e903083..8b805b16136e5 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work) spin_unlock_irqrestore(&qp->state_lock, flags); qp->qp_timeout_jiffies = 0; - if (qp_type(qp) == IB_QPT_RC) { + /* timer_setup() initializes .function; if .function is NULL, + * timer_setup() has not been called and the timer was never + * initialized. Otherwise, the timer is initialized. + */ + if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function && + qp->rnr_nak_timer.function) { del_timer_sync(&qp->retrans_timer); del_timer_sync(&qp->rnr_nak_timer); } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index da5b14602a761..6d679e235af6c 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -174,6 +174,7 @@ static const struct xpad_device { { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX }, { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX }, + { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX }, { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX }, { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX }, { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, @@ -515,6 +516,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */ + XPAD_XBOX360_VENDOR(0x0502), /* Acer Inc.
Xbox 360 style controllers */ XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */ XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz Xbox 360 controllers */ diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 380fe8dab3b06..9514f577995fa 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -449,6 +449,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t) release_timer); struct input_dev *input = bdata->input; + guard(spinlock_irqsave)(&bdata->lock); + if (bdata->key_pressed) { input_report_key(input, *bdata->code, 0); input_sync(input); diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c index dce3b0ec8cf36..330f091236318 100644 --- a/drivers/input/misc/cs40l50-vibra.c +++ b/drivers/input/misc/cs40l50-vibra.c @@ -238,6 +238,8 @@ static int cs40l50_upload_owt(struct cs40l50_work *work_data) header.data_words = len / sizeof(u32); new_owt_effect_data = kmalloc(sizeof(header) + len, GFP_KERNEL); + if (!new_owt_effect_data) + return -ENOMEM; memcpy(new_owt_effect_data, &header, sizeof(header)); memcpy(new_owt_effect_data + sizeof(header), work_data->custom_data, len); diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 4215f9b9c2b07..fc22cbb854a38 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -844,6 +844,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu, addr = be32_to_cpu(rec->addr) / 2; len = be16_to_cpu(rec->len); + if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) { + dev_err(pcu->dev, + "Invalid record length in firmware: %d\n", len); + return -EINVAL; + } + fragment = (void *)&pcu->cmd_buf[1]; put_unaligned_le32(addr, &fragment->addr); fragment->len = len; diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c index 01c4009fd53e7..846aac9a5c9df 100644 --- a/drivers/input/misc/iqs7222.c +++ b/drivers/input/misc/iqs7222.c @@ -301,6 +301,7 @@ struct iqs7222_dev_desc { int allow_offset; int event_offset; int comms_offset; + int ext_chan; bool legacy_gesture; struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS]; }; @@ -315,6 +316,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { .allow_offset = 9, .event_offset = 10, .comms_offset = 12, + .ext_chan = 10, .reg_grps = { [IQS7222_REG_GRP_STAT] = { .base = IQS7222_SYS_STATUS, @@ -373,6 +375,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { .allow_offset = 9, .event_offset = 10, .comms_offset = 12, + .ext_chan = 10, .legacy_gesture = true, .reg_grps = { [IQS7222_REG_GRP_STAT] = { @@ -2244,7 +2247,7 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; struct i2c_client *client = iqs7222->client; int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; - int ext_chan = rounddown(num_chan, 10); + int ext_chan = dev_desc->ext_chan ? : num_chan; int error, i; u16 *chan_setup = iqs7222->chan_setup[chan_index]; u16 *sys_setup = iqs7222->sys_setup; @@ -2448,7 +2451,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; struct i2c_client *client = iqs7222->client; int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; - int ext_chan = rounddown(num_chan, 10); + int ext_chan = dev_desc->ext_chan ? 
: num_chan; int count, error, reg_offset, i; u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset]; u16 *sldr_setup = iqs7222->sldr_setup[sldr_index]; diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index 20020cbc0752b..a94699f2bbc67 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -75,9 +75,14 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int return -1; switch (code) { - case SND_BELL: if (value) value = 1000; - case SND_TONE: break; - default: return -1; + case SND_BELL: + if (value) + value = 1000; + break; + case SND_TONE: + break; + default: + return -1; } if (value > 20 && value < 32767) @@ -113,9 +118,14 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned return -1; switch (code) { - case SND_BELL: if (value) value = 1000; - case SND_TONE: break; - default: return -1; + case SND_BELL: + if (value) + value = 1000; + break; + case SND_TONE: + break; + default: + return -1; } if (value > 20 && value < 32767) diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index e2468bc04a5cb..c2516c7549582 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -4,6 +4,7 @@ * Copyright (C) 2016 Zodiac Inflight Innovations */ +#include "linux/device.h" #include #include #include @@ -298,39 +299,30 @@ static int rmi_f34_update_firmware(struct f34_data *f34, return ret; } -static int rmi_f34_status(struct rmi_function *fn) -{ - struct f34_data *f34 = dev_get_drvdata(&fn->dev); - - /* - * The status is the percentage complete, or once complete, - * zero for success or a negative return code. - */ - return f34->update_status; -} - static ssize_t rmi_driver_bootloader_id_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct rmi_driver_data *data = dev_get_drvdata(dev); - struct rmi_function *fn = data->f34_container; + struct rmi_function *fn; struct f34_data *f34; - if (fn) { - f34 = dev_get_drvdata(&fn->dev); - - if (f34->bl_version == 5) - return sysfs_emit(buf, "%c%c\n", - f34->bootloader_id[0], - f34->bootloader_id[1]); - else - return sysfs_emit(buf, "V%d.%d\n", - f34->bootloader_id[1], - f34->bootloader_id[0]); - } + fn = data->f34_container; + if (!fn) + return -ENODEV; - return 0; + f34 = dev_get_drvdata(&fn->dev); + if (!f34) + return -ENODEV; + + if (f34->bl_version == 5) + return sysfs_emit(buf, "%c%c\n", + f34->bootloader_id[0], + f34->bootloader_id[1]); + else + return sysfs_emit(buf, "V%d.%d\n", + f34->bootloader_id[1], + f34->bootloader_id[0]); } static DEVICE_ATTR(bootloader_id, 0444, rmi_driver_bootloader_id_show, NULL); @@ -343,13 +335,16 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev, struct rmi_function *fn = data->f34_container; struct f34_data *f34; - if (fn) { - f34 = dev_get_drvdata(&fn->dev); + fn = data->f34_container; + if (!fn) + return -ENODEV; - return sysfs_emit(buf, "%s\n", f34->configuration_id); - } + f34 = dev_get_drvdata(&fn->dev); + if (!f34) + return -ENODEV; - return 0; + + return sysfs_emit(buf, "%s\n", f34->configuration_id); } static DEVICE_ATTR(configuration_id, 0444, @@ -365,10 +360,14 @@ static int rmi_firmware_update(struct rmi_driver_data *data, if (!data->f34_container) { dev_warn(dev, "%s: No F34 present!\n", __func__); - return -EINVAL; + return -ENODEV; } f34 = dev_get_drvdata(&data->f34_container->dev); + if (!f34) { + dev_warn(dev, "%s: No valid F34 present!\n", __func__); + return -ENODEV; + } if (f34->bl_version >= 7) { if 
(data->pdt_props & HAS_BSR) { @@ -494,10 +493,18 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev, char *buf) { struct rmi_driver_data *data = dev_get_drvdata(dev); - int update_status = 0; + struct f34_data *f34; + int update_status = -ENODEV; - if (data->f34_container) - update_status = rmi_f34_status(data->f34_container); + /* + * The status is the percentage complete, or once complete, + * zero for success or a negative return code. + */ + if (data->f34_container) { + f34 = dev_get_drvdata(&data->f34_container->dev); + if (f34) + update_status = f34->update_status; + } return sysfs_emit(buf, "%d\n", update_status); } @@ -517,33 +524,21 @@ static const struct attribute_group rmi_firmware_attr_group = { .attrs = rmi_firmware_attrs, }; -static int rmi_f34_probe(struct rmi_function *fn) +static int rmi_f34v5_probe(struct f34_data *f34) { - struct f34_data *f34; - unsigned char f34_queries[9]; + struct rmi_function *fn = f34->fn; + u8 f34_queries[9]; bool has_config_id; - u8 version = fn->fd.function_version; - int ret; - - f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL); - if (!f34) - return -ENOMEM; - - f34->fn = fn; - dev_set_drvdata(&fn->dev, f34); - - /* v5 code only supported version 0, try V7 probe */ - if (version > 0) - return rmi_f34v7_probe(f34); + int error; f34->bl_version = 5; - ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, - f34_queries, sizeof(f34_queries)); - if (ret) { + error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, + f34_queries, sizeof(f34_queries)); + if (error) { dev_err(&fn->dev, "%s: Failed to query properties\n", __func__); - return ret; + return error; } snprintf(f34->bootloader_id, sizeof(f34->bootloader_id), @@ -569,11 +564,11 @@ static int rmi_f34_probe(struct rmi_function *fn) f34->v5.config_blocks); if (has_config_id) { - ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr, - f34_queries, sizeof(f34_queries)); - if (ret) { + error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr, + f34_queries, sizeof(f34_queries)); + if (error) { dev_err(&fn->dev, "Failed to read F34 config ID\n"); - return ret; + return error; } snprintf(f34->configuration_id, sizeof(f34->configuration_id), @@ -582,12 +577,34 @@ static int rmi_f34_probe(struct rmi_function *fn) f34_queries[2], f34_queries[3]); rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n", - f34->configuration_id); + f34->configuration_id); } return 0; } +static int rmi_f34_probe(struct rmi_function *fn) +{ + struct f34_data *f34; + u8 version = fn->fd.function_version; + int error; + + f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL); + if (!f34) + return -ENOMEM; + + f34->fn = fn; + + /* v5 code only supported version 0 */ + error = version == 0 ? rmi_f34v5_probe(f34) : rmi_f34v7_probe(f34); + if (error) + return error; + + dev_set_drvdata(&fn->dev, f34); + + return 0; +} + int rmi_f34_create_sysfs(struct rmi_device *rmi_dev) { return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b3aa1f5d53218..1469ad0794f28 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -199,7 +199,6 @@ source "drivers/iommu/iommufd/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI - select DMAR_TABLE if INTEL_IOMMU help Supports Interrupt remapping for IO-APIC and MSI devices. 
To use x2apic mode in the CPU's which support x2APIC enhancements or diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index f61e48f237324..23e78a034da8f 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -107,7 +107,9 @@ static inline int get_acpihid_device_id(struct device *dev, struct acpihid_map_entry **entry) { struct acpi_device *adev = ACPI_COMPANION(dev); - struct acpihid_map_entry *p; + struct acpihid_map_entry *p, *p1 = NULL; + int hid_count = 0; + bool fw_bug; if (!adev) return -ENODEV; @@ -115,12 +117,33 @@ static inline int get_acpihid_device_id(struct device *dev, list_for_each_entry(p, &acpihid_map, list) { if (acpi_dev_hid_uid_match(adev, p->hid, p->uid[0] ? p->uid : NULL)) { - if (entry) - *entry = p; - return p->devid; + p1 = p; + fw_bug = false; + hid_count = 1; + break; + } + + /* + * Count HID matches w/o UID, raise FW_BUG but allow exactly one match + */ + if (acpi_dev_hid_match(adev, p->hid)) { + p1 = p; + hid_count++; + fw_bug = true; } } - return -EINVAL; + + if (!p1) + return -EINVAL; + if (fw_bug) + dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n", + hid_count, hid_count > 1 ? "s" : ""); + if (hid_count > 1) + return -EINVAL; + if (entry) + *entry = p1; + + return p1->devid; } static inline int get_device_sbdf_id(struct device *dev) @@ -838,6 +861,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) { iommu_ga_log_notifier = notifier; + /* + * Ensure all in-flight IRQ handlers run to completion before returning + * to the caller, e.g. to ensure module code isn't unloaded while it's + * being executed in the IRQ handler. + */ + if (!notifier) + synchronize_rcu(); + return 0; } EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 157542c07aaaf..56e9f125cda9a 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1970,6 +1970,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, return ret; info->domain = domain; + info->domain_attached = true; spin_lock_irqsave(&domain->lock, flags); list_add(&info->link, &domain->devices); spin_unlock_irqrestore(&domain->lock, flags); @@ -3381,6 +3382,10 @@ void device_block_translation(struct device *dev) struct intel_iommu *iommu = info->iommu; unsigned long flags; + /* Device in DMA blocking state. Nothing to do. */ + if (!info->domain_attached) + return; + if (info->domain) cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); @@ -3393,6 +3398,9 @@ void device_block_translation(struct device *dev) domain_context_clear(info); } + /* Device now in DMA blocking state.
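 * (Editor's note: the new domain_attached flag makes the blocking path
 * idempotent, so a second call cannot tear the device context down twice.
 * The flag's life cycle, restated from the hunks above rather than new
 * code:
 *
 *	attach:	info->domain_attached = true;	// after translation is set up
 *	block:	if (!info->domain_attached)	// already blocked, bail out
 *			return;
 *		...tear down translation...
 *		info->domain_attached = false;	// now in blocking state
 * )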
*/ + info->domain_attached = false; + if (!info->domain) return; @@ -4406,6 +4414,9 @@ static int device_set_dirty_tracking(struct list_head *devices, bool enable) break; } + if (!ret) + info->domain_attached = true; + return ret; } diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index 1497f3112b12c..6f16eeb2ac655 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -776,6 +776,7 @@ struct device_domain_info { u8 ats_supported:1; u8 ats_enabled:1; u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */ + u8 domain_attached:1; /* Device has domain attached */ u8 ats_qdep; struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ struct intel_iommu *iommu; /* IOMMU used by this device */ diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c index 433c58944401f..3b5251034a871 100644 --- a/drivers/iommu/intel/nested.c +++ b/drivers/iommu/intel/nested.c @@ -27,8 +27,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain, unsigned long flags; int ret = 0; - if (info->domain) - device_block_translation(dev); + device_block_translation(dev); if (iommu->agaw < dmar_domain->s2_domain->agaw) { dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n"); @@ -62,6 +61,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain, goto unassign_tag; info->domain = dmar_domain; + info->domain_attached = true; spin_lock_irqsave(&dmar_domain->lock, flags); list_add(&info->link, &dmar_domain->devices); spin_unlock_irqrestore(&dmar_domain->lock, flags); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 879009adef407..0ad55649e2d00 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2394,6 +2394,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, unsigned int pgsize_idx, pgsize_idx_next; unsigned long pgsizes; size_t offset, pgsize, pgsize_next; + size_t offset_end; unsigned long addr_merge = paddr | iova; /* Page sizes supported by the hardware and small enough for @size */ @@ -2434,7 +2435,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, * If size is big enough to accommodate the larger page, reduce * the number of smaller pages. 
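 * (Editor's note: the raw comparison offset + pgsize_next <= size can wrap
 * around and wrongly pass, which is why the hunk below switches to
 * check_add_overflow() from <linux/overflow.h>. A minimal sketch of the
 * same guard, assuming only the compiler builtin that the kernel helper
 * wraps:
 *
 *	size_t offset_end;
 *
 *	// __builtin_add_overflow() returns true if the sum wrapped
 *	if (!__builtin_add_overflow(offset, pgsize_next, &offset_end) &&
 *	    offset_end <= size)
 *		size = offset;
 *
 * i.e. size is only reduced when the addition is provably in range.)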
*/ - if (offset + pgsize_next <= size) + if (!check_add_overflow(offset, pgsize_next, &offset_end) && + offset_end <= size) size = offset; out_set_count: diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ff55b8c307126..ae69691471e9f 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1087,7 +1087,7 @@ static int ipmmu_probe(struct platform_device *pdev) * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device) */ if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) { - ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, + ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s", dev_name(&pdev->dev)); if (ret) return ret; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 4b369419b32ce..4c7f470a4752f 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1154,7 +1154,6 @@ static int rk_iommu_of_xlate(struct device *dev, iommu_dev = of_find_device_by_node(args->np); data->iommu = platform_get_drvdata(iommu_dev); - data->iommu->domain = &rk_identity_domain; dev_iommu_priv_set(dev, data); platform_device_put(iommu_dev); @@ -1192,6 +1191,8 @@ static int rk_iommu_probe(struct platform_device *pdev) if (!iommu) return -ENOMEM; + iommu->domain = &rk_identity_domain; + platform_set_drvdata(pdev, iommu); iommu->dev = dev; iommu->num_mmu = 0; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index c1f3048360085..a799a89195c51 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -71,6 +71,7 @@ config ARM_VIC_NR config IRQ_MSI_LIB bool + select GENERIC_MSI_IRQ config ARMADA_370_XP_IRQ bool diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c index 30c1ecb5f361e..c707be97049b7 100644 --- a/drivers/leds/led-class-multicolor.c +++ b/drivers/leds/led-class-multicolor.c @@ -61,7 +61,8 @@ static ssize_t multi_intensity_store(struct device *dev, for (i = 0; i < mcled_cdev->num_colors; i++) mcled_cdev->subled_info[i].intensity = intensity_value[i]; - led_set_brightness(led_cdev, led_cdev->brightness); + if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags)) + led_set_brightness(led_cdev, led_cdev->brightness); ret = size; err_out: mutex_unlock(&led_cdev->led_access); diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index f815dab3be50c..0657bd3d8f97b 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv, { u32 *arg = data; u32 val; - int ret; + int ret, count; switch (cp->type) { case IMX_MU_TYPE_TX: @@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv, case IMX_MU_TYPE_TXDB_V2: imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), priv->dcfg->xCR[IMX_MU_GCR]); - ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val, - !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)), - 0, 1000); - if (ret) - dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type); + ret = -ETIMEDOUT; + count = 0; + while (ret && (count < 10)) { + ret = + readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val, + !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)), + 0, 10000); + + if (ret) { + dev_warn_ratelimited(priv->dev, + "channel type: %d timeout, %d times, retry\n", + cp->type, ++count); + } + } break; default: dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type); diff --git 
a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index cb174e788a96c..92c2fb618c8e1 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -490,8 +490,8 @@ void mbox_free_channel(struct mbox_chan *chan) if (chan->txdone_method == TXDONE_BY_ACK) chan->txdone_method = TXDONE_BY_POLL; - module_put(chan->mbox->dev->driver->owner); spin_unlock_irqrestore(&chan->lock, flags); + module_put(chan->mbox->dev->driver->owner); } EXPORT_SYMBOL_GPL(mbox_free_channel); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index 9c43ed9bdd37b..d24f71819c3d6 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -92,18 +92,6 @@ struct gce_plat { u32 gce_num; }; -static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable) -{ - WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); - - if (enable) - writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE); - else - writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE); - - clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); -} - u8 cmdq_get_shift_pa(struct mbox_chan *chan) { struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); @@ -112,6 +100,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan) } EXPORT_SYMBOL(cmdq_get_shift_pa); +static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable) +{ + u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0; + + if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en) + return; + + if (cmdq->pdata->sw_ddr_en && ddr_enable) + val |= GCE_DDR_EN; + + writel(val, cmdq->base + GCE_GCTL_VALUE); +} + static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) { u32 status; @@ -140,16 +141,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread) static void cmdq_init(struct cmdq *cmdq) { int i; - u32 gctl_regval = 0; WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); - if (cmdq->pdata->control_by_sw) - gctl_regval = GCE_CTRL_BY_SW; - if (cmdq->pdata->sw_ddr_en) - gctl_regval |= GCE_DDR_EN; - if (gctl_regval) - writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE); + cmdq_gctl_value_toggle(cmdq, true); writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); for (i = 0; i <= CMDQ_MAX_EVENT; i++) @@ -315,14 +310,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev) static int cmdq_runtime_resume(struct device *dev) { struct cmdq *cmdq = dev_get_drvdata(dev); + int ret; - return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks); + ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks); + if (ret) + return ret; + + cmdq_gctl_value_toggle(cmdq, true); + return 0; } static int cmdq_runtime_suspend(struct device *dev) { struct cmdq *cmdq = dev_get_drvdata(dev); + cmdq_gctl_value_toggle(cmdq, false); clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); return 0; } @@ -347,9 +349,6 @@ static int cmdq_suspend(struct device *dev) if (task_running) dev_warn(dev, "exist running task(s) in suspend\n"); - if (cmdq->pdata->sw_ddr_en) - cmdq_sw_ddr_enable(cmdq, false); - return pm_runtime_force_suspend(dev); } @@ -360,9 +359,6 @@ static int cmdq_resume(struct device *dev) WARN_ON(pm_runtime_force_resume(dev)); cmdq->suspended = false; - if (cmdq->pdata->sw_ddr_en) - cmdq_sw_ddr_enable(cmdq, true); - return 0; } @@ -370,9 +366,6 @@ static void cmdq_remove(struct platform_device *pdev) { struct cmdq *cmdq = platform_get_drvdata(pdev); - if (cmdq->pdata->sw_ddr_en) - cmdq_sw_ddr_enable(cmdq, false); - if (!IS_ENABLED(CONFIG_PM)) 
cmdq_runtime_suspend(&pdev->dev); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e42f1400cea9d..f5171167819b5 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1733,7 +1733,12 @@ static CLOSURE_CALLBACK(cache_set_flush) mutex_unlock(&b->write_lock); } - if (ca->alloc_thread) + /* + * If the register_cache_set() call to bch_cache_set_alloc() failed, + * ca was never assigned a value and an error was returned instead, + * so we need to check that ca is not NULL during bch_cache_set_unregister(). + */ + if (ca && ca->alloc_thread) kthread_stop(ca->alloc_thread); if (c->journal.cur) { diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 3637761f35853..f3a3f2ef63226 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -141,6 +141,7 @@ struct mapped_device { #ifdef CONFIG_BLK_DEV_ZONED unsigned int nr_zones; void *zone_revalidate_map; + struct task_struct *revalidate_map_task; #endif #ifdef CONFIG_IMA diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index b690905ab89ff..347881f323d5b 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -47,14 +47,15 @@ enum feature_flag_bits { }; struct per_bio_data { - bool bio_submitted; + bool bio_can_corrupt; + struct bvec_iter saved_iter; }; static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, struct dm_target *ti) { - int r; - unsigned int argc; + int r = 0; + unsigned int argc = 0; const char *arg_name; static const struct dm_arg _args[] = { @@ -65,14 +66,13 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, {0, PROBABILITY_BASE, "Invalid random corrupt argument"}, }; - /* No feature arguments supplied. */ - if (!as->argc) - return 0; - - r = dm_read_arg_group(_args, as, &argc, &ti->error); - if (r) + if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error))) return r; + /* No feature arguments supplied. */ + if (!argc) + goto error_all_io; + while (argc) { arg_name = dm_shift_arg(as); argc--; @@ -217,6 +217,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) && !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) && !fc->random_read_corrupt && !fc->random_write_corrupt) { +error_all_io: set_bit(ERROR_WRITES, &fc->flags); set_bit(ERROR_READS, &fc->flags); } @@ -339,7 +340,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) } static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte, - unsigned char corrupt_bio_value) + unsigned char corrupt_bio_value, + struct bvec_iter start) { struct bvec_iter iter; struct bio_vec bvec; @@ -348,7 +350,7 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte, * Overwrite the Nth byte of the bio's data, on whichever page * it falls. */ - bio_for_each_segment(bvec, bio, iter) { + __bio_for_each_segment(bvec, bio, iter, start) { if (bio_iter_len(bio, iter) > corrupt_bio_byte) { unsigned char *segment = bvec_kmap_local(&bvec); segment[corrupt_bio_byte] = corrupt_bio_value; @@ -357,36 +359,31 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte, "(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n", bio, corrupt_bio_value, corrupt_bio_byte, (bio_data_dir(bio) == WRITE) ?
'w' : 'r', bio->bi_opf, - (unsigned long long)bio->bi_iter.bi_sector, - bio->bi_iter.bi_size); + (unsigned long long)start.bi_sector, + start.bi_size); break; } corrupt_bio_byte -= bio_iter_len(bio, iter); } } -static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) +static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc, + struct bvec_iter start) { unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1; - if (!bio_has_data(bio)) - return; - - corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value); + corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start); } -static void corrupt_bio_random(struct bio *bio) +static void corrupt_bio_random(struct bio *bio, struct bvec_iter start) { unsigned int corrupt_byte; unsigned char corrupt_value; - if (!bio_has_data(bio)) - return; - - corrupt_byte = get_random_u32() % bio->bi_iter.bi_size; + corrupt_byte = get_random_u32() % start.bi_size; corrupt_value = get_random_u8(); - corrupt_bio_common(bio, corrupt_byte, corrupt_value); + corrupt_bio_common(bio, corrupt_byte, corrupt_value, start); } static void clone_free(struct bio *clone) @@ -481,7 +478,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) unsigned int elapsed; struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); - pb->bio_submitted = false; + pb->bio_can_corrupt = false; if (op_is_zone_mgmt(bio_op(bio))) goto map_bio; @@ -490,10 +487,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) elapsed = (jiffies - fc->start_time) / HZ; if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) { bool corrupt_fixed, corrupt_random; - /* - * Flag this bio as submitted while down. - */ - pb->bio_submitted = true; + + if (bio_has_data(bio)) { + pb->bio_can_corrupt = true; + pb->saved_iter = bio->bi_iter; + } /* * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set. @@ -516,6 +514,8 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } + if (!pb->bio_can_corrupt) + goto map_bio; /* * Corrupt matching writes. */ @@ -535,9 +535,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) struct bio *clone = clone_bio(ti, fc, bio); if (clone) { if (corrupt_fixed) - corrupt_bio_data(clone, fc); + corrupt_bio_data(clone, fc, + clone->bi_iter); if (corrupt_random) - corrupt_bio_random(clone); + corrupt_bio_random(clone, + clone->bi_iter); submit_bio(clone); return DM_MAPIO_SUBMITTED; } @@ -559,21 +561,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, if (op_is_zone_mgmt(bio_op(bio))) return DM_ENDIO_DONE; - if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { + if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) { if (fc->corrupt_bio_byte) { if ((fc->corrupt_bio_rw == READ) && all_corrupt_bio_flags_match(bio, fc)) { /* * Corrupt successful matching READs while in down state. 
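 * (Editor's note: this end_io path runs after the bio has completed, when
 * bio->bi_iter no longer covers the data; that is why the map path now
 * snapshots the iterator. The save/replay pattern, restated from the
 * hunks above:
 *
 *	// in flakey_map(), while bi_iter still describes the payload
 *	pb->saved_iter = bio->bi_iter;
 *
 *	// in the completion path, iterate from the snapshot instead
 *	__bio_for_each_segment(bvec, bio, iter, pb->saved_iter)
 *		...corrupt the chosen byte...
 *
 * __bio_for_each_segment() is the <linux/bio.h> iterator that starts from
 * an explicit struct bvec_iter rather than from bio->bi_iter.)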
*/ - corrupt_bio_data(bio, fc); + corrupt_bio_data(bio, fc, pb->saved_iter); } } if (fc->random_read_corrupt) { u64 rnd = get_random_u64(); u32 rem = do_div(rnd, PROBABILITY_BASE); if (rem < fc->random_read_corrupt) - corrupt_bio_random(bio); + corrupt_bio_random(bio, pb->saved_iter); } if (test_bit(ERROR_READS, &fc->flags)) { /* diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 1e0d3b9b75d6f..163a5bbd485f9 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2410,7 +2410,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) */ sb_retrieve_failed_devices(sb, failed_devices); rdev_for_each(r, mddev) { - if (test_bit(Journal, &rdev->flags) || + if (test_bit(Journal, &r->flags) || !r->sb_page) continue; sb2 = page_address(r->sb_page); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9511dae5b556a..94b6c43dfa5cb 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -133,10 +133,9 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) spin_lock_irqsave(&ms->lock, flags); should_wake = !(bl->head); bio_list_add(bl, bio); - spin_unlock_irqrestore(&ms->lock, flags); - if (should_wake) wakeup_mirrord(ms); + spin_unlock_irqrestore(&ms->lock, flags); } static void dispatch_bios(void *context, struct bio_list *bio_list) @@ -646,9 +645,9 @@ static void write_callback(unsigned long error, void *context) if (!ms->failures.head) should_wake = 1; bio_list_add(&ms->failures, bio); - spin_unlock_irqrestore(&ms->lock, flags); if (should_wake) wakeup_mirrord(ms); + spin_unlock_irqrestore(&ms->lock, flags); } static void do_write(struct mirror_set *ms, struct bio *bio) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 883f01e78324f..e45cffdd419a8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -431,6 +431,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, return 0; } + mutex_lock(&q->limits_lock); if (blk_stack_limits(limits, &q->limits, get_start_sect(bdev) + start) < 0) DMWARN("%s: adding target device %pg caused an alignment inconsistency: " @@ -448,6 +449,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, */ if (!dm_target_has_integrity(ti->type)) queue_limits_stack_integrity_bdev(limits, bdev); + mutex_unlock(&q->limits_lock); return 0; } @@ -1734,8 +1736,12 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev * sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); + int b; - return !q->limits.max_write_zeroes_sectors; + mutex_lock(&q->limits_lock); + b = !q->limits.max_write_zeroes_sectors; + mutex_unlock(&q->limits_lock); + return b; } static bool dm_table_supports_write_zeroes(struct dm_table *t) diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c index 655453bb276be..425b3a74f4dba 100644 --- a/drivers/md/dm-vdo/indexer/volume.c +++ b/drivers/md/dm-vdo/indexer/volume.c @@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * u32 physical_page, struct cached_page **page_ptr) { struct cached_page *page; + unsigned int zone_number = request->zone_number; get_page_from_cache(&volume->page_cache, physical_page, &page); if (page != NULL) { - if (request->zone_number == 0) { + if (zone_number == 0) { /* Only one zone is allowed to update the LRU. 
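 * (Editor's note: this and the following hunks hoist request->zone_number
 * into a local, so each begin_pending_search()/end_pending_search() pair
 * is guaranteed to operate on one consistent zone value -- presumably
 * because the request can be requeued or handed off between the two
 * calls; the diff itself only shows the uniform substitution:
 *
 *	unsigned int zone_number = request->zone_number;  // snapshot once
 *
 *	begin_pending_search(&volume->page_cache, physical_page, zone_number);
 *	...					// request may change hands here
 *	end_pending_search(&volume->page_cache, zone_number);
 * )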
*/ make_page_most_recent(&volume->page_cache, page); } @@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * } /* Prepare to enqueue a read for the page. */ - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); mutex_lock(&volume->read_threads_mutex); /* @@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * * the order does not matter for correctness as it does below. */ mutex_unlock(&volume->read_threads_mutex); - begin_pending_search(&volume->page_cache, physical_page, - request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); return UDS_QUEUED; } @@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * * "search pending" state in careful order so no other thread can mess with the data before * the caller gets to look at it. */ - begin_pending_search(&volume->page_cache, physical_page, request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); mutex_unlock(&volume->read_threads_mutex); *page_ptr = page; return UDS_SUCCESS; @@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r { int result; struct cached_page *page = NULL; + unsigned int zone_number = request->zone_number; u32 physical_page = map_to_physical_page(volume->geometry, chapter, index_page_number); @@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r * invalidation by the reader thread, before the reader thread has noticed that the * invalidate_counter has been incremented. */ - begin_pending_search(&volume->page_cache, physical_page, request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); result = get_volume_page_protected(volume, request, physical_page, &page); if (result != UDS_SUCCESS) { - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return result; } result = uds_search_chapter_index_page(&page->index_page, volume->geometry, &request->record_name, record_page_number); - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return result; } @@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req { struct cached_page *record_page; struct index_geometry *geometry = volume->geometry; + unsigned int zone_number = request->zone_number; int result; u32 physical_page, page_number; @@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req * invalidation by the reader thread, before the reader thread has noticed that the * invalidate_counter has been incremented. 
*/ - begin_pending_search(&volume->page_cache, physical_page, request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); result = get_volume_page_protected(volume, request, physical_page, &record_page); if (result != UDS_SUCCESS) { - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return result; } @@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req &request->record_name, geometry, &request->old_metadata)) *found = true; - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return UDS_SUCCESS; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 6bd9848518d47..559b8179ac502 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -604,6 +604,10 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, (*argc)--; if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) { + if (v->fec->dev) { + ti->error = "FEC device already specified"; + return -EINVAL; + } r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev); if (r) { ti->error = "FEC device lookup failed"; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 53ba0fbdf495c..ce0462e751a61 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1080,6 +1080,9 @@ static int verity_alloc_most_once(struct dm_verity *v) { struct dm_target *ti = v->ti; + if (v->validated_blocks) + return 0; + /* the bitset can only handle INT_MAX blocks */ if (v->data_blocks > INT_MAX) { ti->error = "device too large to use check_at_most_once"; @@ -1103,6 +1106,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v) struct dm_verity_io *io; u8 *zero_data; + if (v->zero_digest) + return 0; + v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL); if (!v->zero_digest) @@ -1537,7 +1543,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - /* Root hash signature is a optional parameter*/ + /* Root hash signature is an optional parameter */ r = verity_verify_root_hash(root_hash_digest_to_validate, strlen(root_hash_digest_to_validate), verify_args.sig, diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c index a9e2c6c0a33c6..d5261a0e4232e 100644 --- a/drivers/md/dm-verity-verify-sig.c +++ b/drivers/md/dm-verity-verify-sig.c @@ -71,9 +71,14 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as, const char *arg_name) { struct dm_target *ti = v->ti; - int ret = 0; + int ret; const char *sig_key = NULL; + if (v->signature_key_desc) { + ti->error = DM_VERITY_VERIFY_ERR("root_hash_sig_key_desc already specified"); + return -EINVAL; + } + if (!*argc) { ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified"); return -EINVAL; @@ -83,14 +88,18 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as, (*argc)--; ret = verity_verify_get_sig_from_key(sig_key, sig_opts); - if (ret < 0) + if (ret < 0) { ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified"); + return ret; + } v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL); - if (!v->signature_key_desc) + if (!v->signature_key_desc) { + ti->error = DM_VERITY_VERIFY_ERR("Could not allocate memory for signature key"); return -ENOMEM; + } - return ret; + return 0; } /* diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index c0d41c36e06eb..04cc36a9d5ca4 100644 --- a/drivers/md/dm-zone.c +++ 
b/drivers/md/dm-zone.c @@ -56,24 +56,31 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector, { struct mapped_device *md = disk->private_data; struct dm_table *map; - int srcu_idx, ret; + struct dm_table *zone_revalidate_map = md->zone_revalidate_map; + int srcu_idx, ret = -EIO; + bool put_table = false; - if (!md->zone_revalidate_map) { - /* Regular user context */ + if (!zone_revalidate_map || md->revalidate_map_task != current) { + /* + * Regular user context or + * Zone revalidation during __bind() is in progress, but this + * call is from a different process + */ if (dm_suspended_md(md)) return -EAGAIN; map = dm_get_live_table(md, &srcu_idx); - if (!map) - return -EIO; + put_table = true; } else { /* Zone revalidation during __bind() */ - map = md->zone_revalidate_map; + map = zone_revalidate_map; } - ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data); + if (map) + ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, + data); - if (!md->zone_revalidate_map) + if (put_table) dm_put_live_table(md, srcu_idx); return ret; @@ -175,7 +182,9 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) * our table for dm_blk_report_zones() to use directly. */ md->zone_revalidate_map = t; + md->revalidate_map_task = current; ret = blk_revalidate_disk_zones(disk); + md->revalidate_map_task = NULL; md->zone_revalidate_map = NULL; if (ret) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d29125ee9e72a..92e5a233f5160 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2410,21 +2410,29 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, struct queue_limits *limits) { struct dm_table *old_map; - sector_t size; + sector_t size, old_size; int ret; lockdep_assert_held(&md->suspend_lock); size = dm_table_get_size(t); + old_size = dm_get_size(md); + set_capacity(md->disk, size); + + ret = dm_table_set_restrictions(t, md->queue, limits); + if (ret) { + set_capacity(md->disk, old_size); + old_map = ERR_PTR(ret); + goto out; + } + /* * Wipe any geometry if the size of the table changed. */ - if (size != dm_get_size(md)) + if (size != old_size) memset(&md->geometry, 0, sizeof(md->geometry)); - set_capacity(md->disk, size); - dm_table_event_callback(t, event_callback, md); if (dm_table_request_based(t)) { @@ -2442,10 +2450,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, * requests in the queue may refer to bio from the old bioset, * so you must walk through the queue to unprep. */ - if (!md->mempools) { + if (!md->mempools) md->mempools = t->mempools; - t->mempools = NULL; - } + else + dm_free_md_mempools(t->mempools); } else { /* * The md may already have mempools that need changing. @@ -2454,14 +2462,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, */ dm_free_md_mempools(md->mempools); md->mempools = t->mempools; - t->mempools = NULL; - } - - ret = dm_table_set_restrictions(t, md->queue, limits); - if (ret) { - old_map = ERR_PTR(ret); - goto out; } + t->mempools = NULL; old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); rcu_assign_pointer(md->map, (void *)t); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index fbb4f57010da6..0da1d0723f882 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -787,7 +787,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap) * is a good choice? We choose COUNTER_MAX / 2 arbitrarily. 
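 * (Editor's note: the guard below used to read
 * "if (write_behind > COUNTER_MAX)", so values in the interval
 * (COUNTER_MAX / 2, COUNTER_MAX] escaped the clamp this comment promises;
 * the fix compares against the same bound it assigns:
 *
 *	if (write_behind > COUNTER_MAX / 2)
 *		write_behind = COUNTER_MAX / 2;
 *
 * so any oversized setting now lands exactly on the documented cap.)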
*/ write_behind = bitmap->mddev->bitmap_info.max_write_behind; - if (write_behind > COUNTER_MAX) + if (write_behind > COUNTER_MAX / 2) write_behind = COUNTER_MAX / 2; sb->write_behind = cpu_to_le32(write_behind); bitmap->mddev->bitmap_info.max_write_behind = write_behind; @@ -2355,8 +2355,7 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) if (!bitmap) return -ENOENT; - if (!bitmap->mddev->bitmap_info.external && - !bitmap->storage.sb_page) + if (!bitmap->storage.sb_page) return -EINVAL; sb = kmap_local_page(bitmap->storage.sb_page); stats->sync_size = le64_to_cpu(sb->sync_size); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 6b6cd753d61a9..fe1599db69c84 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3380,6 +3380,7 @@ static int raid1_reshape(struct mddev *mddev) /* ok, everything is stopped */ oldpool = conf->r1bio_pool; conf->r1bio_pool = newpool; + init_waitqueue_head(&conf->r1bio_pool.wait); for (d = d2 = 0; d < conf->raid_disks; d++) { struct md_rdev *rdev = conf->mirrors[d].rdev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index cc194f6ec18da..5cdc599fcad3c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1181,8 +1181,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, } } - if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) + if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) { + raid_end_bio_io(r10_bio); return; + } + rdev = read_balance(conf, r10_bio, &max_sectors); if (!rdev) { if (err_rdev) { @@ -1368,8 +1371,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, } sectors = r10_bio->sectors; - if (!regular_request_wait(mddev, conf, bio, sectors)) + if (!regular_request_wait(mddev, conf, bio, sectors)) { + raid_end_bio_io(r10_bio); return; + } + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && (mddev->reshape_backwards ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 6975a71d740f6..a5aa6a2a028cb 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -469,7 +469,7 @@ vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf, struct vb2_dma_sg_buf *buf = dbuf->priv; struct sg_table *sgt = buf->dma_sgt; - dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); + dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir); return 0; } @@ -480,7 +480,7 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, struct vb2_dma_sg_buf *buf = dbuf->priv; struct sg_table *sgt = buf->dma_sgt; - dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); + dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir); return 0; } diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c index cf8858cb13d4c..611c9823be857 100644 --- a/drivers/media/i2c/ccs-pll.c +++ b/drivers/media/i2c/ccs-pll.c @@ -312,6 +312,11 @@ __ccs_pll_calculate_vt_tree(struct device *dev, dev_dbg(dev, "more_mul2: %u\n", more_mul); pll_fr->pll_multiplier = mul * more_mul; + if (pll_fr->pll_multiplier > lim_fr->max_pll_multiplier) { + dev_dbg(dev, "pll multiplier %u too high\n", + pll_fr->pll_multiplier); + return -EINVAL; + } if (pll_fr->pll_multiplier * pll_fr->pll_ip_clk_freq_hz > lim_fr->max_pll_op_clk_freq_hz) @@ -397,6 +402,8 @@ static int ccs_pll_calculate_vt_tree(struct device *dev, min_pre_pll_clk_div = max_t(u16, min_pre_pll_clk_div, pll->ext_clk_freq_hz / lim_fr->max_pll_ip_clk_freq_hz); + if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER)) + min_pre_pll_clk_div = clk_div_even(min_pre_pll_clk_div); dev_dbg(dev, "vt min/max_pre_pll_clk_div: %u,%u\n", min_pre_pll_clk_div, max_pre_pll_clk_div); @@ -792,7 +799,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim, op_lim_fr->min_pre_pll_clk_div, op_lim_fr->max_pre_pll_clk_div); max_op_pre_pll_clk_div = min_t(u16, op_lim_fr->max_pre_pll_clk_div, - clk_div_even(pll->ext_clk_freq_hz / + DIV_ROUND_UP(pll->ext_clk_freq_hz, op_lim_fr->min_pll_ip_clk_freq_hz)); min_op_pre_pll_clk_div = max_t(u16, op_lim_fr->min_pre_pll_clk_div, @@ -815,6 +822,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim, one_or_more( DIV_ROUND_UP(op_lim_fr->max_pll_op_clk_freq_hz, pll->ext_clk_freq_hz)))); + if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER)) + min_op_pre_pll_clk_div = clk_div_even(min_op_pre_pll_clk_div); dev_dbg(dev, "pll_op check: min / max op_pre_pll_clk_div: %u / %u\n", min_op_pre_pll_clk_div, max_op_pre_pll_clk_div); diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c index 7670d6c82d923..5d754372230e5 100644 --- a/drivers/media/i2c/ds90ub913.c +++ b/drivers/media/i2c/ds90ub913.c @@ -450,10 +450,10 @@ static int ub913_set_fmt(struct v4l2_subdev *sd, if (!fmt) return -EINVAL; - format->format.code = finfo->outcode; - *fmt = format->format; + fmt->code = finfo->outcode; + return 0; } diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c index 0beb80b8c4581..9b4db4cd4929c 100644 --- a/drivers/media/i2c/imx335.c +++ b/drivers/media/i2c/imx335.c @@ -31,7 +31,7 @@ #define IMX335_REG_CPWAIT_TIME CCI_REG8(0x300d) #define IMX335_REG_WINMODE CCI_REG8(0x3018) #define IMX335_REG_HTRIMMING_START CCI_REG16_LE(0x302c) -#define IMX335_REG_HNUM CCI_REG8(0x302e) +#define IMX335_REG_HNUM CCI_REG16_LE(0x302e) /* 
Lines per frame */ #define IMX335_REG_VMAX CCI_REG24_LE(0x3030) @@ -660,7 +660,8 @@ static int imx335_enum_frame_size(struct v4l2_subdev *sd, struct imx335 *imx335 = to_imx335(sd); u32 code; - if (fsize->index > ARRAY_SIZE(imx335_mbus_codes)) + /* Only a single supported_mode available. */ + if (fsize->index > 0) return -EINVAL; code = imx335_get_format_code(imx335, fsize->code); diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c index bd0b2f0f0d45b..3a0835fa57667 100644 --- a/drivers/media/i2c/ov2740.c +++ b/drivers/media/i2c/ov2740.c @@ -1404,12 +1404,12 @@ static int ov2740_probe(struct i2c_client *client) return 0; probe_error_v4l2_subdev_cleanup: + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); v4l2_subdev_cleanup(&ov2740->sd); probe_error_media_entity_cleanup: media_entity_cleanup(&ov2740->sd.entity); - pm_runtime_disable(&client->dev); - pm_runtime_set_suspended(&client->dev); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler); diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c index 2833b14ee139d..c0ab3c0ed88e3 100644 --- a/drivers/media/i2c/ov5675.c +++ b/drivers/media/i2c/ov5675.c @@ -1295,11 +1295,8 @@ static int ov5675_probe(struct i2c_client *client) return -ENOMEM; ret = ov5675_get_hwcfg(ov5675, &client->dev); - if (ret) { - dev_err(&client->dev, "failed to get HW configuration: %d", - ret); + if (ret) return ret; - } v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops); diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c index 3b94338f55ed3..23d524de7d60a 100644 --- a/drivers/media/i2c/ov8856.c +++ b/drivers/media/i2c/ov8856.c @@ -2276,8 +2276,8 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev) if (!is_acpi_node(fwnode)) { ov8856->xvclk = devm_clk_get(dev, "xvclk"); if (IS_ERR(ov8856->xvclk)) { - dev_err(dev, "could not get xvclk clock (%pe)\n", - ov8856->xvclk); + dev_err_probe(dev, PTR_ERR(ov8856->xvclk), + "could not get xvclk clock\n"); return PTR_ERR(ov8856->xvclk); } @@ -2382,11 +2382,8 @@ static int ov8856_probe(struct i2c_client *client) return -ENOMEM; ret = ov8856_get_hwcfg(ov8856, &client->dev); - if (ret) { - dev_err(&client->dev, "failed to get HW configuration: %d", - ret); + if (ret) return ret; - } v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops); diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c index b71f66bd8c1fd..92d513608395c 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-dma.c +++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c @@ -172,7 +172,7 @@ void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size, count = PHYS_PFN(size); iova = alloc_iova(&mmu->dmap->iovad, count, - PHYS_PFN(dma_get_mask(dev)), 0); + PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); if (!iova) goto out_kfree; @@ -398,7 +398,7 @@ int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist, nents, npages); iova = alloc_iova(&mmu->dmap->iovad, npages, - PHYS_PFN(dma_get_mask(dev)), 0); + PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); if (!iova) return 0; diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index 91718eabd74e5..1c5c38a30e629 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -463,11 +463,6 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) { int ret; - /* disable IPU6 PCI ATS on mtl ES2 */ - if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && 
- pci_ats_supported(dev)) - pci_disable_ats(dev); - /* No PCI msi capability for IPU6EP */ if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) { /* likely do nothing as msi not enabled by default */ diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.c b/drivers/media/platform/imagination/e5010-jpeg-enc.c index 187f2d8abfbb5..cb1f7de1b6321 100644 --- a/drivers/media/platform/imagination/e5010-jpeg-enc.c +++ b/drivers/media/platform/imagination/e5010-jpeg-enc.c @@ -1057,8 +1057,11 @@ static int e5010_probe(struct platform_device *pdev) e5010->vdev->lock = &e5010->mutex; ret = v4l2_device_register(dev, &e5010->v4l2_dev); - if (ret) - return dev_err_probe(dev, ret, "failed to register v4l2 device\n"); + if (ret) { + dev_err_probe(dev, ret, "failed to register v4l2 device\n"); + goto fail_after_video_device_alloc; + } + e5010->m2m_dev = v4l2_m2m_init(&e5010_m2m_ops); if (IS_ERR(e5010->m2m_dev)) { @@ -1118,6 +1121,8 @@ static int e5010_probe(struct platform_device *pdev) v4l2_m2m_release(e5010->m2m_dev); fail_after_v4l2_register: v4l2_device_unregister(&e5010->v4l2_dev); +fail_after_video_device_alloc: + video_device_release(e5010->vdev); return ret; } diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c index aa721cc43647c..2725db882e5b3 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c @@ -821,7 +821,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst, inst->vsi_core->fb.y.dma_addr = y_fb_dma; inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[0]; inst->vsi_core->fb.c.dma_addr = c_fb_dma; - inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[1]; + inst->vsi_core->fb.c.size = ctx->picinfo.fb_sz[1]; inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb; diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c index b8c9bb017fb5f..73be1013edd0c 100644 --- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c +++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c @@ -752,6 +752,32 @@ static int mxc_get_free_slot(struct mxc_jpeg_slot_data *slot_data) return -1; } +static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg) +{ + /* free descriptor for decoding/encoding phase */ + dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc), + jpeg->slot_data.desc, + jpeg->slot_data.desc_handle); + jpeg->slot_data.desc = NULL; + jpeg->slot_data.desc_handle = 0; + + /* free descriptor for encoder configuration phase / decoder DHT */ + dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc), + jpeg->slot_data.cfg_desc, + jpeg->slot_data.cfg_desc_handle); + jpeg->slot_data.cfg_desc_handle = 0; + jpeg->slot_data.cfg_desc = NULL; + + /* free configuration stream */ + dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM, + jpeg->slot_data.cfg_stream_vaddr, + jpeg->slot_data.cfg_stream_handle); + jpeg->slot_data.cfg_stream_vaddr = NULL; + jpeg->slot_data.cfg_stream_handle = 0; + + jpeg->slot_data.used = false; +} + static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg) { struct mxc_jpeg_desc *desc; @@ -794,30 +820,11 @@ static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg) return true; err: dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", jpeg->slot_data.slot); + mxc_jpeg_free_slot_data(jpeg); return false; } -static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev 
*jpeg) -{ - /* free descriptor for decoding/encoding phase */ - dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc), - jpeg->slot_data.desc, - jpeg->slot_data.desc_handle); - - /* free descriptor for encoder configuration phase / decoder DHT */ - dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc), - jpeg->slot_data.cfg_desc, - jpeg->slot_data.cfg_desc_handle); - - /* free configuration stream */ - dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM, - jpeg->slot_data.cfg_stream_vaddr, - jpeg->slot_data.cfg_stream_handle); - - jpeg->slot_data.used = false; -} - static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx, struct vb2_v4l2_buffer *src_buf, struct vb2_v4l2_buffer *dst_buf) @@ -1918,9 +1925,19 @@ static void mxc_jpeg_buf_queue(struct vb2_buffer *vb) jpeg_src_buf = vb2_to_mxc_buf(vb); jpeg_src_buf->jpeg_parse_error = false; ret = mxc_jpeg_parse(ctx, vb); - if (ret) + if (ret) { jpeg_src_buf->jpeg_parse_error = true; + /* + * If the capture queue is not set up, device_run() won't be scheduled, + * so we need to drop the error buffer here to let decoding continue. + */ + if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx))) { + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); + return; + } + } + end: v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c index 9745d6219a166..cd6c52e9d158a 100644 --- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c +++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c @@ -43,6 +43,7 @@ struct mxc_isi_m2m_ctx_queue_data { struct v4l2_pix_format_mplane format; const struct mxc_isi_format_info *info; u32 sequence; + bool streaming; }; struct mxc_isi_m2m_ctx { @@ -486,15 +487,18 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh); + struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type); const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format; const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format; const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info; const struct mxc_isi_format_info *out_info = ctx->queues.out.info; struct mxc_isi_m2m *m2m = ctx->m2m; bool bypass; - int ret; + if (q->streaming) + return 0; + mutex_lock(&m2m->lock); if (m2m->usage_count == INT_MAX) { @@ -547,6 +551,8 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh, goto unchain; } + q->streaming = true; + return 0; unchain: @@ -569,10 +575,14 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh); + struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type); struct mxc_isi_m2m *m2m = ctx->m2m; v4l2_m2m_ioctl_streamoff(file, fh, type); + if (!q->streaming) + return 0; + mutex_lock(&m2m->lock); /* @@ -598,6 +608,8 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh, mutex_unlock(&m2m->lock); + q->streaming = false; + return 0; } diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index cabcf710c0462..4d10e94eefe9e 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -354,7 +354,7 @@ static int venus_probe(struct platform_device *pdev) ret = v4l2_device_register(dev, &core->v4l2_dev); if (ret) - goto err_core_deinit; + goto err_hfi_destroy; platform_set_drvdata(pdev, core); @@
-386,24 +386,24 @@ static int venus_probe(struct platform_device *pdev) ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC); if (ret) - goto err_venus_shutdown; + goto err_core_deinit; ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC); if (ret) - goto err_venus_shutdown; + goto err_core_deinit; ret = pm_runtime_put_sync(dev); if (ret) { pm_runtime_get_noresume(dev); - goto err_dev_unregister; + goto err_core_deinit; } venus_dbgfs_init(core); return 0; -err_dev_unregister: - v4l2_device_unregister(&core->v4l2_dev); +err_core_deinit: + hfi_core_deinit(core, false); err_venus_shutdown: venus_shutdown(core); err_firmware_deinit: @@ -414,9 +414,9 @@ static int venus_probe(struct platform_device *pdev) pm_runtime_put_noidle(dev); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); + v4l2_device_unregister(&core->v4l2_dev); +err_hfi_destroy: hfi_destroy(core); -err_core_deinit: - hfi_core_deinit(core, false); err_core_put: if (core->pm_ops->core_put) core->pm_ops->core_put(core); diff --git a/drivers/media/platform/ti/davinci/vpif.c b/drivers/media/platform/ti/davinci/vpif.c index f4e1fa76bf372..353e8ad158793 100644 --- a/drivers/media/platform/ti/davinci/vpif.c +++ b/drivers/media/platform/ti/davinci/vpif.c @@ -504,7 +504,7 @@ static int vpif_probe(struct platform_device *pdev) pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL); if (!pdev_display) { ret = -ENOMEM; - goto err_put_pdev_capture; + goto err_del_pdev_capture; } pdev_display->name = "vpif_display"; @@ -527,6 +527,8 @@ static int vpif_probe(struct platform_device *pdev) err_put_pdev_display: platform_device_put(pdev_display); +err_del_pdev_capture: + platform_device_del(pdev_capture); err_put_pdev_capture: platform_device_put(pdev_capture); err_put_rpm: diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c index dd375c4e180d1..7d0c723dcd119 100644 --- a/drivers/media/platform/ti/omap3isp/ispccdc.c +++ b/drivers/media/platform/ti/omap3isp/ispccdc.c @@ -446,8 +446,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, if (ret < 0) goto done; - dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl, - req->table.sgt.nents, DMA_TO_DEVICE); + dma_sync_sgtable_for_cpu(isp->dev, &req->table.sgt, + DMA_TO_DEVICE); if (copy_from_user(req->table.addr, config->lsc, req->config.size)) { @@ -455,8 +455,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, goto done; } - dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl, - req->table.sgt.nents, DMA_TO_DEVICE); + dma_sync_sgtable_for_device(isp->dev, &req->table.sgt, + DMA_TO_DEVICE); } spin_lock_irqsave(&ccdc->lsc.req_lock, flags); diff --git a/drivers/media/platform/ti/omap3isp/ispstat.c b/drivers/media/platform/ti/omap3isp/ispstat.c index 359a846205b0f..d3da68408ecb1 100644 --- a/drivers/media/platform/ti/omap3isp/ispstat.c +++ b/drivers/media/platform/ti/omap3isp/ispstat.c @@ -161,8 +161,7 @@ static void isp_stat_buf_sync_for_device(struct ispstat *stat, if (ISP_STAT_USES_DMAENGINE(stat)) return; - dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl, - buf->sgt.nents, DMA_FROM_DEVICE); + dma_sync_sgtable_for_device(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE); } static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, @@ -171,8 +170,7 @@ static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, if (ISP_STAT_USES_DMAENGINE(stat)) return; - dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl, - buf->sgt.nents, DMA_FROM_DEVICE); + dma_sync_sgtable_for_cpu(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE); } static void 
isp_stat_buf_clear(struct ispstat *stat) diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c index 232c93eea7eea..18cad5ac92d8d 100644 --- a/drivers/media/platform/verisilicon/hantro_postproc.c +++ b/drivers/media/platform/verisilicon/hantro_postproc.c @@ -260,8 +260,10 @@ int hantro_postproc_init(struct hantro_ctx *ctx) for (i = 0; i < num_buffers; i++) { ret = hantro_postproc_alloc(ctx, i); - if (ret) + if (ret) { + hantro_postproc_free(ctx); return ret; + } } return 0; diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c index 7838e62727128..f3023e91b3ebc 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_channel.c +++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c @@ -497,7 +497,7 @@ int vidtv_channel_si_init(struct vidtv_mux *m) vidtv_psi_sdt_table_destroy(m->si.sdt); free_pat: vidtv_psi_pat_table_destroy(m->si.pat); - return 0; + return -EINVAL; } void vidtv_channel_si_destroy(struct vidtv_mux *m) diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index f25e011153642..0d5919e000756 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -947,8 +947,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection if (dev->has_compose_cap) { v4l2_rect_set_min_size(compose, &min_rect); v4l2_rect_set_max_size(compose, &max_rect); - v4l2_rect_map_inside(compose, &fmt); } + v4l2_rect_map_inside(compose, &fmt); dev->fmt_cap_rect = fmt; tpg_s_buf_height(&dev->tpg, fmt.height); } else if (dev->has_compose_cap) { diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 1d98d3465e28d..ce52c936cb931 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -119,9 +119,8 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff) o[0] = GPIO_TUNER; o[1] = onoff; - cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1); - if (i != 0x01) + if (!cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1) && i != 0x01) dev_info(&d->udev->dev, "gpio_write failed.\n"); st->gpio_write_state[GPIO_TUNER] = onoff; diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c index 5a47dcbf1c8e5..303b055fefea9 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c @@ -520,12 +520,13 @@ static int hdcs_init(struct sd *sd) static int hdcs_dump(struct sd *sd) { u16 reg, val; + int err = 0; pr_info("Dumping sensor registers:\n"); - for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH; reg++) { - stv06xx_read_sensor(sd, reg, &val); + for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH && !err; reg++) { + err = stv06xx_read_sensor(sd, reg, &val); pr_info("reg 0x%02x = 0x%02x\n", reg, val); } - return 0; + return (err < 0) ? 
err : 0; } diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 58d1bc80253e8..957d620ad671c 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -1689,7 +1689,9 @@ static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls, } static void uvc_ctrl_send_events(struct uvc_fh *handle, - const struct v4l2_ext_control *xctrls, unsigned int xctrls_count) + struct uvc_entity *entity, + const struct v4l2_ext_control *xctrls, + unsigned int xctrls_count) { struct uvc_control_mapping *mapping; struct uvc_control *ctrl; @@ -1700,6 +1702,9 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle, u32 changes = V4L2_EVENT_CTRL_CH_VALUE; ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping); + if (ctrl->entity != entity) + continue; + if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) /* Notification will be sent from an Interrupt event. */ continue; @@ -1830,15 +1835,20 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain) return mutex_lock_interruptible(&chain->ctrl_mutex) ? -ERESTARTSYS : 0; } +/* + * Returns the number of uvc controls that have been correctly set, or a + * negative number if there has been an error. + */ static int uvc_ctrl_commit_entity(struct uvc_device *dev, struct uvc_fh *handle, struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl) { + unsigned int processed_ctrls = 0; struct uvc_control *ctrl; unsigned int i; - int ret; + int ret = 0; if (entity == NULL) return 0; @@ -1867,8 +1877,9 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev, dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), ctrl->info.size); - else - ret = 0; + + if (!ret) + processed_ctrls++; if (rollback || ret < 0) memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), @@ -1877,18 +1888,25 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev, ctrl->dirty = 0; - if (ret < 0) { + if (ret < 0 && !rollback) { if (err_ctrl) *err_ctrl = ctrl; - return ret; + /* + * If we fail to set a control, we need to rollback + * the next ones. + */ + rollback = 1; } - if (!rollback && handle && + if (!rollback && handle && !ret && ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) uvc_ctrl_set_handle(handle, ctrl, handle); } - return 0; + if (ret) + return ret; + + return processed_ctrls; } static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity, @@ -1918,7 +1936,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback, struct uvc_video_chain *chain = handle->chain; struct uvc_control *err_ctrl; struct uvc_entity *entity; - int ret = 0; + int ret_out = 0; + int ret; /* Find the control. */ list_for_each_entry(entity, &chain->entities, chain) { @@ -1929,15 +1948,23 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback, ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls, err_ctrl); - goto done; + /* + * When we fail to commit an entity, we need to + * restore the UVC_CTRL_DATA_BACKUP for all the + * controls in the other entities, otherwise our cache + * and the hardware will be out of sync. 
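
The rollback scheme introduced above is easier to follow in isolation: once one entity fails to commit, the loop keeps running, but only to restore every remaining control from its backup copy, and the first error is what the caller sees. A minimal sketch of that pattern, with a hypothetical commit_one() helper standing in for the real per-entity commit (this is not the UVC API):

struct entity;

/*
 * Hypothetical helper: applies the entity's dirty controls when
 * rollback == 0, restores them from the backup copy when rollback == 1.
 * Returns the number of controls set, or a negative error code.
 */
int commit_one(struct entity *e, int rollback);

int commit_all(struct entity **entities, unsigned int n)
{
	int ret_out = 0;
	int rollback = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		int ret = commit_one(entities[i], rollback);

		if (ret < 0 && !rollback) {
			rollback = 1;	/* later entities only restore backups */
			ret_out = ret;	/* report the first failure */
		}
	}

	return ret_out;
}
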
+ */ + rollback = 1; + + ret_out = ret; + } else if (ret > 0 && !rollback) { + uvc_ctrl_send_events(handle, entity, + ctrls->controls, ctrls->count); } } - if (!rollback) - uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count); -done: mutex_unlock(&chain->ctrl_mutex); - return ret; + return ret_out; } int uvc_ctrl_get(struct uvc_video_chain *chain, diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index a0d683d266471..241b3f95f3270 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2217,13 +2217,16 @@ static int uvc_probe(struct usb_interface *intf, #endif /* Parse the Video Class control descriptor. */ - if (uvc_parse_control(dev) < 0) { + ret = uvc_parse_control(dev); + if (ret < 0) { + ret = -ENODEV; uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n"); goto error; } /* Parse the associated GPIOs. */ - if (uvc_gpio_parse(dev) < 0) { + ret = uvc_gpio_parse(dev); + if (ret < 0) { uvc_dbg(dev, PROBE, "Unable to parse UVC GPIOs\n"); goto error; } @@ -2249,24 +2252,32 @@ static int uvc_probe(struct usb_interface *intf, } /* Register the V4L2 device. */ - if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) + ret = v4l2_device_register(&intf->dev, &dev->vdev); + if (ret < 0) goto error; /* Scan the device for video chains. */ - if (uvc_scan_device(dev) < 0) + if (uvc_scan_device(dev) < 0) { + ret = -ENODEV; goto error; + } /* Initialize controls. */ - if (uvc_ctrl_init_device(dev) < 0) + if (uvc_ctrl_init_device(dev) < 0) { + ret = -ENODEV; goto error; + } /* Register video device nodes. */ - if (uvc_register_chains(dev) < 0) + if (uvc_register_chains(dev) < 0) { + ret = -ENODEV; goto error; + } #ifdef CONFIG_MEDIA_CONTROLLER /* Register the media device node */ - if (media_device_register(&dev->mdev) < 0) + ret = media_device_register(&dev->mdev); + if (ret < 0) goto error; #endif /* Save our data pointer in the interface data. */ @@ -2300,7 +2311,7 @@ static int uvc_probe(struct usb_interface *intf, error: uvc_unregister_video(dev); kref_put(&dev->ref, uvc_delete); - return -ENODEV; + return ret; } static void uvc_disconnect(struct usb_interface *intf) diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index 3d7711cc42bc5..56f3ab966d606 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -1052,25 +1052,25 @@ int __video_register_device(struct video_device *vdev, vdev->dev.class = &video_class; vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); vdev->dev.parent = vdev->dev_parent; + vdev->dev.release = v4l2_device_release; dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); + + /* Increase v4l2_device refcount */ + v4l2_device_get(vdev->v4l2_dev); + mutex_lock(&videodev_lock); ret = device_register(&vdev->dev); if (ret < 0) { mutex_unlock(&videodev_lock); pr_err("%s: device_register failed\n", __func__); - goto cleanup; + put_device(&vdev->dev); + return ret; } - /* Register the release callback that will be called when the last - reference to the device goes away. */ - vdev->dev.release = v4l2_device_release; if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) pr_warn("%s: requested %s%d, got %s\n", __func__, name_base, nr, video_device_node_name(vdev)); - /* Increase v4l2_device refcount */ - v4l2_device_get(vdev->v4l2_dev); - /* Part 5: Register the entity. 
*/ ret = video_register_media_controller(vdev); diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c index e58990c85ed87..8b5fed4760394 100644 --- a/drivers/mfd/exynos-lpass.c +++ b/drivers/mfd/exynos-lpass.c @@ -104,11 +104,22 @@ static const struct regmap_config exynos_lpass_reg_conf = { .fast_io = true, }; +static void exynos_lpass_disable_lpass(void *data) +{ + struct platform_device *pdev = data; + struct exynos_lpass *lpass = platform_get_drvdata(pdev); + + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) + exynos_lpass_disable(lpass); +} + static int exynos_lpass_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_lpass *lpass; void __iomem *base_top; + int ret; lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL); if (!lpass) @@ -122,8 +133,8 @@ static int exynos_lpass_probe(struct platform_device *pdev) if (IS_ERR(lpass->sfr0_clk)) return PTR_ERR(lpass->sfr0_clk); - lpass->top = regmap_init_mmio(dev, base_top, - &exynos_lpass_reg_conf); + lpass->top = devm_regmap_init_mmio(dev, base_top, + &exynos_lpass_reg_conf); if (IS_ERR(lpass->top)) { dev_err(dev, "LPASS top regmap initialization failed\n"); return PTR_ERR(lpass->top); @@ -134,18 +145,11 @@ static int exynos_lpass_probe(struct platform_device *pdev) pm_runtime_enable(dev); exynos_lpass_enable(lpass); - return devm_of_platform_populate(dev); -} - -static void exynos_lpass_remove(struct platform_device *pdev) -{ - struct exynos_lpass *lpass = platform_get_drvdata(pdev); + ret = devm_add_action_or_reset(dev, exynos_lpass_disable_lpass, pdev); + if (ret) + return ret; - exynos_lpass_disable(lpass); - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) - exynos_lpass_disable(lpass); - regmap_exit(lpass->top); + return devm_of_platform_populate(dev); } static int __maybe_unused exynos_lpass_suspend(struct device *dev) @@ -185,7 +189,6 @@ static struct platform_driver exynos_lpass_driver = { .of_match_table = exynos_lpass_of_match, }, .probe = exynos_lpass_probe, - .remove_new = exynos_lpass_remove, }; module_platform_driver(exynos_lpass_driver); diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 6fce79ec2dc64..7e7e8af9af224 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -456,6 +456,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c) { struct max14577 *max14577 = i2c_get_clientdata(i2c); + device_init_wakeup(max14577->dev, false); mfd_remove_devices(max14577->dev); regmap_del_irq_chip(max14577->irq, max14577->irq_data); if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c index 792236f56399a..b9cc85ea2c401 100644 --- a/drivers/mfd/stmpe-spi.c +++ b/drivers/mfd/stmpe-spi.c @@ -129,7 +129,7 @@ static const struct spi_device_id stmpe_spi_id[] = { { "stmpe2403", STMPE2403 }, { } }; -MODULE_DEVICE_TABLE(spi, stmpe_id); +MODULE_DEVICE_TABLE(spi, stmpe_spi_id); static struct spi_driver stmpe_spi_driver = { .driver = { diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index ad7c7f1573191..5e44b518f36c7 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -324,7 +324,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) guard(mutex)(&tp->mutex); /* rom xfer is big endian */ - cpu_to_be32_array((u32 *)tp->tx_buf, obuf, words); + cpu_to_be32_array((__be32 *)tp->tx_buf, obuf, words); ret = read_poll_timeout(gpiod_get_value_cansleep, ret, !ret, 
VSC_TP_ROM_XFER_POLL_DELAY_US, @@ -340,7 +340,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) return ret; if (ibuf) - be32_to_cpu_array(ibuf, (u32 *)tp->rx_buf, words); + be32_to_cpu_array(ibuf, (__be32 *)tp->rx_buf, words); return ret; } diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c index 9bcca1856bfee..db3d6a21a2122 100644 --- a/drivers/misc/tps6594-pfsm.c +++ b/drivers/misc/tps6594-pfsm.c @@ -281,6 +281,9 @@ static int tps6594_pfsm_probe(struct platform_device *pdev) pfsm->miscdev.minor = MISC_DYNAMIC_MINOR; pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x", tps->chip_id, tps->reg); + if (!pfsm->miscdev.name) + return -ENOMEM; + pfsm->miscdev.fops = &tps6594_pfsm_fops; pfsm->miscdev.parent = dev->parent; pfsm->chip_id = tps->chip_id; diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index abe79f6fd2a79..b64944367ac53 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -227,6 +227,7 @@ static int drv_cp_harray_to_user(void __user *user_buf_uva, static int vmci_host_setup_notify(struct vmci_ctx *context, unsigned long uva) { + struct page *page; int retval; if (context->notify_page) { @@ -243,13 +244,11 @@ static int vmci_host_setup_notify(struct vmci_ctx *context, /* * Lock physical page backing a given user VA. */ - retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page); - if (retval != 1) { - context->notify_page = NULL; + retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page); + if (retval != 1) return VMCI_ERROR_GENERIC; - } - if (context->notify_page == NULL) - return VMCI_ERROR_UNAVAILABLE; + + context->notify_page = page; /* * Map the locked page and set up notify pointer. diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index 3205feb1e8ff6..9cbdd240c3a7d 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -89,6 +89,7 @@ struct mmc_fixup { #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 #define CID_MANFID_APACER 0x27 +#define CID_MANFID_SWISSBIT 0x5D #define CID_MANFID_KINGSTON 0x70 #define CID_MANFID_HYNIX 0x90 #define CID_MANFID_KINGSTON_SD 0x9F @@ -294,4 +295,9 @@ static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c) return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY; } +static inline int mmc_card_no_uhs_ddr50_tuning(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING; +} + #endif diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index 89b512905be14..c417ed34c0576 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -34,6 +34,22 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = { MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY, EXT_CSD_REV_ANY), + /* + * Swissbit series S46-u cards throw I/O errors during tuning requests + * after the initial tuning request expectedly times out. This has + * only been observed on cards manufactured on 01/2019 that are using + * Bay Trail host controllers. 
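
The fixup entry that follows encodes, in table form, a predicate over the card's CID: manufacturer and OEM IDs, product name, and manufacturing date. A hedged sketch of the equivalent check, using illustrative field names rather than the MMC core's actual structures:

#include <stdbool.h>
#include <string.h>

struct cid_fields {
	unsigned int manfid;	/* 0x5D == CID_MANFID_SWISSBIT */
	unsigned int oemid;
	char prod_name[8];
	unsigned int year, month;
};

/*
 * Match the Swissbit "0016G" cards manufactured in 01/2019 that the
 * quirk entry below targets.
 */
static bool matches_swissbit_s46u(const struct cid_fields *cid)
{
	return cid->manfid == 0x5D &&
	       cid->oemid == 0x5342 &&
	       strcmp(cid->prod_name, "0016G") == 0 &&
	       cid->year == 2019 && cid->month == 1;
}
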
+	 */
+	_FIXUP_EXT("0016G", CID_MANFID_SWISSBIT, 0x5342, 2019, 1,
+		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+		   MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY),
+
+	/*
+	 * Some SD cards report discard support while they don't
+	 */
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+		  MMC_QUIRK_BROKEN_SD_DISCARD),
+
 	END_FIXUP
 };

@@ -137,12 +153,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
 	MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
 		  MMC_QUIRK_TRIM_BROKEN),

-	/*
-	 * Some SD cards reports discard support while they don't
-	 */
-	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
-		  MMC_QUIRK_BROKEN_SD_DISCARD),
-
 	END_FIXUP
 };
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 63915541c0e49..916ae9996e9d7 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -613,6 +613,29 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
 	return 0;
 }

+/*
+ * Determine if the card should tune or not.
+ */
+static bool mmc_sd_use_tuning(struct mmc_card *card)
+{
+	/*
+	 * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+	 */
+	if (mmc_host_is_spi(card->host))
+		return false;
+
+	switch (card->host->ios.timing) {
+	case MMC_TIMING_UHS_SDR50:
+	case MMC_TIMING_UHS_SDR104:
+		return true;
+	case MMC_TIMING_UHS_DDR50:
+		return !mmc_card_no_uhs_ddr50_tuning(card);
+	}
+
+	return false;
+}
+
 /*
  * UHS-I specific initialization procedure
  */
@@ -656,14 +679,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
 	if (err)
 		goto out;

-	/*
-	 * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
-	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
-	 */
-	if (!mmc_host_is_spi(card->host) &&
-	    (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
-	     card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
-	     card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+	if (mmc_sd_use_tuning(card)) {
 		err = mmc_execute_tuning(card);

 		/*
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index d5d868cb4edc7..be9954f5bc0a6 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -776,12 +776,18 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
 static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
 {
 	if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
-		data->host_cookie |= MSDC_PREPARE_FLAG;
 		data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
 					    mmc_get_dma_dir(data));
+		if (data->sg_count)
+			data->host_cookie |= MSDC_PREPARE_FLAG;
 	}
 }

+static bool msdc_data_prepared(struct mmc_data *data)
+{
+	return data->host_cookie & MSDC_PREPARE_FLAG;
+}
+
 static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
 {
 	if (data->host_cookie & MSDC_ASYNC_FLAG)
@@ -1345,8 +1351,19 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	WARN_ON(host->mrq);
 	host->mrq = mrq;

-	if (mrq->data)
+	if (mrq->data) {
 		msdc_prepare_data(host, mrq->data);
+		if (!msdc_data_prepared(mrq->data)) {
+			host->mrq = NULL;
+			/*
+			 * Failed to prepare DMA area, fail fast before
+			 * starting any commands.
+			 */
+			mrq->cmd->error = -ENOSPC;
+			mmc_request_done(mmc_from_priv(host), mrq);
+			return;
+		}
+	}

 	/* if SBC is required, we have HW option and SW option.
* if HW option is enabled, and SBC does not have "special" flags, diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 8fd80dac11bfd..bf29aad082a19 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -787,6 +788,29 @@ static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv } } +static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) +{ + struct device *dev = mmc_dev(host->mmc); + int ret; + + /* + * This works around the design of the RK3576's power domains, which + * makes the PD_NVM power domain, which the sdhci controller on the + * RK3576 is in, never come back the same way once it's run-time + * suspended once. This can happen during early kernel boot if no driver + * is using either PD_NVM or its child power domain PD_SDGMAC for a + * short moment, leading to it being turned off to save power. By + * keeping it on, sdhci suspending won't lead to PD_NVM becoming a + * candidate for getting turned off. + */ + ret = dev_pm_genpd_rpm_always_on(dev, true); + if (ret && ret != -EOPNOTSUPP) + dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n", + ERR_PTR(ret)); + + dwcmshc_rk35xx_postinit(host, dwc_priv); +} + static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -1218,6 +1242,18 @@ static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = { .postinit = dwcmshc_rk35xx_postinit, }; +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_rk35xx_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, + }, + .init = dwcmshc_rk35xx_init, + .postinit = dwcmshc_rk3576_postinit, +}; + static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = { .pdata = { .ops = &sdhci_dwcmshc_th1520_ops, @@ -1316,6 +1352,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { .compatible = "rockchip,rk3588-dwcmshc", .data = &sdhci_dwcmshc_rk35xx_pdata, }, + { + .compatible = "rockchip,rk3576-dwcmshc", + .data = &sdhci_dwcmshc_rk3576_pdata, + }, { .compatible = "rockchip,rk3568-dwcmshc", .data = &sdhci_dwcmshc_rk35xx_pdata, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index cea0b89bbada0..30ecc977712fe 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2036,15 +2036,10 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); - if (clk & SDHCI_CLOCK_CARD_EN) - sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, - SDHCI_CLOCK_CONTROL); + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); - if (clock == 0) { - sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + if (clock == 0) return; - } clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); sdhci_enable_clk(host, clk); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index edfd981dc3945..7896c2eb5ea1d 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -825,4 +825,20 @@ void sdhci_switch_external_dma(struct sdhci_host *host, bool en); void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); +#if 
defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#define SDHCI_DBG_ANYWAY 0 +#elif defined(DEBUG) +#define SDHCI_DBG_ANYWAY 1 +#else +#define SDHCI_DBG_ANYWAY 0 +#endif + +#define sdhci_dbg_dumpregs(host, fmt) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor) || SDHCI_DBG_ANYWAY) \ + sdhci_dumpregs(host); \ +} while (0) + #endif /* __SDHCI_HW_H */ diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c index 47e10945b8d27..63cb206269dd9 100644 --- a/drivers/mtd/nand/ecc-mxic.c +++ b/drivers/mtd/nand/ecc-mxic.c @@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand, { struct mxic_ecc_engine *mxic = nand_to_mxic(nand); struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand); - int nents, step, ret; + int nents, step, ret = 0; if (req->mode == MTD_OPS_RAW) return 0; diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index b8cff9240b286..beafca6ba0df4 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2917,7 +2917,7 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); } - nandc->buf_count = len; + nandc->buf_count = 512; memset(nandc->data_buffer, 0xff, nandc->buf_count); config_nand_single_cw_page_read(chip, false, 0); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index c28634e20abf8..ac887754b98e2 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand, if (ret) return ret; + sunxi_nfc_randomizer_config(nand, page, false); sunxi_nfc_randomizer_enable(nand); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); @@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand, if (ret) return ret; + sunxi_nfc_randomizer_config(nand, page, false); sunxi_nfc_randomizer_enable(nand); sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 4d76f9f71a0e9..241f6a4df16c1 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -1496,6 +1496,7 @@ static void spinand_cleanup(struct spinand_device *spinand) { struct nand_device *nand = spinand_to_nand(spinand); + nanddev_ecc_engine_cleanup(nand); nanddev_cleanup(nand); spinand_manufacturer_cleanup(spinand); kfree(spinand->databuf); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4d2e30f4ee250..52ff0f9e04e07 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -497,9 +497,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs, goto out; } - xs->xso.real_dev = real_dev; err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); if (!err) { + xs->xso.real_dev = real_dev; ipsec->xs = xs; INIT_LIST_HEAD(&ipsec->list); mutex_lock(&bond->ipsec_lock); @@ -541,11 +541,11 @@ static void bond_ipsec_add_sa_all(struct bonding *bond) if (ipsec->xs->xso.real_dev == real_dev) continue; - ipsec->xs->xso.real_dev = real_dev; if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) { slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__); - ipsec->xs->xso.real_dev = NULL; + continue; } + ipsec->xs->xso.real_dev = real_dev; } out: mutex_unlock(&bond->ipsec_lock); @@ -627,6 +627,7 
@@ static void bond_ipsec_del_sa_all(struct bonding *bond) "%s: no slave xdo_dev_state_delete\n", __func__); } else { + ipsec->xs->xso.real_dev = NULL; real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); if (real_dev->xfrmdev_ops->xdo_dev_state_free) real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs); @@ -661,6 +662,7 @@ static void bond_ipsec_free_sa(struct xfrm_state *xs) WARN_ON(xs->xso.real_dev != real_dev); + xs->xso.real_dev = NULL; if (real_dev && real_dev->xfrmdev_ops && real_dev->xfrmdev_ops->xdo_dev_state_free) real_dev->xfrmdev_ops->xdo_dev_state_free(xs); @@ -2113,15 +2115,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, * set the master's mac address to that of the first slave */ memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); - ss.ss_family = slave_dev->type; - res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, - extack); - if (res) { - slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res); - goto err_restore_mtu; - } + } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW && + BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && + memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) { + /* Set slave to random address to avoid duplicate mac + * address in later fail over. + */ + eth_random_addr(ss.__data); + } else { + goto skip_mac_set; } + ss.ss_family = slave_dev->type; + res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, extack); + if (res) { + slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res); + goto err_restore_mtu; + } + +skip_mac_set: + /* set no_addrconf flag before open to prevent IPv6 addrconf */ slave_dev->priv_flags |= IFF_NO_ADDRCONF; diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 020e5897812fe..3fa83f05bfcc8 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -966,7 +966,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) u32 status, tx_nr_packets_max; netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), - KVASER_PCIEFD_CAN_TX_MAX_COUNT); + roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT)); if (!netdev) return -ENOMEM; @@ -995,7 +995,6 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); can->can.clock.freq = pcie->freq; - can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count); spin_lock_init(&can->lock); can->can.bittiming_const = &kvaser_pciefd_bittiming_const; @@ -1670,24 +1669,28 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) return res; } -static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { + void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG; u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) + iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { kvaser_pciefd_read_buffer(pcie, 0); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */ + } - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { kvaser_pciefd_read_buffer(pcie, 1); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */ + } if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || irq & 
KVASER_PCIEFD_SRB_IRQ_DUF0 || irq & KVASER_PCIEFD_SRB_IRQ_DUF1)) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); - - iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - return irq; } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) @@ -1715,29 +1718,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); - u32 srb_irq = 0; - u32 srb_release = 0; int i; if (!(pci_irq & irq_mask->all)) return IRQ_NONE; + iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + if (pci_irq & irq_mask->kcan_rx0) - srb_irq = kvaser_pciefd_receive_irq(pcie); + kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { if (pci_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0; - - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1; - - if (srb_release) - iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); return IRQ_HANDLED; } @@ -1757,13 +1753,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) } } +static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie) +{ + unsigned int i; + + /* Masking PCI_IRQ is insufficient as running ISR will unmask it */ + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); + for (i = 0; i < pcie->nr_channels; ++i) + iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); +} + static int kvaser_pciefd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; struct kvaser_pciefd *pcie; const struct kvaser_pciefd_irq_mask *irq_mask; - void __iomem *irq_en_base; pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -1829,8 +1834,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); /* Enable PCI interrupts */ - irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); - iowrite32(irq_mask->all, irq_en_base); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); /* Ready the DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); @@ -1844,8 +1848,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, return 0; err_free_irq: - /* Disable PCI interrupts */ - iowrite32(0, irq_en_base); + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); err_pci_free_irq_vectors: @@ -1868,35 +1871,26 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, return ret; } -static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) -{ - int i; - - for (i = 0; i < pcie->nr_channels; i++) { - struct kvaser_pciefd_can *can = pcie->can[i]; - - if (can) { - iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); - unregister_candev(can->can.dev); - del_timer(&can->bec_poll_timer); - kvaser_pciefd_pwm_stop(can); - free_candev(can->can.dev); - } - } -} - static void kvaser_pciefd_remove(struct pci_dev *pdev) { struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + unsigned int i; - kvaser_pciefd_remove_all_ctrls(pcie); + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; - /* Disable interrupts */ - iowrite32(0, 
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); - iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + unregister_candev(can->can.dev); + del_timer(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + } + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); pci_free_irq_vectors(pcie->pci); + + for (i = 0; i < pcie->nr_channels; ++i) + free_candev(pcie->can[i]->can.dev); + pci_iounmap(pdev, pcie->reg_base); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index dbd4d8796f9b0..dbcf17fb3ef25 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -665,7 +665,7 @@ static int m_can_handle_lost_msg(struct net_device *dev) struct can_frame *frame; u32 timestamp = 0; - netdev_err(dev, "msg lost in rxf0\n"); + netdev_dbg(dev, "msg lost in rxf0\n"); stats->rx_errors++; stats->rx_over_errors++; diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 2f73bf3abad88..b6c5c8bab7390 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -385,10 +385,11 @@ static int tcan4x5x_can_probe(struct spi_device *spi) priv = cdev_to_priv(mcan_class); priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); - if (PTR_ERR(priv->power) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto out_m_can_class_free_dev; - } else { + if (IS_ERR(priv->power)) { + if (PTR_ERR(priv->power) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_m_can_class_free_dev; + } priv->power = NULL; } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0168ad495e6c9..71c30a81c36db 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1326,24 +1326,7 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, off = B53_RGMII_CTRL_P(port); b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); - - switch (interface) { - case PHY_INTERFACE_MODE_RGMII_ID: - rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); - break; - case PHY_INTERFACE_MODE_RGMII_RXID: - rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC); - rgmii_ctrl |= RGMII_CTRL_DLL_RXC; - break; - case PHY_INTERFACE_MODE_RGMII_TXID: - rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC); - rgmii_ctrl |= RGMII_CTRL_DLL_TXC; - break; - case PHY_INTERFACE_MODE_RGMII: - default: - rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); - break; - } + rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); if (port != dev->imp_port) { if (is63268(dev)) @@ -1373,8 +1356,7 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, * tx_clk aligned timing (restoring to reset defaults) */ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); - rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | - RGMII_CTRL_TIMING_SEL); + rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make * sure that we enable the port TX clock internal delay to @@ -1394,7 +1376,10 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, rgmii_ctrl |= RGMII_CTRL_DLL_TXC; if (interface == PHY_INTERFACE_MODE_RGMII) rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; - rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; + + if (dev->chip_id != BCM53115_DEVICE_ID) + rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; + b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); dev_info(ds->dev, "Configured port %d for %s\n", port, @@ -1458,6 +1443,10 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, 
int port, __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces); + /* BCM63xx RGMII ports support RGMII */ + if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) + phy_interface_set_rgmii(config->supported_interfaces); + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100; @@ -2047,9 +2036,6 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, b53_get_vlan_entry(dev, pvid, vl); vl->members &= ~BIT(port); - if (vl->members == BIT(cpu_port)) - vl->members &= ~BIT(cpu_port); - vl->untag = vl->members; b53_set_vlan_entry(dev, pvid, vl); } @@ -2128,8 +2114,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) } b53_get_vlan_entry(dev, pvid, vl); - vl->members |= BIT(port) | BIT(cpu_port); - vl->untag |= BIT(port) | BIT(cpu_port); + vl->members |= BIT(port); b53_set_vlan_entry(dev, pvid, vl); } } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 3b70f67376331..aa25a8a0a106f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1373,6 +1373,8 @@ #define MDIO_VEND2_CTRL1_SS13 BIT(13) #endif +#define XGBE_VEND2_MAC_AUTO_SW BIT(9) + /* MDIO mask values */ #define XGBE_AN_CL73_INT_CMPLT BIT(0) #define XGBE_AN_CL73_INC_LINK BIT(1) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 07f4f3418d018..ed76a8df6ec6e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -375,6 +375,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable, reg |= MDIO_VEND2_CTRL1_AN_RESTART; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL); + reg |= XGBE_VEND2_MAC_AUTO_SW; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg); } static void xgbe_an37_restart(struct xgbe_prv_data *pdata) @@ -1003,6 +1007,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata) netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n", (pdata->an_mode == XGBE_AN_MODE_CL37) ? 
"BaseX" : "SGMII"); + + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); + reg &= ~MDIO_AN_CTRL1_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); + } static void xgbe_an73_init(struct xgbe_prv_data *pdata) @@ -1404,6 +1413,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata) pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata, &an_restart); + /* bail out if the link status register read fails */ + if (pdata->phy.link < 0) + return; + if (an_restart) { xgbe_phy_config_aneg(pdata); goto adjust_link; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 268399dfcf22f..32e633d113484 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -2855,8 +2855,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg; - int ret; + int reg, ret; *an_restart = 0; @@ -2890,11 +2889,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) return 0; } - /* Link status is latched low, so read once to clear - * and then read again to get current state - */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + if (reg < 0) + return reg; + + /* Link status is latched low so that momentary link drops + * can be detected. If link was already down read again + * to get the latest state. + */ + + if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) { + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + if (reg < 0) + return reg; + } if (pdata->en_rx_adap) { /* if the link is available and adaptation is done, @@ -2913,9 +2921,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) xgbe_phy_set_mode(pdata, phy_data->cur_mode); } - /* check again for the link and adaptation status */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); - if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) + if (pdata->rx_adapt_done) return 1; } else if (reg & MDIO_STAT1_LSTATUS) return 1; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ed5d43c16d0e2..7526a0906b391 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -292,12 +292,12 @@ #define XGBE_LINK_TIMEOUT 5 #define XGBE_KR_TRAINING_WAIT_ITER 50 -#define XGBE_SGMII_AN_LINK_STATUS BIT(1) +#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1) #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) #define XGBE_SGMII_AN_LINK_SPEED_10 0x00 #define XGBE_SGMII_AN_LINK_SPEED_100 0x04 #define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 -#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) +#define XGBE_SGMII_AN_LINK_STATUS BIT(4) /* ECC correctable error notification window (seconds) */ #define XGBE_ECC_LIMIT 60 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index c1d1673c5749d..b565189e59139 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd } #endif - skb_tx_timestamp(skb); return aq_nic_xmit(aq_nic, skb); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 71e50fc65c147..b0994bd05874a 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) frags = aq_nic_map_skb(self, skb, ring); + skb_tx_timestamp(skb); + if (likely(frags)) { err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw, ring, frags); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 3afd3627ce485..9c5d619909045 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) break; } - buffer_info->alloced = 1; - buffer_info->skb = skb; - buffer_info->length = (u16) adapter->rx_buffer_len; page = virt_to_page(skb->data); offset = offset_in_page(skb->data); buffer_info->dma = dma_map_page(&pdev->dev, page, offset, adapter->rx_buffer_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + kfree_skb(skb); + adapter->soft_stats.rx_dropped++; + break; + } + + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16)adapter->rx_buffer_len; + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); rfd_desc->coalese = 0; @@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, return 0; } -static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tx_packet_desc *ptpd) +static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; @@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, unsigned int nr_frags; unsigned int f; int retval; + u16 first_mapped; u16 next_to_use; u16 data_len; u8 hdr_len; @@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buf_len -= skb->data_len; nr_frags = skb_shinfo(skb)->nr_frags; next_to_use = atomic_read(&tpd_ring->next_to_use); + first_mapped = next_to_use; buffer_info = &tpd_ring->buffer_info[next_to_use]; BUG_ON(buffer_info->skb); /* put skb in last TPD */ @@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, offset, hdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) + goto dma_err; if (++next_to_use == tpd_ring->count) next_to_use = 0; @@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, page, offset, buffer_info->length, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + buffer_info->dma)) + goto dma_err; if (++next_to_use == tpd_ring->count) next_to_use = 0; } @@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, offset, buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) + goto dma_err; if (++next_to_use == tpd_ring->count) next_to_use = 0; } @@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, frag, i * ATL1_MAX_TX_BUF_LEN, buffer_info->length, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + buffer_info->dma)) + goto dma_err; if (++next_to_use == 
tpd_ring->count) next_to_use = 0; @@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, /* last tpd's buffer-info */ buffer_info->skb = skb; + + return true; + + dma_err: + while (first_mapped != next_to_use) { + buffer_info = &tpd_ring->buffer_info[first_mapped]; + dma_unmap_page(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + + if (++first_mapped == tpd_ring->count) + first_mapped = 0; + } + return false; } static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, @@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, len = skb_headlen(skb); - if (unlikely(skb->len <= 0)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (unlikely(skb->len <= 0)) + goto drop_packet; nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) { @@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, if (mss) { if (skb->protocol == htons(ETH_P_IP)) { proto_hdr_len = skb_tcp_all_headers(skb); - if (unlikely(proto_hdr_len > len)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (unlikely(proto_hdr_len > len)) + goto drop_packet; + /* need additional TPD ? */ if (proto_hdr_len != len) count += (len - proto_hdr_len + @@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, } tso = atl1_tso(adapter, skb, ptpd); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (tso < 0) + goto drop_packet; if (!tso) { ret_val = atl1_tx_csum(adapter, skb, ptpd); - if (ret_val < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (ret_val < 0) + goto drop_packet; } - atl1_tx_map(adapter, skb, ptpd); + if (!atl1_tx_map(adapter, skb, ptpd)) + goto drop_packet; + atl1_tx_queue(adapter, count, ptpd); atl1_update_mailbox(adapter); return NETDEV_TX_OK; + +drop_packet: + adapter->soft_stats.tx_errors++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } static int atl1_rings_clean(struct napi_struct *napi, int budget) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2bb1fce350dbb..f4bafc71a7399 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2871,6 +2871,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, { struct bnxt_napi *bnapi = cpr->bnapi; u32 raw_cons = cpr->cp_raw_cons; + bool flush_xdp = false; u32 cons; int rx_pkts = 0; u8 event = 0; @@ -2924,6 +2925,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, else rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, &event); + if (event & BNXT_REDIRECT_EVENT) + flush_xdp = true; if (likely(rc >= 0)) rx_pkts += rc; /* Increment rx_pkts when rc is -ENOMEM to count towards @@ -2948,7 +2951,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } } - if (event & BNXT_REDIRECT_EVENT) { + if (flush_xdp) { xdp_do_flush(); event &= ~BNXT_REDIRECT_EVENT; } @@ -10390,6 +10393,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, bp->num_rss_ctx--; } +static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int rxr_id) +{ + u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + int i, vnic_rx; + + /* Ntuple VNIC always has all the rx rings. Any change of ring id + * must be updated because a future filter may use it. 
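
Stripped of the driver-specific flag handling, the bnxt_vnic_has_rx_ring() check being added here reduces to asking whether any slot of the VNIC's RSS indirection table points at the ring being restarted. A minimal standalone sketch of that scan (hypothetical table layout, not the bnxt structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Return true if any indirection-table slot references ring_id, i.e. the
 * VNIC can steer traffic to that ring and must be reconfigured when the
 * ring is restarted.
 */
static bool indir_tbl_uses_ring(const uint16_t *tbl, size_t entries,
				uint16_t ring_id)
{
	size_t i;

	for (i = 0; i < entries; i++)
		if (tbl[i] == ring_id)
			return true;

	return false;
}
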
+ */ + if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) + return true; + + for (i = 0; i < tbl_size; i++) { + if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) + vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; + else + vnic_rx = bp->rss_indir_tbl[i]; + + if (rxr_id == vnic_rx) + return true; + } + + return false; +} + +static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u16 mru, int rxr_id) +{ + int rc; + + if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id)) + return 0; + + if (mru) { + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } + } + vnic->mru = mru; + bnxt_hwrm_vnic_update(bp, vnic, + VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + + return 0; +} + +static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id) +{ + struct ethtool_rxfh_context *ctx; + unsigned long context; + int rc; + + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id); + if (rc) + return rc; + } + + return 0; +} + static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) { bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); @@ -10992,11 +11061,9 @@ static void bnxt_free_irq(struct bnxt *bp) static int bnxt_request_irq(struct bnxt *bp) { + struct cpu_rmap *rmap = NULL; int i, j, rc = 0; unsigned long flags = 0; -#ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rmap; -#endif rc = bnxt_setup_int_mode(bp); if (rc) { @@ -11011,15 +11078,15 @@ static int bnxt_request_irq(struct bnxt *bp) int map_idx = bnxt_cp_num_to_irq_num(bp, i); struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; -#ifdef CONFIG_RFS_ACCEL - if (rmap && bp->bnapi[i]->rx_ring) { + if (IS_ENABLED(CONFIG_RFS_ACCEL) && + rmap && bp->bnapi[i]->rx_ring) { rc = irq_cpu_rmap_add(rmap, irq->vector); if (rc) netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", j); j++; } -#endif + rc = request_irq(irq->vector, irq->handler, flags, irq->name, bp->bnapi[i]); if (rc) @@ -15326,6 +15393,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) struct bnxt_cp_ring_info *cpr; struct bnxt_vnic_info *vnic; int i, rc; + u16 mru; rxr = &bp->rx_ring[idx]; clone = qmem; @@ -15356,21 +15424,15 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) cpr = &rxr->bnapi->cp_ring; cpr->sw_stats->rx.rx_resets++; - for (i = 0; i <= bp->nr_vnics; i++) { + mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; - rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", - vnic->vnic_id, rc); + rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx); + if (rc) return rc; - } - vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; - bnxt_hwrm_vnic_update(bp, vnic, - VNIC_UPDATE_REQ_ENABLES_MRU_VALID); } - - return 0; + return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx); err_free_hwrm_rx_ring: bnxt_hwrm_rx_ring_free(bp, rxr, false); @@ -15384,12 +15446,12 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) struct bnxt_vnic_info *vnic; int i; - for (i = 0; i <= bp->nr_vnics; i++) { + for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; - vnic->mru = 0; - bnxt_hwrm_vnic_update(bp, vnic, - VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + + bnxt_set_vnic_mru_p5(bp, vnic, 0, idx); } + bnxt_set_rss_ctx_vnic_mru(bp, 0, idx); /* Make sure NAPI sees 
that the VNIC is disabled */ synchronize_net(); rxr = &bp->rx_ring[idx]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 0dbb880a7aa0e..71e14be2507e1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc) return -EINVAL; + } + for (i = 0; i < max_tc; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 546d9a3d7efea..1867552a8bdbe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -148,7 +148,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); struct bnxt_ulp *ulp; - int i = 0; ulp = edev->ulp_tbl; rtnl_lock(); @@ -164,10 +163,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) synchronize_rcu(); ulp->max_async_event_id = 0; ulp->async_events_bmap = NULL; - while (atomic_read(&ulp->ref_count) != 0 && i < 10) { - msleep(100); - i++; - } mutex_unlock(&edev->en_dev_lock); rtnl_unlock(); return; @@ -235,10 +230,9 @@ void bnxt_ulp_stop(struct bnxt *bp) return; mutex_lock(&edev->en_dev_lock); - if (!bnxt_ulp_registered(edev)) { - mutex_unlock(&edev->en_dev_lock); - return; - } + if (!bnxt_ulp_registered(edev) || + (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) + goto ulp_stop_exit; edev->flags |= BNXT_EN_FLAG_ULP_STOPPED; if (aux_priv) { @@ -254,6 +248,7 @@ void bnxt_ulp_stop(struct bnxt *bp) adrv->suspend(adev, pm); } } +ulp_stop_exit: mutex_unlock(&edev->en_dev_lock); } @@ -262,19 +257,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err) struct bnxt_aux_priv *aux_priv = bp->aux_priv; struct bnxt_en_dev *edev = bp->edev; - if (!edev) - return; - - edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED; - - if (err) + if (!edev || err) return; mutex_lock(&edev->en_dev_lock); - if (!bnxt_ulp_registered(edev)) { - mutex_unlock(&edev->en_dev_lock); - return; - } + if (!bnxt_ulp_registered(edev) || + !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) + goto ulp_start_exit; if (edev->ulp_tbl->msix_requested) bnxt_fill_msix_vecs(bp, edev->msix_entries); @@ -291,6 +280,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err) adrv->resume(adev); } } +ulp_start_exit: + edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED; mutex_unlock(&edev->en_dev_lock); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 4f4914f5c84c9..b76a231ca7dac 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -48,7 +48,6 @@ struct bnxt_ulp { unsigned long *async_events_bmap; u16 max_async_event_id; u16 msix_requested; - atomic_t ref_count; }; struct bnxt_en_dev { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 8726657f5cb9e..844812bd65363 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, tx_buf->action = XDP_REDIRECT; tx_buf->xdpf = xdpf; dma_unmap_addr_set(tx_buf, mapping, mapping); - dma_unmap_len_set(tx_buf, len, 0); + dma_unmap_len_set(tx_buf, len, len); } void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi 
*bnapi, int budget) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ae100ed8ed6b9..3c2a7919b1289 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -5117,7 +5117,11 @@ static int macb_probe(struct platform_device *pdev) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (err) { + dev_err(&pdev->dev, "failed to set DMA mask\n"); + goto err_out_free_netdev; + } bp->hw_dma_cap |= HW_DMA_CAP_64B; } #endif diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index ffed14b63d41d..a432783756d8c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2127,10 +2127,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; - if (netdev->mtu > enic->port_mtu) + if (new_mtu > enic->port_mtu) netdev_warn(netdev, "interface MTU (%d) set higher than port MTU (%d)\n", - netdev->mtu, enic->port_mtu); + new_mtu, enic->port_mtu); return _enic_change_mtu(netdev, new_mtu); } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 73e1c71c5092e..92833eefc04b4 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1143,6 +1143,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, struct gmac_txdesc *txd; skb_frag_t *skb_frag; dma_addr_t mapping; + bool tcp = false; void *buffer; u16 mss; int ret; @@ -1150,6 +1151,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, word1 = skb->len; word3 = SOF_BIT; + /* Determine if we are doing TCP */ + if (skb->protocol == htons(ETH_P_IP)) + tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP); + else + /* IPv6 */ + tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP); + mss = skb_shinfo(skb)->gso_size; if (mss) { /* This means we are dealing with TCP and skb->len is the @@ -1162,8 +1170,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, mss, skb->len); word1 |= TSS_MTU_ENABLE_BIT; word3 |= mss; + } else if (tcp) { + /* Even if we are not using TSO, use the hardware offloader + * for transferring the TCP frame: this hardware has partial + * TCP awareness (called TOE - TCP Offload Engine) and will + * according to the datasheet put packets belonging to the + * same TCP connection in the same queue for the TOE/TSO + * engine to process. The engine will deal with chopping + * up frames that exceed ETH_DATA_LEN which the + * checksumming engine cannot handle (see below) into + * manageable chunks. It flawlessly deals with quite big + * frames and frames containing custom DSA EtherTypes. + */ + mss = netdev->mtu + skb_tcp_all_headers(skb); + mss = min(mss, skb->len); + netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n", + skb->len, netdev->mtu, mss); + word1 |= TSS_MTU_ENABLE_BIT; + word3 |= mss; } else if (skb->len >= ETH_FRAME_LEN) { - /* Hardware offloaded checksumming isn't working on frames + /* Hardware offloaded checksumming isn't working on non-TCP frames * bigger than 1514 bytes. 
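The new gemini TOE branch above enables the MTU/segmentation engine even for non-TSO TCP frames, programming the descriptor's MSS field with the MTU plus header length, clamped to the frame length. A standalone restatement of that computation (the values in main() are made up):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the TOE-path mss: netdev->mtu + skb_tcp_all_headers(),
 * but never more than the frame itself. */
static uint32_t toe_mss(uint32_t mtu, uint32_t header_len, uint32_t skb_len)
{
	uint32_t mss = mtu + header_len;

	return mss < skb_len ? mss : skb_len;
}

int main(void)
{
	/* a 300-byte TCP frame on a 1500-byte MTU link is not split */
	printf("%u\n", toe_mss(1500, 66, 300));   /* -> 300 */
	/* a 9000-byte frame gets chopped into MTU-sized chunks */
	printf("%u\n", toe_mss(1500, 66, 9000));  /* -> 1566 */
	return 0;
}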
A hypothesis about this is that the * checksum buffer is only 1518 bytes, so when the frames get * bigger they get truncated, or the last few bytes get @@ -1180,21 +1206,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, } if (skb->ip_summed == CHECKSUM_PARTIAL) { - int tcp = 0; - /* We do not switch off the checksumming on non TCP/UDP * frames: as is shown from tests, the checksumming engine * is smart enough to see that a frame is not actually TCP * or UDP and then just pass it through without any changes * to the frame. */ - if (skb->protocol == htons(ETH_P_IP)) { + if (skb->protocol == htons(ETH_P_IP)) word1 |= TSS_IP_CHKSUM_BIT; - tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; - } else { /* IPv6 */ + else word1 |= TSS_IPV6_ENABLE_BIT; - tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP; - } word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT; } diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 6bf8a7aeef908..787218d60c6b1 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -146,6 +146,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) np->ioaddr = ioaddr; np->chip_id = chip_idx; np->pdev = pdev; + + spin_lock_init(&np->stats_lock); spin_lock_init (&np->tx_lock); spin_lock_init (&np->rx_lock); @@ -865,7 +867,6 @@ tx_error (struct net_device *dev, int tx_status) frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); - dev->stats.tx_errors++; /* Ttransmit Underrun */ if (tx_status & 0x10) { dev->stats.tx_fifo_errors++; @@ -902,9 +903,15 @@ tx_error (struct net_device *dev, int tx_status) rio_set_led_mode(dev); /* Let TxStartThresh stay default value */ } + + spin_lock(&np->stats_lock); /* Maximum Collisions */ if (tx_status & 0x08) dev->stats.collisions++; + + dev->stats.tx_errors++; + spin_unlock(&np->stats_lock); + /* Restart the Tx */ dw32(MACCtrl, dr16(MACCtrl) | TxEnable); } @@ -1073,7 +1080,9 @@ get_stats (struct net_device *dev) int i; #endif unsigned int stat_reg; + unsigned long flags; + spin_lock_irqsave(&np->stats_lock, flags); /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ @@ -1123,6 +1132,9 @@ get_stats (struct net_device *dev) dr16(TCPCheckSumErrors); dr16(UDPCheckSumErrors); dr16(IPCheckSumErrors); + + spin_unlock_irqrestore(&np->stats_lock, flags); + return &dev->stats; } diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 0e33e2eaae960..56aff2f0bdbfa 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -372,6 +372,8 @@ struct netdev_private { struct pci_dev *pdev; void __iomem *ioaddr; void __iomem *eeprom_addr; + // To ensure synchronization when stats are updated. + spinlock_t stats_lock; spinlock_t tx_lock; spinlock_t rx_lock; unsigned int rx_buf_sz; /* Based on MTU+slack. 
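dl2k's new stats_lock serializes the counter updates done from the transmit-error interrupt path against get_stats(), which reads and acknowledges the hardware statistics registers from process context (hence spin_lock_irqsave() on the reader side and plain spin_lock() in tx_error()). A pthread analogue of the invariant being protected:

#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct nic_stats {
	pthread_mutex_t lock;   /* stands in for np->stats_lock */
	uint64_t tx_errors;
	uint64_t collisions;
};

/* "IRQ" side: update the related counters as one atomic group. */
static void tx_error(struct nic_stats *s, bool max_collisions)
{
	pthread_mutex_lock(&s->lock);
	if (max_collisions)
		s->collisions++;
	s->tx_errors++;
	pthread_mutex_unlock(&s->lock);
}

/* Reader side: snapshot under the same lock so a concurrent
 * tx_error() is never observed half-applied. */
static void get_stats(struct nic_stats *s, uint64_t *err, uint64_t *coll)
{
	pthread_mutex_lock(&s->lock);
	*err = s->tx_errors;
	*coll = s->collisions;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct nic_stats s = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	uint64_t err, coll;

	tx_error(&s, true);
	get_stats(&s, &err, &coll);
	printf("tx_errors=%" PRIu64 " collisions=%" PRIu64 "\n", err, coll);
	return 0;
}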
*/ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 51b8377edd1d0..a89aa4ac0a064 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) /* version 1 of the cmd is not supported only by BE2 */ if (BE2_chip(adapter)) hdr->version = 0; - if (BE3_chip(adapter) || lancer_chip(adapter)) + else if (BE3_chip(adapter) || lancer_chip(adapter)) hdr->version = 1; else hdr->version = 2; diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index c699bd6bcbb93..474073c7f94d7 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -31,6 +31,7 @@ config FTGMAC100 depends on ARM || COMPILE_TEST depends on !64BIT || BROKEN select PHYLIB + select FIXED_PHY select MDIO_ASPEED if MACH_ASPEED_G6 select CRC32 help diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 29886a8ba73f3..efd0048acd3b2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -3928,6 +3928,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, MEM_TYPE_PAGE_ORDER0, NULL); if (err) { dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); + xdp_rxq_info_unreg(&fq->channel->xdp_rxq); return err; } @@ -4421,17 +4422,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) return -EINVAL; } if (err) - return err; + goto out; } err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX, &priv->tx_qdid); if (err) { dev_err(dev, "dpni_get_qdid() failed\n"); - return err; + goto out; } return 0; + +out: + while (i--) { + if (priv->fq[i].type == DPAA2_RX_FQ && + xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq)) + xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq); + } + return err; } /* Allocate rings for storing incoming frame descriptors */ @@ -4814,6 +4823,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) } } +static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_fqs; i++) { + if (priv->fq[i].type == DPAA2_RX_FQ && + xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq)) + xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq); + } +} + static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) { struct device *dev; @@ -5017,6 +5037,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) free_percpu(priv->percpu_stats); err_alloc_percpu_stats: dpaa2_eth_del_ch_napi(priv); + dpaa2_eth_free_rx_xdp_rxq(priv); err_bind: dpaa2_eth_free_dpbps(priv); err_dpbp_setup: @@ -5069,6 +5090,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev) free_percpu(priv->percpu_extras); dpaa2_eth_del_ch_napi(priv); + dpaa2_eth_free_rx_xdp_rxq(priv); dpaa2_eth_free_dpbps(priv); dpaa2_eth_free_dpio(priv); dpaa2_eth_free_dpni(priv); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 1619943fb2637..4e8881b479e48 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -485,7 +485,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg) tmp = ioread32(reg + 4); } while (high != tmp); - return le64_to_cpu((__le64)high << 32 | low); + return (u64)high << 32 | low; } #endif diff --git a/drivers/net/ethernet/google/gve/gve_main.c 
b/drivers/net/ethernet/google/gve/gve_main.c index 862c4575701fe..14f39d1f59d36 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -2207,7 +2207,7 @@ void gve_handle_report_stats(struct gve_priv *priv) }; stats[stats_idx++] = (struct stats) { .stat_name = cpu_to_be32(RX_BUFFERS_POSTED), - .value = cpu_to_be64(priv->rx[0].fill_cnt), + .value = cpu_to_be64(priv->rx[idx].fill_cnt), .queue_id = cpu_to_be32(idx), }; } diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index f879426cb5523..26053cc85d1c5 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -770,6 +770,9 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx, s16 completion_tag; pkt = gve_alloc_pending_packet(tx); + if (!pkt) + return -ENOMEM; + pkt->skb = skb; completion_tag = pkt - tx->dqo.pending_packets; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index a189038d88df0..246ddce753f92 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -211,7 +211,6 @@ struct ibmvnic_statistics { u8 reserved[72]; } __packed __aligned(8); -#define NUM_TX_STATS 3 struct ibmvnic_tx_queue_stats { u64 batched_packets; u64 direct_packets; @@ -219,13 +218,18 @@ struct ibmvnic_tx_queue_stats { u64 dropped_packets; }; -#define NUM_RX_STATS 3 +#define NUM_TX_STATS \ + (sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64)) + struct ibmvnic_rx_queue_stats { u64 packets; u64 bytes; u64 interrupts; }; +#define NUM_RX_STATS \ + (sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64)) + struct ibmvnic_acl_buffer { __be32 len; __be32 version; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 07e9033463582..5fe54e9b71e25 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3540,9 +3540,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: - case e1000_pch_mtp: - case e1000_pch_lnp: - case e1000_pch_ptp: case e1000_pch_nvp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ @@ -3558,6 +3555,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) adapter->cc.shift = shift; } break; + case e1000_pch_mtp: + case e1000_pch_lnp: + case e1000_pch_ptp: + /* System firmware can misreport this value, so set it to a + * stable 38400KHz frequency. 
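The e1000e hunks here stop trusting the E1000_TSYNCRXCTL_SYSCFI bit on the pch_mtp/lnp/ptp MACs and pin the 38.4 MHz timing parameters instead, in both the TIMINCA setup and the PTP max_adj selection. A sketch of the selection logic; the enum and profile names are invented, the real constants are the INCPERIOD_*/INCVALUE_* and MAX_PPB_* macros in the e1000e headers:

#include <stdbool.h>
#include <stdio.h>

enum mac_type { PCH_TGP, PCH_ADP, PCH_MTP, PCH_LNP, PCH_PTP };
enum clk_profile { CLK_24MHZ, CLK_38400KHZ };

/* On pch_mtp/lnp/ptp the SYSCFI bit can be misreported by system
 * firmware, so the 38.4 MHz parameters are chosen unconditionally;
 * earlier PCHs keep trusting the bit. */
static enum clk_profile pick_clk(enum mac_type mac, bool syscfi_set)
{
	switch (mac) {
	case PCH_MTP:
	case PCH_LNP:
	case PCH_PTP:
		return CLK_38400KHZ;
	default:
		return syscfi_set ? CLK_24MHZ : CLK_38400KHZ;
	}
}

int main(void)
{
	/* even with SYSCFI set, MTP gets the fixed profile */
	printf("%d\n", pick_clk(PCH_MTP, true) == CLK_38400KHZ);
	return 0;
}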
+ */ + incperiod = INCPERIOD_38400KHZ; + incvalue = INCVALUE_38400KHZ; + shift = INCVALUE_SHIFT_38400KHZ; + adapter->cc.shift = shift; + break; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 89d57dd911dc8..ea3c3eb2ef202 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -295,15 +295,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: - case e1000_pch_mtp: - case e1000_pch_lnp: - case e1000_pch_ptp: case e1000_pch_nvp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; else adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ; break; + case e1000_pch_mtp: + case e1000_pch_lnp: + case e1000_pch_ptp: + adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ; + break; case e1000_82574: case e1000_82583: adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index e8031f1a9b4fc..2f5a850148676 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -817,10 +817,11 @@ int i40e_pf_reset(struct i40e_hw *hw) void i40e_clear_hw(struct i40e_hw *hw) { u32 num_queues, base_queue; - u32 num_pf_int; - u32 num_vf_int; + s32 num_pf_int; + s32 num_vf_int; u32 num_vfs; - u32 i, j; + s32 i; + u32 j; u32 val; u32 eol = 0x7ff; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index dfa785e39458d..625fa93fc18bb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1546,8 +1546,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) * @vf: pointer to the VF structure * @flr: VFLR was issued or not * - * Returns true if the VF is in reset, resets successfully, or resets - * are disabled and false otherwise. + * Return: True if reset was performed successfully or if resets are disabled. + * False if reset is already in progress. **/ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) { @@ -1566,7 +1566,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) /* If VF is being reset already we don't need to continue. */ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) - return true; + return false; i40e_trigger_vf_reset(vf, flr); @@ -4328,7 +4328,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); if (reg & BIT(bit_idx)) /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ - i40e_reset_vf(vf, true); + if (!i40e_reset_vf(vf, true)) { + /* At least one VF did not finish resetting, retry next time */ + set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); + } } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 405ddd17de1bf..0bb4fb56fbe61 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -377,6 +377,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto) return false; } +/** + * ice_arfs_cmp - Check if aRFS filter matches this flow. + * @fltr_info: filter info of the saved ARFS entry. + * @fk: flow dissector keys. + * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6). + * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP. 
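Before ice_arfs_cmp() was introduced, the steering loop appears to have matched saved entries by hash bucket alone, so a colliding flow could be steered using another flow's filter. The comparison it adds is an ordinary 5-tuple match with the cheap fields first; a simplified IPv4-only version (the driver additionally handles IPv6 with memcmp() over the 128-bit addresses):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Ports come first: they are the cheapest, most discriminating
 * fields, so most mismatches fail early. */
struct flow4 {
	uint16_t src_port, dst_port;
	uint32_t src_ip, dst_ip;
	uint8_t  proto;
};

static bool flow4_match(const struct flow4 *a, const struct flow4 *b)
{
	return a->src_port == b->src_port &&
	       a->dst_port == b->dst_port &&
	       a->src_ip   == b->src_ip &&
	       a->dst_ip   == b->dst_ip &&
	       a->proto    == b->proto;
}

int main(void)
{
	struct flow4 saved = { 443, 50000, 0x0a000001, 0x0a000002, 6 };
	struct flow4 other = { 443, 50001, 0x0a000001, 0x0a000002, 6 };

	printf("%d %d\n", flow4_match(&saved, &saved),
	       flow4_match(&saved, &other));   /* 1 0 */
	return 0;
}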
+ * + * Since this function assumes limited values for n_proto and ip_proto, it + * is meant to be called only from ice_rx_flow_steer(). + * + * Return: + * * true - fltr_info refers to the same flow as fk. + * * false - fltr_info and fk refer to different flows. + */ +static bool +ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk, + __be16 n_proto, u8 ip_proto) +{ + /* Determine if the filter is for IPv4 or IPv6 based on flow_type, + * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}. + */ + bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || + fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP; + + /* Following checks are arranged in the quickest and most discriminative + * fields first for early failure. + */ + if (is_v4) + return n_proto == htons(ETH_P_IP) && + fltr_info->ip.v4.src_port == fk->ports.src && + fltr_info->ip.v4.dst_port == fk->ports.dst && + fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src && + fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst && + fltr_info->ip.v4.proto == ip_proto; + + return fltr_info->ip.v6.src_port == fk->ports.src && + fltr_info->ip.v6.dst_port == fk->ports.dst && + fltr_info->ip.v6.proto == ip_proto && + !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); +} + /** * ice_rx_flow_steer - steer the Rx flow to where application is being run * @netdev: ptr to the netdev being adjusted @@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, continue; fltr_info = &arfs_entry->fltr_info; + + if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto)) + continue; + ret = fltr_info->fltr_id; if (fltr_info->q_index == rxq_idx || diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index ed21d7f55ac11..5b9a7ee278f17 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -502,10 +502,14 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id) */ int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_repr *repr = ice_repr_create_vf(vf); struct devlink *devlink = priv_to_devlink(pf); + struct ice_repr *repr; int err; + if (!ice_is_eswitch_mode_switchdev(pf)) + return 0; + + repr = ice_repr_create_vf(vf); if (IS_ERR(repr)) return PTR_ERR(repr); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 63d2105fce933..d1abd21cfc647 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2761,6 +2761,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi) } } +/** + * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors + * @vsi: the VSI with XDP rings being unmapped + */ +static void ice_unmap_xdp_rings(struct ice_vsi *vsi) +{ + int v_idx; + + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct ice_tx_ring *ring; + + ice_for_each_tx_ring(ring, q_vector->tx) + if (!ring->tx_buf || !ice_ring_is_xdp(ring)) + break; + + /* restore the value of last node prior to XDP setup */ + q_vector->tx.tx_ring = ring; + } +} + /** * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP * @vsi: VSI to bring up Tx rings used by XDP @@ -2824,7 +2845,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, if (status) { dev_err(dev, "Failed VSI 
LAN queue config for XDP, error: %d\n", status); - goto clear_xdp_rings; + goto unmap_xdp_rings; } /* assign the prog only when it's not already present on VSI; @@ -2840,6 +2861,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, ice_vsi_assign_bpf_prog(vsi, prog); return 0; +unmap_xdp_rings: + ice_unmap_xdp_rings(vsi); clear_xdp_rings: ice_for_each_xdp_txq(vsi, i) if (vsi->xdp_rings[i]) { @@ -2856,6 +2879,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, mutex_unlock(&pf->avail_q_mutex); devm_kfree(dev, vsi->xdp_rings); + vsi->xdp_rings = NULL; + return -ENOMEM; } @@ -2871,7 +2896,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; - int i, v_idx; + int i; /* q_vectors are freed in reset path so there's no point in detaching * rings @@ -2879,17 +2904,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) if (cfg_type == ICE_XDP_CFG_PART) goto free_qmap; - ice_for_each_q_vector(vsi, v_idx) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; - struct ice_tx_ring *ring; - - ice_for_each_tx_ring(ring, q_vector->tx) - if (!ring->tx_buf || !ice_ring_is_xdp(ring)) - break; - - /* restore the value of last node prior to XDP setup */ - q_vector->tx.tx_ring = ring; - } + ice_unmap_xdp_rings(vsi); free_qmap: mutex_lock(&pf->avail_q_mutex); @@ -3034,11 +3049,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, xdp_ring_err = ice_vsi_determine_xdp_res(vsi); if (xdp_ring_err) { NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); + goto resume_if; } else { xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, ICE_XDP_CFG_FULL); - if (xdp_ring_err) + if (xdp_ring_err) { NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); + goto resume_if; + } } xdp_features_set_redirect_target(vsi->netdev, true); /* reallocate Rx queues that are used for zero-copy */ @@ -3056,6 +3074,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); } +resume_if: if (if_running) ret = ice_up(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 6ca13c5dcb14e..d9d09296d1d48 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -84,6 +84,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) return NULL; } +/** + * ice_sched_find_next_vsi_node - find the next node for a given VSI + * @vsi_node: VSI support node to start search with + * + * Return: Next VSI support node, or NULL. + * + * The function returns a pointer to the next node from the VSI layer + * assigned to the given VSI, or NULL if there is no such a node. + */ +static struct ice_sched_node * +ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node) +{ + unsigned int vsi_handle = vsi_node->vsi_handle; + + while ((vsi_node = vsi_node->sibling) != NULL) + if (vsi_node->vsi_handle == vsi_handle) + break; + + return vsi_node; +} + /** * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd * @hw: pointer to the HW struct @@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, if (parent->num_children < max_child_nodes) { new_num_nodes = max_child_nodes - parent->num_children; } else { - /* This parent is full, try the next sibling */ - parent = parent->sibling; + /* This parent is full, + * try the next available sibling. 
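ice_sched_find_next_vsi_node(), used in the hunk above, replaces a bare parent->sibling hop: on a tree layer shared by several VSIs, the next sibling may belong to a different VSI, so the walk must skip ahead to the next node with the same vsi_handle. A minimal restatement:

#include <stddef.h>

struct node {
	struct node *sibling;      /* next node on the same tree layer */
	unsigned int vsi_handle;   /* owning VSI */
};

/* Walk the sibling list and return the next node owned by the same
 * VSI, or NULL if there is none. */
static struct node *next_vsi_node(const struct node *n)
{
	unsigned int handle = n->vsi_handle;
	struct node *it = n->sibling;

	while (it && it->vsi_handle != handle)
		it = it->sibling;
	return it;
}

int main(void)
{
	struct node c = { NULL, 7 }, b = { &c, 9 }, a = { &b, 7 };

	return next_vsi_node(&a) == &c ? 0 : 1;   /* skips b (VSI 9) */
}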
+ */ + parent = ice_sched_find_next_vsi_node(parent); /* Don't modify the first node TEID memory if the * first node was added already in the above call. * Instead send some temp memory for all other @@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { + struct ice_sched_node *next_vsi_node; + /* make sure the qgroup node is part of the VSI subtree */ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) if (qgrp_node->num_children < max_children && qgrp_node->owner == owner) break; qgrp_node = qgrp_node->sibling; + if (qgrp_node) + continue; + + next_vsi_node = ice_sched_find_next_vsi_node(vsi_node); + if (!next_vsi_node) + break; + + vsi_node = next_vsi_node; + qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); } /* Select the best queue group */ @@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, /** * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes * @hw: pointer to the HW struct - * @num_qs: number of queues + * @num_new_qs: number of new queues that will be added to the tree * @num_nodes: num nodes array * * This function calculates the number of VSI child nodes based on the * number of queues. */ static void -ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) +ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes) { - u16 num = num_qs; + u16 num = num_new_qs; u8 i, qgl, vsil; qgl = ice_sched_get_qgrp_layer(hw); @@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, if (!parent) return -EIO; - if (i == vsil) + /* Do not modify the VSI handle for already existing VSI nodes, + * (if no new VSI node was added to the tree). + * Assign the VSI handle only to newly added VSI nodes. + */ + if (i == vsil && num_added) parent->vsi_handle = vsi_handle; } @@ -1812,6 +1850,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) num_nodes); } +/** + * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count + * @hw: pointer to the HW struct + * @vsi_node: pointer to the leftmost VSI node that needs to be extended + * @new_numqs: new number of queues that has to be handled by the VSI + * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry + * + * This function recalculates the number of supported nodes that need to + * be added after adding more Tx queues for a given VSI. + * The number of new VSI support nodes that shall be added will be saved + * to the @new_num_nodes table for the VSI layer. 
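The capacity arithmetic this kernel-doc describes, restated as standalone C under an invented tree shape: one VSI support node can feed the product of max_children[] over the layers from the VSI layer down to the queue-group layer, and any queues beyond existing-nodes times that product require extra support nodes.

#include <stdio.h>

static unsigned int max_queues_per_vsi_node(const unsigned int *max_children,
					    unsigned int vsil, unsigned int qgl)
{
	unsigned int cnt = 1;

	for (unsigned int i = vsil; i <= qgl; i++)
		cnt *= max_children[i];
	return cnt;
}

/* ceil(new_numqs / per_node) minus what already exists, never negative:
 * the same formula as the DIV_ROUND_UP() in the function body below. */
static unsigned int extra_vsi_nodes(unsigned int new_numqs,
				    unsigned int per_node,
				    unsigned int vsi_nodes_cnt)
{
	unsigned int needed = (new_numqs + per_node - 1) / per_node;

	return needed > vsi_nodes_cnt ? needed - vsi_nodes_cnt : 0;
}

int main(void)
{
	unsigned int max_children[] = { 0, 0, 0, 0, 0, 8, 16 };  /* made up */
	unsigned int per_node = max_queues_per_vsi_node(max_children, 5, 6);

	printf("per node %u, extra %u\n", per_node,
	       extra_vsi_nodes(300, per_node, 1));   /* 128/node -> 2 more */
	return 0;
}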
+ */ +static void +ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw, + struct ice_sched_node *vsi_node, + unsigned int new_numqs, u16 *new_num_nodes) +{ + u32 vsi_nodes_cnt = 1; + u32 max_queue_cnt = 1; + u32 qgl, vsil; + + qgl = ice_sched_get_qgrp_layer(hw); + vsil = ice_sched_get_vsi_layer(hw); + + for (u32 i = vsil; i <= qgl; i++) + max_queue_cnt *= hw->max_children[i]; + + while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL) + vsi_nodes_cnt++; + + if (new_numqs > (max_queue_cnt * vsi_nodes_cnt)) + new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) - + vsi_nodes_cnt; +} + /** * ice_sched_update_vsi_child_nodes - update VSI child nodes * @pi: port information structure @@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, return status; } - if (new_numqs) - ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); - /* Keep the max number of queue configuration all the time. Update the - * tree only if number of queues > previous number of queues. This may + ice_sched_recalc_vsi_support_nodes(hw, vsi_node, + new_numqs, new_num_nodes); + ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs, + new_num_nodes); + + /* Never decrease the number of queues in the tree. Update the tree + * only if number of queues > previous number of queues. This may * leave some extra nodes in the tree if number of queues < previous * number but that wouldn't harm anything. Removing those extra nodes * may complicate the code if those nodes are part of SRL or * individually rate limited. + * Also, add the required VSI support nodes if the existing ones cannot + * handle the requested new number of queues. */ + status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, + new_num_nodes); + if (status) + return status; + status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, new_num_nodes, owner); if (status) @@ -2012,6 +2095,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); } +/** + * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI + * @pi: port information structure + * @vsi_node: pointer to the leftmost node of the VSI to be removed + * @owner: LAN or RDMA + * @tc: TC number + * + * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC. + * + * This function removes all the VSI support nodes associated with a given VSI + * and its LAN or RDMA children nodes from the scheduler tree. 
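Note the removal loop in the function body below: it only advances the child index when the current entry is kept, instead of restarting the scan from 0 after every free as the old ice_sched_rm_vsi_cfg() code did. A one-pass sketch of that idiom, assuming (as in the scheduler) that freeing a child compacts the array:

#include <stdio.h>

static void remove_owned(int *owners, unsigned int *num, int owner)
{
	unsigned int j = 0;

	while (j < *num) {
		if (owners[j] == owner) {
			for (unsigned int k = j; k + 1 < *num; k++)
				owners[k] = owners[k + 1];   /* compact */
			(*num)--;                 /* stay at j: new entry here */
		} else {
			j++;                      /* kept: move on */
		}
	}
}

int main(void)
{
	int owners[] = { 1, 2, 1, 1, 3 };
	unsigned int n = 5;

	remove_owned(owners, &n, 1);
	printf("%u left\n", n);   /* 2 left (owners 2 and 3) */
	return 0;
}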
+ */ +static int +ice_sched_rm_vsi_subtree(struct ice_port_info *pi, + struct ice_sched_node *vsi_node, u8 owner, u8 tc) +{ + u16 vsi_handle = vsi_node->vsi_handle; + bool all_vsi_nodes_removed = true; + int j = 0; + + while (vsi_node) { + struct ice_sched_node *next_vsi_node; + + if (ice_sched_is_leaf_node_present(vsi_node)) { + ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc); + return -EBUSY; + } + while (j < vsi_node->num_children) { + if (vsi_node->children[j]->owner == owner) + ice_free_sched_node(pi, vsi_node->children[j]); + else + j++; + } + + next_vsi_node = ice_sched_find_next_vsi_node(vsi_node); + + /* remove the VSI if it has no children */ + if (!vsi_node->num_children) + ice_free_sched_node(pi, vsi_node); + else + all_vsi_nodes_removed = false; + + vsi_node = next_vsi_node; + } + + /* clean up aggregator related VSI info if any */ + if (all_vsi_nodes_removed) + ice_sched_rm_agg_vsi_info(pi, vsi_handle); + + return 0; +} + /** * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes * @pi: port information structure @@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) ice_for_each_traffic_class(i) { struct ice_sched_node *vsi_node, *tc_node; - u8 j = 0; tc_node = ice_sched_get_tc_node(pi, i); if (!tc_node) @@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (!vsi_node) continue; - if (ice_sched_is_leaf_node_present(vsi_node)) { - ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); - status = -EBUSY; + status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i); + if (status) goto exit_sched_rm_vsi_cfg; - } - while (j < vsi_node->num_children) { - if (vsi_node->children[j]->owner == owner) { - ice_free_sched_node(pi, vsi_node->children[j]); - /* reset the counter again since the num - * children will be updated after node removal - */ - j = 0; - } else { - j++; - } - } - /* remove the VSI if it has no children */ - if (!vsi_node->num_children) { - ice_free_sched_node(pi, vsi_node); - vsi_ctx->sched.vsi_node[i] = NULL; + vsi_ctx->sched.vsi_node[i] = NULL; - /* clean up aggregator related VSI info if any */ - ice_sched_rm_agg_vsi_info(pi, vsi_handle); - } if (owner == ICE_SCHED_NODE_OWNER_LAN) vsi_ctx->sched.max_lanq[i] = 0; else diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 0e740342e2947..c5430363e7081 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -3146,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ - if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) + if (cur_fltr->vsi_handle == new_fltr->vsi_handle) return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; @@ -5977,7 +5977,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) - return 0; + return -EEXIST; /* Update the previously created VSI list set with * the new VSI ID passed in diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index b28991dd18703..48b8e184f3db6 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) */ static void idpf_ctlq_shutdown(struct 
idpf_hw *hw, struct idpf_ctlq_info *cq) { - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* free ring buffers and the ring itself */ idpf_ctlq_dealloc_ring_res(hw, cq); @@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) /* Set ring_size to 0 to indicate uninitialized queue */ cq->ring_size = 0; - mutex_unlock(&cq->cq_lock); - mutex_destroy(&cq->cq_lock); + spin_unlock(&cq->cq_lock); } /** @@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw, idpf_ctlq_init_regs(hw, cq, is_rxq); - mutex_init(&cq->cq_lock); + spin_lock_init(&cq->cq_lock); list_add(&cq->cq_list, &hw->cq_list_head); @@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, int err = 0; int i; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* Ensure there are enough descriptors to send all messages */ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); @@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, wr32(hw, cq->reg.tail, cq->next_to_use); err_unlock: - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); return err; } @@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (*clean_count > cq->ring_size) return -EBADR; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* Return number of descriptors actually cleaned */ *clean_count = i; @@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, if (*buff_count > 0) buffs_avail = true; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); if (tbp >= cq->ring_size) tbp = 0; @@ -524,7 +523,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, wr32(hw, cq->reg.tail, cq->next_to_post); } - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* return the number of buffers that were not posted */ *buff_count = *buff_count - i; @@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, u16 i; /* take the lock before we start messing with the ring */ - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); *num_q_msg = i; if (*num_q_msg == 0) diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index e8e046ef2f0d7..5890d8adca4a8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -99,7 +99,7 @@ struct idpf_ctlq_info { enum idpf_ctlq_type cq_type; int q_id; - struct mutex cq_lock; /* control queue lock */ + spinlock_t cq_lock; /* control queue lock */ /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 59b1a1a099967..f72420cf68216 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -46,7 +46,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = 
&np->adapter->vport_config[np->vport_idx]->user_config; @@ -65,7 +65,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = &np->adapter->vport_config[np->vport_idx]->user_config; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 615e74d038457..746b655337275 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1802,11 +1802,19 @@ void idpf_vc_event_task(struct work_struct *work) if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) return; - if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || - test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { - set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); - idpf_init_hard_reset(adapter); - } + if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags)) + goto func_reset; + + if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) + goto drv_load; + + return; + +func_reset: + idpf_vc_xn_shutdown(adapter->vcxn_mngr); +drv_load: + set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + idpf_init_hard_reset(adapter); } /** @@ -2307,8 +2315,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) struct idpf_adapter *adapter = hw->back; size_t sz = ALIGN(size, 4096); - mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, - &mem->pa, GFP_KERNEL); + /* The control queue resources are freed under a spinlock, contiguous + * pages will avoid IOMMU remapping and the use vmap (and vunmap in + * dma_free_*() path. + */ + mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = sz; return mem->va; @@ -2323,8 +2335,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) { struct idpf_adapter *adapter = hw->back; - dma_free_coherent(&adapter->pdev->dev, mem->size, - mem->va, mem->pa); + dma_free_attrs(&adapter->pdev->dev, mem->size, + mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = 0; mem->va = NULL; mem->pa = 0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index dfd7cf1d9aa0a..a986dd5725559 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, { struct idpf_tx_offload_params offload = { }; struct idpf_tx_buf *first; + int csum, tso, needed; unsigned int count; __be16 protocol; - int csum, tso; count = idpf_tx_desc_count_required(tx_q, skb); if (unlikely(!count)) return idpf_tx_drop_skb(tx_q, skb); - if (idpf_tx_maybe_stop_common(tx_q, - count + IDPF_TX_DESCS_PER_CACHE_LINE + - IDPF_TX_DESCS_FOR_CTX)) { + needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX; + if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, + IDPF_DESC_UNUSED(tx_q), + needed, needed)) { idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); u64_stats_update_begin(&tx_q->stats_sync); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 623bf17f87f9c..c6c36de58b9d1 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -2132,6 +2132,19 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); } +/* Global 
conditions to tell whether the txq (and related resources) + * has room to allow the use of "size" descriptors. + */ +static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size) +{ + if (IDPF_DESC_UNUSED(tx_q) < size || + IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > + IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) || + IDPF_TX_BUF_RSV_LOW(tx_q)) + return 0; + return 1; +} + /** * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions * @tx_q: the queue to be checked @@ -2142,29 +2155,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, unsigned int descs_needed) { - if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) - goto out; - - /* If there are too many outstanding completions expected on the - * completion queue, stop the TX queue to give the device some time to - * catch up - */ - if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > - IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) - goto splitq_stop; - - /* Also check for available book keeping buffers; if we are low, stop - * the queue to wait for more completions - */ - if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) - goto splitq_stop; - - return 0; - -splitq_stop: - netif_stop_subqueue(tx_q->netdev, tx_q->idx); + if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, + idpf_txq_has_room(tx_q, descs_needed), + 1, 1)) + return 0; -out: u64_stats_update_begin(&tx_q->stats_sync); u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); @@ -2190,12 +2185,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); tx_q->next_to_use = val; - if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) { - u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.q_busy); - u64_stats_update_end(&tx_q->stats_sync); - } - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 9c1fe84108ed2..ffeeaede6cf8f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -1052,12 +1052,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); -static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, - u32 needed) -{ - return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, - IDPF_DESC_UNUSED(tx_q), - needed, needed); -} - #endif /* !_IDPF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 99bdb95bf2266..151beea20d343 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -376,7 +376,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr) * All waiting threads will be woken-up and their transaction aborted. Further * operations on that object will fail. 
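The idpf change above folds the three splitq stop conditions into idpf_txq_has_room() and hands the combined predicate to netif_subqueue_maybe_stop(), so the stop/wake bookkeeping lives in one place; the singleq path gets the same treatment with its own descriptor budget, and the old idpf_tx_maybe_stop_common() helper is deleted. The predicate's shape, with illustrative field names:

#include <stdbool.h>

struct txq {
	unsigned int descs_unused;            /* IDPF_DESC_UNUSED()-like */
	unsigned int complq_pending;          /* outstanding completions */
	unsigned int complq_overflow_thresh;
	unsigned int bufs_reserved;           /* bookkeeping buffers left */
	unsigned int bufs_low_mark;
};

/* Room exists only when the ring, the completion queue and the buffer
 * reserve all have headroom. */
static bool txq_has_room(const struct txq *q, unsigned int size)
{
	return q->descs_unused >= size &&
	       q->complq_pending <= q->complq_overflow_thresh &&
	       q->bufs_reserved > q->bufs_low_mark;
}

int main(void)
{
	struct txq q = { 64, 10, 100, 50, 8 };

	return txq_has_room(&q, 32) ? 0 : 1;
}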
*/ -static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) +void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) { int i; diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h index 83da5d8da56bf..23271cf0a2160 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -66,5 +66,6 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport); int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); +void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr); #endif /* _IDPF_VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 082b0baf5d37c..2a0c5a343e472 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6987,6 +6987,10 @@ static int igc_probe(struct pci_dev *pdev, adapter->port_num = hw->bus.func; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ + if (igc_is_device_id_i226(hw)) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + err = pci_save_state(pdev); if (err) goto err_ioremap; @@ -7368,6 +7372,9 @@ static int igc_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + if (igc_is_device_id_i226(hw)) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + if (igc_init_interrupt_scheme(adapter, true)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; @@ -7480,6 +7487,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + if (igc_is_device_id_i226(hw)) + pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2); + /* In case of PCI error, adapter loses its HW address * so we should re-assign it here. 
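Just below, ixgbe raises max_retry from 1 to 3 for the combined and byte-wise I2C writes, giving transient NACKs two more attempts. The bounded-retry shape, reduced to a runnable toy (flaky_op is invented for the demonstration):

#include <stdio.h>

static int flaky_op(void *arg)
{
	int *fails_left = arg;

	return (*fails_left)-- > 0 ? -1 : 0;   /* fail N times, then succeed */
}

static int do_with_retry(int (*op)(void *), void *arg, int max_retry)
{
	int retry = 0, status;

	do {
		status = op(arg);
		if (status == 0)
			break;
	} while (retry++ < max_retry);

	return status;
}

int main(void)
{
	int fails = 2;

	/* two transient failures then success: succeeds with max_retry 3,
	 * would still fail with the old max_retry of 1 */
	printf("status=%d\n", do_with_retry(flaky_op, &fails, 3));
	return 0;
}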
*/ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 07eaa3c3f4d36..530e4319a2e89 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 1; + int max_retry = 3; int retry = 0; u8 reg_high; u8 csum; @@ -2284,7 +2284,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - u32 max_retry = 1; + u32 max_retry = 3; u32 retry = 0; int status; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 7417087b6db59..a2807a1e4f4a6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -352,9 +352,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf) mutex_lock(&pfvf->mbox.lock); /* Remove RQ's policer mapping */ - for (qidx = 0; qidx < hw->rx_queues; qidx++) - cn10k_map_unmap_rq_policer(pfvf, qidx, - hw->matchall_ipolicer, false); + for (qidx = 0; qidx < hw->rx_queues; qidx++) { + rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false); + if (rc) + dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).", + qidx, rc); + } rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 35acc07bd9648..5765bac119f0e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -1638,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force if (!node->is_static) dwrr_del_node = true; + WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); /* destroy the leaf node */ otx2_qos_disable_sq(pfvf, qid); otx2_qos_destroy_node(pfvf, node); @@ -1682,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force } kfree(new_cfg); - /* update tx_real_queues */ - otx2_qos_update_tx_netdev_queues(pfvf); - return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index 9d887bfc31089..ac9345644068e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -256,6 +256,26 @@ int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx) return err; } +static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf) +{ + struct ndc_sync_op *req; + int rc; + + mutex_lock(&pfvf->mbox.lock); + + req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->nix_lf_tx_sync = true; + req->npa_lf_sync = true; + rc = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return rc; +} + void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) { struct otx2_qset *qset = &pfvf->qset; @@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) otx2_qos_sqb_flush(pfvf, sq_idx); otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx)); + /* NIX/NPA NDC sync */ + otx2_qos_nix_npa_ndc_sync(pfvf); otx2_cleanup_tx_cqes(pfvf, cq); mutex_lock(&pfvf->mbox.lock); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c 
b/drivers/net/ethernet/mediatek/mtk_star_emac.c index c2ab87828d858..5eb7a97e7eb17 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1468,6 +1468,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev) if (netif_running(ndev)) mtk_star_disable(ndev); + netif_device_detach(ndev); + clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); return 0; @@ -1492,6 +1494,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev) clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); } + netif_device_attach(ndev); + return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index cd754cd76bde1..d73a2044dc266 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { static u32 freq_to_shift(u16 freq) { u32 freq_khz = freq * 1000; - u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; + u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC; u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1); /* calculate max possible multiplier in order to fit in 64bit */ u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index cd17a3f4faf83..a68cd3f0304c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1897,6 +1897,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev, if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 1e8b7d3307014..b5aac0e1a68ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -18,7 +18,8 @@ enum { enum { MLX5E_TC_PRIO = 0, - MLX5E_NIC_PRIO + MLX5E_PROMISC_PRIO, + MLX5E_NIC_PRIO, }; struct mlx5e_flow_table { @@ -68,9 +69,13 @@ struct mlx5e_l2_table { MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) -/* NIC prio FTS */ +/* NIC promisc FT level */ enum { MLX5E_PROMISC_FT_LEVEL, +}; + +/* NIC prio FTS */ +enum { MLX5E_VLAN_FT_LEVEL, MLX5E_L2_FT_LEVEL, MLX5E_TTC_FT_LEVEL, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 08ab0999f7b31..14192da4b8ed0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -706,8 +706,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); page = xdpi.page.page; - /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) - * as we know this is a page_pool page. + /* No need to check page_pool_page_is_pp() as we + * know this is a page_pool page. 
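The mlx4 en_clock fix above is a classic integer-promotion bug: freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC was evaluated entirely in 32-bit arithmetic before being stored into the u64, so max_val_cycles could wrap. A compilable demonstration with made-up clock numbers (not the real MLX4_EN_WRAP_AROUND_SEC value):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t freq_khz = 500 * 1000;   /* 500 MHz core clock, illustrative */
	uint32_t wrap_sec = 10;           /* illustrative wrap-around window */

	uint64_t bad  = freq_khz * 1000 * wrap_sec;     /* 32-bit product wraps */
	uint64_t good = freq_khz * 1000ULL * wrap_sec;  /* promoted to 64 bits */

	/* bad wraps to 705032704; good is the intended 5000000000 */
	printf("bad=%" PRIu64 " good=%" PRIu64 "\n", bad, good);
	return 0;
}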
*/ page_pool_recycle_direct(page->pp, page); } while (++n < num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 1baf8933a07cb..39dcbf863421a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -266,8 +266,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); - struct xfrm_state *x = sa_entry->x; - struct net_device *netdev; + struct net_device *netdev = sa_entry->dev; struct neighbour *n; u8 addr[ETH_ALEN]; const void *pkey; @@ -277,8 +276,6 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->type != XFRM_DEV_OFFLOAD_PACKET) return; - netdev = x->xso.real_dev; - mlx5_query_mac_address(mdev, addr); switch (attrs->dir) { case XFRM_DEV_OFFLOAD_IN: @@ -707,6 +704,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, return -ENOMEM; sa_entry->x = x; + sa_entry->dev = netdev; sa_entry->ipsec = ipsec; /* Check if this SA is originated from acquire flow temporary SA */ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) @@ -849,8 +847,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, struct mlx5e_ipsec_sa_entry *sa_entry; struct mlx5e_ipsec *ipsec; struct neighbour *n = ptr; - struct net_device *netdev; - struct xfrm_state *x; unsigned long idx; if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID)) @@ -870,11 +866,9 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, continue; } - x = sa_entry->x; - netdev = x->xso.real_dev; data = sa_entry->work->data; - neigh_ha_snapshot(data->addr, n, netdev); + neigh_ha_snapshot(data->addr, n, sa_entry->dev); queue_work(ipsec->wq, &sa_entry->work->work); } @@ -1005,8 +999,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x) size_t headers; lockdep_assert(lockdep_is_held(&x->lock) || - lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) || - lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock)); + lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) || + lockdep_is_held(&net->xfrm.xfrm_state_lock)); if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) return; @@ -1141,7 +1135,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry, static int mlx5e_xfrm_add_policy(struct xfrm_policy *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xdo.real_dev; + struct net_device *netdev = x->xdo.dev; struct mlx5e_ipsec_pol_entry *pol_entry; struct mlx5e_priv *priv; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 7d943e93cf6dc..9aff779c77c89 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -260,6 +260,7 @@ struct mlx5e_ipsec_limits { struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_esn_state esn_state; struct xfrm_state *x; + struct net_device *dev; struct mlx5e_ipsec *ipsec; struct mlx5_accel_esp_xfrm_attrs attrs; void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index 298bb74ec5e94..d1d629697e285 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -113,7 +113,7 @@ int 
mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable) __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); } else { __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state); - + synchronize_net(); mlx5e_dim_disable(rq->dim); rq->dim = NULL; } @@ -140,7 +140,7 @@ int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable) __set_bit(MLX5E_SQ_STATE_DIM, &sq->state); } else { __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state); - + synchronize_net(); mlx5e_dim_disable(sq->dim); sq->dim = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 05058710d2c79..537e732085b22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -776,7 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs) ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE; ft_attr.autogroup.max_num_groups = 1; ft_attr.level = MLX5E_PROMISC_FT_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; + ft_attr.prio = MLX5E_PROMISC_PRIO; ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr); if (IS_ERR(ft->t)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 218d5402cd1a6..4d766eea32a37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2028,9 +2028,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, return err; } -static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow) +static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec) { - struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec; void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_3); @@ -2069,7 +2068,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, } complete_all(&flow->del_hw_done); - if (mlx5_flow_has_geneve_opt(flow)) + if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec)) mlx5_geneve_tlv_option_del(priv->mdev->geneve); if (flow->decap_route) @@ -2574,12 +2573,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level); if (err) { - kvfree(tmp_spec); NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes"); netdev_warn(priv->netdev, "Failed to parse tunnel attributes"); - return err; + } else { + err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec); } - err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec); + if (mlx5_flow_has_geneve_opt(tmp_spec)) + mlx5_geneve_tlv_option_del(priv->mdev->geneve); kvfree(tmp_spec); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 7aef30dbd82d6..6544546a1153f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1295,12 +1295,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events); if (ret) goto ecpf_err; - if (mlx5_core_ec_sriov_enabled(esw->dev)) { - ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs, - enabled_events); - if (ret) - goto ec_vf_err; - } + } + + /* Enable ECVF vports */ + if (mlx5_core_ec_sriov_enabled(esw->dev)) { + ret = mlx5_eswitch_load_ec_vf_vports(esw, + esw->esw_funcs.num_ec_vfs, + enabled_events); + if (ret) + goto ec_vf_err; } /* Enable VF vports */ @@ -1331,9 +1334,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) { mlx5_eswitch_unload_vf_vports(esw, 
esw->esw_funcs.num_vfs); + if (mlx5_core_ec_sriov_enabled(esw->dev)) + mlx5_eswitch_unload_ec_vf_vports(esw, + esw->esw_funcs.num_ec_vfs); + if (mlx5_ecpf_vport_exists(esw->dev)) { - if (mlx5_core_ec_sriov_enabled(esw->dev)) - mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs); mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0ce999706d412..7ef0a4af89e48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -113,13 +113,16 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, +/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, * {IPsec RoCE MPV,Alias table},IPsec RoCE policy */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 11 +#define KERNEL_NIC_PRIO_NUM_LEVELS 10 #define KERNEL_NIC_NUM_PRIOS 1 -/* One more level for tc */ -#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) +/* One more level for tc, and one more for promisc */ +#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2) + +#define KERNEL_NIC_PROMISC_NUM_PRIOS 1 +#define KERNEL_NIC_PROMISC_NUM_LEVELS 1 #define KERNEL_NIC_TC_NUM_PRIOS 1 #define KERNEL_NIC_TC_NUM_LEVELS 3 @@ -187,6 +190,8 @@ static struct init_tree_node { ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS), + ADD_MULTIPLE_PRIO(KERNEL_NIC_PROMISC_NUM_PRIOS, + KERNEL_NIC_PROMISC_NUM_LEVELS), ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, @@ -2200,6 +2205,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, struct mlx5_flow_handle *rule; struct match_list *iter; bool take_write = false; + bool try_again = false; struct fs_fte *fte; u64 version = 0; int err; @@ -2264,6 +2270,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); if (!g->node.active) { + try_again = true; up_write_ref_node(&g->node, false); continue; } @@ -2285,7 +2292,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, tree_put_node(&fte->node, false); return rule; } - rule = ERR_PTR(-ENOENT); + err = try_again ? 
-EAGAIN : -ENOENT; + rule = ERR_PTR(err); out: kmem_cache_free(steering->ftes_cache, fte); return rule; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 972e8e9df585b..9bc9bd83c2324 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function) static int alloc_system_page(struct mlx5_core_dev *dev, u32 function) { struct device *device = mlx5_core_dma_dev(dev); - int nid = dev_to_node(device); + int nid = dev->priv.numa_node; struct page *page; u64 zero_addr = 1; u64 addr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c index ab5f8f07f1f7e..fc9ba534d5d97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c @@ -508,7 +508,7 @@ static int hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, u32 *match_param) { - bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set; struct mlx5hws_definer_fc *fc = cd->fc; struct mlx5hws_definer_fc *curr_fc; u32 *s_ipv6, *d_ipv6; @@ -520,6 +520,20 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, return -EINVAL; } + ip_addr_set = HWS_IS_FLD_SET_SZ(match_param, + outer_headers.src_ipv4_src_ipv6, + 0x80) || + HWS_IS_FLD_SET_SZ(match_param, + outer_headers.dst_ipv4_dst_ipv6, 0x80); + ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) || + HWS_IS_FLD_SET(match_param, outer_headers.ethertype); + + if (ip_addr_set && !ip_ver_set) { + mlx5hws_err(cd->ctx, + "Unsupported match on IP address without version or ethertype\n"); + return -EINVAL; + } + /* L2 Check ethertype */ HWS_SET_HDR(fc, match_param, ETH_TYPE_O, outer_headers.ethertype, @@ -558,6 +572,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O, outer_headers.ip_protocol, eth_l3_outer.protocol_next_header); + HWS_SET_HDR(fc, match_param, IP_VERSION_O, + outer_headers.ip_version, + eth_l3_outer.ip_version); HWS_SET_HDR(fc, match_param, IP_TTL_O, outer_headers.ttl_hoplimit, eth_l3_outer.time_to_live_hop_limit); @@ -569,10 +586,16 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, outer_headers.dst_ipv4_dst_ipv6.ipv6_layout); /* Assume IPv6 is used if ipv6 bits are set */ - is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; - is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] || + d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; - if (is_s_ipv6) { + /* IHL is an IPv4-specific field. 
*/ + if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) { + mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n"); + return -EINVAL; + } + + if (is_ipv6) { /* Handle IPv6 source address */ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O, outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -586,13 +609,6 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O, outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_src_outer.ipv6_address_31_0); - } else { - /* Handle IPv4 source address */ - HWS_SET_HDR(fc, match_param, IPV4_SRC_O, - outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, - ipv4_src_dest_outer.source_address); - } - if (is_d_ipv6) { /* Handle IPv6 destination address */ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O, outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -607,6 +623,10 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_dst_outer.ipv6_address_31_0); } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_outer.source_address); /* Handle IPv4 destination address */ HWS_SET_HDR(fc, match_param, IPV4_DST_O, outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, @@ -664,7 +684,7 @@ static int hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, u32 *match_param) { - bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set; struct mlx5hws_definer_fc *fc = cd->fc; struct mlx5hws_definer_fc *curr_fc; u32 *s_ipv6, *d_ipv6; @@ -676,6 +696,20 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, return -EINVAL; } + ip_addr_set = HWS_IS_FLD_SET_SZ(match_param, + inner_headers.src_ipv4_src_ipv6, + 0x80) || + HWS_IS_FLD_SET_SZ(match_param, + inner_headers.dst_ipv4_dst_ipv6, 0x80); + ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) || + HWS_IS_FLD_SET(match_param, inner_headers.ethertype); + + if (ip_addr_set && !ip_ver_set) { + mlx5hws_err(cd->ctx, + "Unsupported match on IP address without version or ethertype\n"); + return -EINVAL; + } + /* L2 Check ethertype */ HWS_SET_HDR(fc, match_param, ETH_TYPE_I, inner_headers.ethertype, @@ -727,10 +761,16 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, inner_headers.dst_ipv4_dst_ipv6.ipv6_layout); /* Assume IPv6 is used if ipv6 bits are set */ - is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; - is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] || + d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; - if (is_s_ipv6) { + /* IHL is an IPv4-specific field. 
*/ + if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) { + mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n"); + return -EINVAL; + } + + if (is_ipv6) { /* Handle IPv6 source address */ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I, inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -744,13 +784,6 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I, inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_src_inner.ipv6_address_31_0); - } else { - /* Handle IPv4 source address */ - HWS_SET_HDR(fc, match_param, IPV4_SRC_I, - inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, - ipv4_src_dest_inner.source_address); - } - if (is_d_ipv6) { /* Handle IPv6 destination address */ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I, inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -765,6 +798,10 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_dst_inner.ipv6_address_31_0); } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_inner.source_address); /* Handle IPv4 destination address */ HWS_SET_HDR(fc, match_param, IPV4_DST_I, inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 0d5f750faa455..b04024d0ae676 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -465,19 +465,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + int err; out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, out); + if (err) + goto out; *node_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.node_guid); - +out: kvfree(out); - return 0; + return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); @@ -519,19 +522,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + int err; out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, out); + if (err) + goto out; *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.qkey_violation_counter); - +out: kvfree(out); - return 0; + return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 385a56ac73481..c82254a8ae661 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -447,8 +447,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev) priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX); phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0); - if (phy_irq < 0) { - dev_err(&pdev->dev, "Error getting PHY irq. 
Use polling instead"); + if (phy_irq == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto out; + } else if (phy_irq < 0) { phy_irq = PHY_POLL; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 7775418316df5..d6cf97ecf3276 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -127,11 +127,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx, return -EBUSY; addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction); - if (dma_mapping_error(fbd->dev, addr)) { - free_page((unsigned long)msg); - + if (dma_mapping_error(fbd->dev, addr)) return -ENOSPC; - } mbx->buf_info[tail].msg = msg; mbx->buf_info[tail].addr = addr; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 1a1cbd034eda0..2acd9c3531dea 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -18,6 +18,8 @@ #define EEPROM_MAC_OFFSET (0x01) #define MAX_EEPROM_SIZE (512) #define MAX_OTP_SIZE (1024) +#define MAX_HS_OTP_SIZE (8 * 1024) +#define MAX_HS_EEPROM_SIZE (64 * 1024) #define OTP_INDICATOR_1 (0xF3) #define OTP_INDICATOR_2 (0xF7) @@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset, int ret; int i; + if (offset + length > MAX_HS_OTP_SIZE) + return -EINVAL; + ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (ret < 0) return ret; @@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset, int ret; int i; + if (offset + length > MAX_HS_OTP_SIZE) + return -EINVAL; + ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (ret < 0) return ret; @@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter, u32 val; int i; + if (offset + length > MAX_HS_EEPROM_SIZE) + return -EINVAL; + retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (retval < 0) return retval; @@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter, u32 val; int i; + if (offset + length > MAX_HS_EEPROM_SIZE) + return -EINVAL; + retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (retval < 0) return retval; @@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev) struct lan743x_adapter *adapter = netdev_priv(netdev); if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) - return MAX_OTP_SIZE; + return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE; - return MAX_EEPROM_SIZE; + return adapter->is_pci11x1x ? 
MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE; } static int lan743x_ethtool_get_eeprom(struct net_device *netdev, diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 812ad9d61676a..9836fbbea0cc2 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) } /* PHY */ -static int lan743x_phy_reset(struct lan743x_adapter *adapter) +static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter) { u32 data; @@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) 50000, 1000000); } -static int lan743x_phy_init(struct lan743x_adapter *adapter) -{ - return lan743x_phy_reset(adapter); -} - static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) { u32 id_rev; @@ -3505,10 +3500,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, if (ret) return ret; - ret = lan743x_phy_init(adapter); - if (ret) - return ret; - ret = lan743x_ptp_init(adapter); if (ret) return ret; @@ -3642,6 +3633,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, if (ret) goto cleanup_pci; + ret = lan743x_hw_reset_phy(adapter); + if (ret) + goto cleanup_pci; + ret = lan743x_hardware_init(adapter, pdev); if (ret) goto cleanup_pci; diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h index 0d29914cd4606..225e8232474d7 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.h +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -18,9 +18,9 @@ */ #define LAN743X_PTP_N_EVENT_CHAN 2 #define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN -#define LAN743X_PTP_N_EXTTS 4 -#define LAN743X_PTP_N_PPS 0 #define PCI11X1X_PTP_IO_MAX_CHANNELS 8 +#define LAN743X_PTP_N_EXTTS PCI11X1X_PTP_IO_MAX_CHANNELS +#define LAN743X_PTP_N_PPS 0 #define PTP_CMD_CTL_TIMEOUT_CNT 50 struct lan743x_adapter; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 534d4716d5f7d..b34e015eedf9b 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op) lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD); } +static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type) +{ + lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE); +} + static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp) { lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP); @@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb, return err; lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op); + lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type); lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id); } @@ -874,6 +880,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p, lan966x_vlan_port_set_vlan_aware(port, 0); lan966x_vlan_port_set_vid(port, HOST_PVID, false, false); lan966x_vlan_port_apply(port); + lan966x_vlan_port_rew_host(port); return 0; } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 25cb2f61986f6..8aa39497818fe 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -75,6 +75,10 @@ #define 
IFH_REW_OP_ONE_STEP_PTP 0x3 #define IFH_REW_OP_TWO_STEP_PTP 0x4 +#define IFH_PDU_TYPE_NONE 0 +#define IFH_PDU_TYPE_IPV4 7 +#define IFH_PDU_TYPE_IPV6 8 + #define FDMA_RX_DCB_MAX_DBS 1 #define FDMA_TX_DCB_MAX_DBS 1 @@ -254,6 +258,7 @@ struct lan966x_phc { struct lan966x_skb_cb { u8 rew_op; + u8 pdu_type; u16 ts_id; unsigned long jiffies; }; @@ -492,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port); bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid); void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port, bool vlan_aware); +void lan966x_vlan_port_rew_host(struct lan966x_port *port); int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid, bool pvid, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index 63905bb5a63a8..87e5e81d40dc6 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -322,34 +322,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port, *cfg = phc->hwtstamp_config; } -static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb) +static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb, + u8 *rew_op, u8 *pdu_type) { struct ptp_header *header; u8 msgtype; int type; - if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) - return IFH_REW_OP_NOOP; + if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + return; + } type = ptp_classify_raw(skb); - if (type == PTP_CLASS_NONE) - return IFH_REW_OP_NOOP; + if (type == PTP_CLASS_NONE) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + return; + } header = ptp_parse_header(skb, type); - if (!header) - return IFH_REW_OP_NOOP; + if (!header) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + return; + } - if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) - return IFH_REW_OP_TWO_STEP_PTP; + if (type & PTP_CLASS_L2) + *pdu_type = IFH_PDU_TYPE_NONE; + if (type & PTP_CLASS_IPV4) + *pdu_type = IFH_PDU_TYPE_IPV4; + if (type & PTP_CLASS_IPV6) + *pdu_type = IFH_PDU_TYPE_IPV6; + + if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) { + *rew_op = IFH_REW_OP_TWO_STEP_PTP; + return; + } /* If it is sync and run 1 step then set the correct operation, * otherwise run as 2 step */ msgtype = ptp_get_msgtype(header, type); - if ((msgtype & 0xf) == 0) - return IFH_REW_OP_ONE_STEP_PTP; + if ((msgtype & 0xf) == 0) { + *rew_op = IFH_REW_OP_ONE_STEP_PTP; + return; + } - return IFH_REW_OP_TWO_STEP_PTP; + *rew_op = IFH_REW_OP_TWO_STEP_PTP; } static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port) @@ -374,10 +395,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port, { struct lan966x *lan966x = port->lan966x; unsigned long flags; + u8 pdu_type; u8 rew_op; - rew_op = lan966x_ptp_classify(port, skb); + lan966x_ptp_classify(port, skb, &rew_op, &pdu_type); LAN966X_SKB_CB(skb)->rew_op = rew_op; + LAN966X_SKB_CB(skb)->pdu_type = pdu_type; if (rew_op != IFH_REW_OP_TWO_STEP_PTP) return 0; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c index 1c88120eb291a..bcb4db76b75cd 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c @@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port, lan966x_vlan_port_set_vlan_aware(port, false); 
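The lan966x PTP change above threads a second value, the PDU type, from the timestamp classifier through LAN966X_SKB_CB into the injection frame header: for one-step timestamping the rewriter patches the originTimestamp in the outgoing frame, and when PTP rides over UDP it must also fix up the L4 checksum, so it needs to know whether the PTP header follows the Ethernet header directly or sits inside an IPv4/IPv6 datagram. A minimal sketch of the classification mapping, with PTP_CLASS_* values mirroring include/linux/ptp_classify.h and the defines made local purely so the snippet stands alone:

#include <stdint.h>

#define PTP_CLASS_IPV4 0x10 /* mirrors include/linux/ptp_classify.h */
#define PTP_CLASS_IPV6 0x20
#define PTP_CLASS_L2   0x40

#define IFH_PDU_TYPE_NONE 0 /* values from the patch above */
#define IFH_PDU_TYPE_IPV4 7
#define IFH_PDU_TYPE_IPV6 8

/* Map a ptp_classify_raw()-style result to the IFH PDU type. */
static uint8_t ptp_class_to_pdu_type(unsigned int type)
{
    if (type & PTP_CLASS_IPV4)
        return IFH_PDU_TYPE_IPV4; /* PTP over UDP/IPv4 */
    if (type & PTP_CLASS_IPV6)
        return IFH_PDU_TYPE_IPV6; /* PTP over UDP/IPv6 */
    return IFH_PDU_TYPE_NONE;     /* L2 PTP or no PTP at all */
}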
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false); lan966x_vlan_port_apply(port); + lan966x_vlan_port_rew_host(port); } int lan966x_port_changeupper(struct net_device *dev, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c index fa34a739c748e..7da22520724ce 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c @@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port, port->vlan_aware = vlan_aware; } +/* When the interface is in host mode, it should not be VLAN aware, yet it + * should still insert the tags it gets from the network stack. These tags are + * not carried in the frame data but in the skb, and the IFH is already + * configured to pass them on. Update the rewriter to insert the VLAN tag for + * all frames whose VID is different from 0. + */ +void lan966x_vlan_port_rew_host(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + u32 val; + + /* Tag all frames except when VID=0 */ + val = REW_TAG_CFG_TAG_CFG_SET(2); + + /* Update only some bits in the register */ + lan_rmw(val, + REW_TAG_CFG_TAG_CFG, + lan966x, REW_TAG_CFG(port->chip_port)); +} + void lan966x_vlan_port_apply(struct lan966x_port *port) { struct lan966x *lan966x = port->lan966x; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 9bac4083d8a09..876de6db63c4f 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -28,6 +28,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev) gc->db_page_base = gc->bar0_va + mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF); + gc->phys_db_page_base = gc->bar0_pa + + mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF); + sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF); sriov_base_va = gc->bar0_va + sriov_base_off; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 0f817c3f92d82..533df5993048f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -515,9 +515,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds, unsigned long start_time; unsigned long max_wait; unsigned long duration; - int done = 0; bool fw_up; int opcode; + bool done; int err; /* Wait for dev cmd to complete, retrying if we get EAGAIN, */ max_wait = jiffies + (max_seconds * HZ); try_again: + done = false; opcode = idev->opcode; start_time = jiffies; for (fw_up = ionic_is_fw_running(idev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 0eeda7e502db2..0f5758c273c22 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -321,7 +321,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, len, DMA_TO_DEVICE); } else /* XDP_REDIRECT */ { dma_addr = ionic_tx_map_single(q, frame->data, len); - if (!dma_addr) + if (dma_addr == DMA_MAPPING_ERROR) return -EIO; } @@ -357,7 +357,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, } else { dma_addr = ionic_tx_map_frag(q, frag, 0,
skb_frag_size(frag)); - if (dma_mapping_error(q->dev, dma_addr)) { + if (dma_addr == DMA_MAPPING_ERROR) { ionic_tx_desc_unmap_bufs(q, desc_info); return -EIO; } @@ -1083,7 +1083,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, net_warn_ratelimited("%s: DMA single map failed on %s!\n", dev_name(dev), q->name); q_to_tx_stats(q)->dma_map_err++; - return 0; + return DMA_MAPPING_ERROR; } return dma_addr; } @@ -1100,7 +1100,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, net_warn_ratelimited("%s: DMA frag map failed on %s!\n", dev_name(dev), q->name); q_to_tx_stats(q)->dma_map_err++; - return 0; + return DMA_MAPPING_ERROR; } return dma_addr; } @@ -1116,7 +1116,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, int frag_idx; dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); - if (!dma_addr) + if (dma_addr == DMA_MAPPING_ERROR) return -EIO; buf_info->dma_addr = dma_addr; buf_info->len = skb_headlen(skb); @@ -1126,7 +1126,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, nfrags = skb_shinfo(skb)->nr_frags; for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) { dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); - if (!dma_addr) + if (dma_addr == DMA_MAPPING_ERROR) goto dma_fail; buf_info->dma_addr = dma_addr; buf_info->len = skb_frag_size(frag); diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index e2db944e6fa8b..be4c9622618d8 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -68,6 +68,7 @@ enum mac_version { /* support for RTL_GIGA_MAC_VER_60 has been removed */ RTL_GIGA_MAC_VER_61, RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_VER_64, RTL_GIGA_MAC_VER_65, RTL_GIGA_MAC_VER_66, RTL_GIGA_MAC_NONE diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 85bb5121cd245..7b82779e4cd5d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -55,6 +55,7 @@ #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" #define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" #define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" +#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw" #define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw" #define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw" @@ -138,6 +139,7 @@ static const struct { [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, /* reserve 62 for CFG_METHOD_4 in the vendor driver */ [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, + [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1}, [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2}, [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3}, }; @@ -707,6 +709,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3); MODULE_FIRMWARE(FIRMWARE_8107E_2); MODULE_FIRMWARE(FIRMWARE_8125A_3); MODULE_FIRMWARE(FIRMWARE_8125B_2); +MODULE_FIRMWARE(FIRMWARE_8125D_1); MODULE_FIRMWARE(FIRMWARE_8126A_2); MODULE_FIRMWARE(FIRMWARE_8126A_3); @@ -2098,10 +2101,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp) tp->tx_lpi_timer = timer_val; r8168_mac_ocp_write(tp, 0xe048, timer_val); break; - case RTL_GIGA_MAC_VER_61: - case RTL_GIGA_MAC_VER_63: - case RTL_GIGA_MAC_VER_65: - case RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_66: tp->tx_lpi_timer = timer_val; RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val); break; @@ -2233,6 +2233,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 }, { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 }, + /* 8125D family. */ + { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 }, + /* 8125B family. */ { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, @@ -2500,9 +2503,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_61: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_63: - case RTL_GIGA_MAC_VER_65: - case RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST | RX_PAUSE_SLOT_ON); break; @@ -3840,6 +3841,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp) rtl_hw_start_8125_common(tp); } +static void rtl_hw_start_8125d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_start_8125_common(tp); +} + static void rtl_hw_start_8126a(struct rtl8169_private *tp) { rtl_disable_zrxdc_timeout(tp); @@ -3889,6 +3896,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + [RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d, [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a, [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a, }; @@ -3906,6 +3914,7 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp) /* disable interrupt coalescing */ switch (tp->mac_version) { case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_64: for (i = 0xa00; i < 0xb00; i += 4) RTL_W32(tp, i, 0); break; diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index cf29b12084826..d09b2a41cd062 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -1104,6 +1104,15 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, rtl8125b_config_eee_phy(phydev); } +static void rtl8125d_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8125_legacy_force_mode(phydev); + rtl8168g_disable_aldps(phydev); + rtl8125b_config_eee_phy(phydev); +} + static void rtl8126a_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1160,6 +1169,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + [RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config, [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config, [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config, }; diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c index 6b3f7fca8d157..05c4b6c8c9c3d 100644 --- a/drivers/net/ethernet/renesas/rtsn.c +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -1259,7 +1259,12 @@ static int rtsn_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->pdev = pdev; priv->ndev = ndev; + priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); + if (!priv->ptp_priv) { + ret = -ENOMEM; + goto error_free; + } spin_lock_init(&priv->lock); platform_set_drvdata(pdev, priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 7840bc403788e..5dcc95bc0ad28 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -364,19 +364,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, } /* TX/RX NORMAL interrupts */ - if (likely(intr_status & XGMAC_NIS)) { - if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->rx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_rx; - } - if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->tx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_tx; - } + if (likely(intr_status & XGMAC_RI)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_rx; + } + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_tx; } /* Clear interrupts */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c index c9693f77e1f61..ac6f2e3a3fcd2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c @@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg, int i, ret = 0; u32 ctrl; + if (!ptp_rate) { + netdev_warn(priv->dev, "Invalid PTP rate"); + return -EINVAL; + } + ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false); ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false); ret |= est_write(est_addr, EST_TER, cfg->ter, false); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 918d7f2e8ba99..36328298dc9b8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -835,6 +835,11 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; + if (!priv->plat->clk_ptp_rate) { + netdev_err(priv->dev, "Invalid PTP clock rate"); + return -EINVAL; + } + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); priv->systime_flags = systime_flags; @@ -3598,7 +3603,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); enum request_irq_err irq_err; - cpumask_t cpu_mask; int irq_idx = 0; char *int_name; int ret; @@ -3727,9 +3731,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) irq_idx = i; goto irq_error; } - cpumask_clear(&cpu_mask); - cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); - irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); + irq_set_affinity_hint(priv->rx_irq[i], + cpumask_of(i % num_online_cpus())); } /* Request Tx MSI irq */ @@ -3752,9 +3755,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) irq_idx = i; goto irq_error; } - cpumask_clear(&cpu_mask); - cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); - irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); + irq_set_affinity_hint(priv->tx_irq[i], + cpumask_of(i % num_online_cpus())); } return 0; @@ -4419,8 +4421,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); - skb_tx_timestamp(skb); - if 
(unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { /* declare that device is doing timestamping */ @@ -4455,6 +4455,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) } netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + skb_tx_timestamp(skb); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); @@ -4698,8 +4699,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); - skb_tx_timestamp(skb); - /* Ready to fill the first descriptor and set the OWN bit w/o any * problems because all the descriptors are actually ready to be * passed to the DMA engine. @@ -4746,7 +4745,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); - + skb_tx_timestamp(skb); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index aaf008bdbbcd4..8fd868b671a26 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -419,6 +419,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + static int bus_id = -ENODEV; int phy_mode; void *ret; int rc; @@ -454,8 +455,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) of_property_read_u32(np, "max-speed", &plat->max_speed); plat->bus_id = of_alias_get_id(np, "ethernet"); - if (plat->bus_id < 0) - plat->bus_id = 0; + if (plat->bus_id < 0) { + if (bus_id < 0) + bus_id = of_alias_get_highest_id("ethernet"); + /* No ethernet alias found, init at -1 so first bus_id is 0 */ + if (bus_id < 0) + bus_id = -1; + plat->bus_id = ++bus_id; + } /* Default to phy auto-detection */ plat->phy_addr = -1; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index a6b1de9a251dd..5c85040a1b937 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -303,7 +303,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv) /* Calculate the clock domain crossing (CDC) error if necessary */ priv->plat->cdc_error_adj = 0; - if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) + if (priv->plat->has_gmac4) priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f5449b73b9a76..1e4cf89bd79ad 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, addr = np->ops->map_page(np->device, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - if (!addr) { + if (np->ops->mapping_error(np->device, addr)) { __free_page(page); return -ENOMEM; } @@ -6672,6 +6672,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, len = skb_headlen(skb); mapping = np->ops->map_single(np->device, skb->data, len, DMA_TO_DEVICE); + if (np->ops->mapping_error(np->device, mapping)) + goto out_drop; prod = rp->prod; @@ -6713,6 +6715,8 @@ static netdev_tx_t 
niu_start_xmit(struct sk_buff *skb, mapping = np->ops->map_page(np->device, skb_frag_page(frag), skb_frag_off(frag), len, DMA_TO_DEVICE); + if (np->ops->mapping_error(np->device, mapping)) + goto out_unmap; rp->tx_buffs[prod].skb = NULL; rp->tx_buffs[prod].mapping = mapping; @@ -6737,6 +6741,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, out: return NETDEV_TX_OK; +out_unmap: + while (i--) { + const skb_frag_t *frag; + + prod = PREVIOUS_TX(rp, prod); + frag = &skb_shinfo(skb)->frags[i]; + np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping, + skb_frag_size(frag), DMA_TO_DEVICE); + } + + np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping, + skb_headlen(skb), DMA_TO_DEVICE); + out_drop: rp->tx_errors++; kfree_skb(skb); @@ -9638,6 +9655,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address, dma_unmap_single(dev, dma_address, size, direction); } +static int niu_pci_mapping_error(struct device *dev, u64 addr) +{ + return dma_mapping_error(dev, addr); +} + static const struct niu_ops niu_pci_ops = { .alloc_coherent = niu_pci_alloc_coherent, .free_coherent = niu_pci_free_coherent, @@ -9645,6 +9667,7 @@ static const struct niu_ops niu_pci_ops = { .unmap_page = niu_pci_unmap_page, .map_single = niu_pci_map_single, .unmap_single = niu_pci_unmap_single, + .mapping_error = niu_pci_mapping_error, }; static void niu_driver_version(void) @@ -10011,6 +10034,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address, /* Nothing to do. */ } +static int niu_phys_mapping_error(struct device *dev, u64 dma_address) +{ + return false; +} + static const struct niu_ops niu_phys_ops = { .alloc_coherent = niu_phys_alloc_coherent, .free_coherent = niu_phys_free_coherent, @@ -10018,6 +10046,7 @@ static const struct niu_ops niu_phys_ops = { .unmap_page = niu_phys_unmap_page, .map_single = niu_phys_map_single, .unmap_single = niu_phys_unmap_single, + .mapping_error = niu_phys_mapping_error, }; static int niu_of_probe(struct platform_device *op) diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h index 04c215f91fc08..0b169c08b0f2d 100644 --- a/drivers/net/ethernet/sun/niu.h +++ b/drivers/net/ethernet/sun/niu.h @@ -2879,6 +2879,9 @@ struct tx_ring_info { #define NEXT_TX(tp, index) \ (((index) + 1) < (tp)->pending ? ((index) + 1) : 0) +#define PREVIOUS_TX(tp, index) \ + (((index) - 1) >= 0 ? 
((index) - 1) : (((tp)->pending) - 1)) + static inline u32 niu_tx_avail(struct tx_ring_info *tp) { return (tp->pending - @@ -3140,6 +3143,7 @@ struct niu_ops { enum dma_data_direction direction); void (*unmap_single)(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction); + int (*mapping_error)(struct device *dev, u64 dma_address); }; struct niu_link_config { diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index a21e7c0afbfdc..6b5cff087686e 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -612,8 +612,6 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr, { struct sk_buff *skb; - len += AM65_CPSW_HEADROOM; - skb = build_skb(page_addr, len); if (unlikely(!skb)) return NULL; @@ -1217,7 +1215,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, } skb = am65_cpsw_build_skb(page_addr, ndev, - AM65_CPSW_MAX_PACKET_SIZE, headroom); + PAGE_SIZE, headroom); if (unlikely(!skb)) { new_page = page; goto requeue; @@ -2693,13 +2691,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) goto of_node_put; ret = of_get_mac_address(port_np, port->slave.mac_addr); - if (ret) { + if (ret == -EPROBE_DEFER) { + goto of_node_put; + } else if (ret) { am65_cpsw_am654_get_efuse_macid(port_np, port->port_id, port->slave.mac_addr); if (!is_valid_ether_addr(port->slave.mac_addr)) { eth_random_addr(port->slave.mac_addr); - dev_err(dev, "Use random MAC address\n"); + dev_info(dev, "Use random MAC address\n"); } } @@ -3586,6 +3586,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) return ret; } + am65_cpsw_nuss_get_ver(common); + + ret = am65_cpsw_nuss_init_host_p(common); + if (ret) + goto err_pm_clear; + + ret = am65_cpsw_nuss_init_slave_ports(common); + if (ret) + goto err_pm_clear; + node = of_get_child_by_name(dev->of_node, "mdio"); if (!node) { dev_warn(dev, "MDIO node not found\n"); @@ -3602,16 +3612,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) } of_node_put(node); - am65_cpsw_nuss_get_ver(common); - - ret = am65_cpsw_nuss_init_host_p(common); - if (ret) - goto err_of_clear; - - ret = am65_cpsw_nuss_init_slave_ports(common); - if (ret) - goto err_of_clear; - /* init common data */ ale_params.dev = dev; ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c index 6f0edae38ea24..172ae38381b45 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.c +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -29,6 +29,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac) spin_lock(&prueth->stats_lock); for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) { + /* In MII mode TX lines are swapped inside ICSSG, so read Tx stats + * from slice1 for port0 and slice0 for port1 to get accurate Tx + * stats for a given port + */ + if (emac->phy_if == PHY_INTERFACE_MODE_MII && + icssg_all_miig_stats[i].offset >= ICSSG_TX_PACKET_OFFSET && + icssg_all_miig_stats[i].offset <= ICSSG_TX_BYTE_OFFSET) + base = stats_base[slice ^ 1]; regmap_read(prueth->miig_rt, base + icssg_all_miig_stats[i].offset, &val); diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index e4d993f313740..545177e84c0eb 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -306,7 +306,7 @@ static void mse102x_dump_packet(const char *msg, 
int len, const char *data) data, len, true); } -static void mse102x_rx_pkt_spi(struct mse102x_net *mse) +static irqreturn_t mse102x_rx_pkt_spi(struct mse102x_net *mse) { struct sk_buff *skb; unsigned int rxalign; @@ -327,7 +327,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) mse102x_tx_cmd_spi(mse, CMD_CTR); ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); if (ret) - return; + return IRQ_NONE; cmd_resp = be16_to_cpu(rx); if ((cmd_resp & CMD_MASK) != CMD_RTS) { @@ -360,7 +360,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); if (!skb) - return; + return IRQ_NONE; /* 2 bytes Start of frame (before ethernet header) * 2 bytes Data frame tail (after ethernet frame) @@ -370,7 +370,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) { mse->ndev->stats.rx_errors++; dev_kfree_skb(skb); - return; + return IRQ_HANDLED; } if (netif_msg_pktdata(mse)) @@ -381,6 +381,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) mse->ndev->stats.rx_packets++; mse->ndev->stats.rx_bytes += rxlen; + + return IRQ_HANDLED; } static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb, @@ -512,12 +514,13 @@ static irqreturn_t mse102x_irq(int irq, void *_mse) { struct mse102x_net *mse = _mse; struct mse102x_net_spi *mses = to_mse102x_spi(mse); + irqreturn_t ret; mutex_lock(&mses->lock); - mse102x_rx_pkt_spi(mse); + ret = mse102x_rx_pkt_spi(mse); mutex_unlock(&mses->lock); - return IRQ_HANDLED; + return ret; } static int mse102x_net_open(struct net_device *ndev) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 71c891d14fb62..e711797a3a8cf 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -1336,6 +1336,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, u8 tun_prot = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) && !(first->tx_flags & WX_TX_FLAGS_CC)) return; @@ -1429,7 +1430,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_L4LEN_SHIFT; break; default: - break; + skb_checksum_help(skb); + goto csum_failed; } /* update TX checksum flag */ @@ -1583,6 +1585,7 @@ static void wx_set_rss_queues(struct wx *wx) clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + wx->ring_feature[RING_F_FDIR].indices = 1; /* Use Flow Director in addition to RSS to ensure the best * distribution of flows across cores, even when an FDIR flow * isn't matched. 
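The mse102x rework just above is standard threaded-IRQ hygiene: the handler previously returned IRQ_HANDLED unconditionally, even when the CTR response was invalid or no skb could be allocated, which blinds the IRQ core's spurious-interrupt accounting. Propagating the receive path's verdict restores it. A kernel-style sketch of the pattern, where the demo_* names are hypothetical stand-ins:

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct demo_priv {
    struct mutex lock;
    /* device state */
};

/* Returns IRQ_HANDLED if a frame was consumed, IRQ_NONE otherwise. */
static irqreturn_t demo_process_rx(struct demo_priv *priv);

static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
    struct demo_priv *priv = dev_id;
    irqreturn_t ret;

    mutex_lock(&priv->lock);
    ret = demo_process_rx(priv);
    mutex_unlock(&priv->lock);

    /* Returning IRQ_NONE lets note_interrupt() flag a stuck line. */
    return ret;
}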
@@ -1621,7 +1624,7 @@ static void wx_set_num_queues(struct wx *wx) */ static int wx_acquire_msix_vectors(struct wx *wx) { - struct irq_affinity affd = { .pre_vectors = 1 }; + struct irq_affinity affd = { .post_vectors = 1 }; int nvecs, i; /* We start by asking for one vector per queue pair */ @@ -1658,16 +1661,17 @@ static int wx_acquire_msix_vectors(struct wx *wx) return nvecs; } - wx->msix_entry->entry = 0; - wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0); nvecs -= 1; for (i = 0; i < nvecs; i++) { wx->msix_q_entries[i].entry = i; - wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1); + wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i); } wx->num_q_vectors = nvecs; + wx->msix_entry->entry = nvecs; + wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs); + return 0; } @@ -2117,7 +2121,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ - msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2148,7 +2151,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector) itr_reg |= WX_PX_ITR_CNT_WDIS; - wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg); + wr32(wx, WX_PX_ITR(v_idx), itr_reg); } /** @@ -2194,9 +2197,9 @@ void wx_configure_vectors(struct wx *wx) wx_write_eitr(q_vector); } - wx_set_ivar(wx, -1, 0, 0); + wx_set_ivar(wx, -1, 0, v_idx); if (pdev->msix_enabled) - wr32(wx, WX_PX_ITR(0), 1950); + wr32(wx, WX_PX_ITR(v_idx), 1950); } EXPORT_SYMBOL(wx_configure_vectors); @@ -2425,7 +2428,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring) struct page_pool_params pp_params = { .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .order = 0, - .pool_size = rx_ring->size, + .pool_size = rx_ring->count, .nid = dev_to_node(rx_ring->dev), .dev = rx_ring->dev, .dma_dir = DMA_FROM_DEVICE, diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index b54bffda027b4..dbac133eacfc5 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -1136,7 +1136,7 @@ struct wx { }; #define WX_INTR_ALL (~0ULL) -#define WX_INTR_Q(i) BIT((i) + 1) +#define WX_INTR_Q(i) BIT((i)) /* register operations */ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 1be2a5cc4a83c..d2fb77f1d876b 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -154,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues) if (queues) wx_intr_enable(wx, NGBE_INTR_ALL); else - wx_intr_enable(wx, NGBE_INTR_MISC); + wx_intr_enable(wx, NGBE_INTR_MISC(wx)); } /** diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index f48ed7fc1805a..f4dc4acbedaea 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -80,7 +80,7 @@ NGBE_PX_MISC_IEN_GPIO) #define NGBE_INTR_ALL 0x1FF -#define NGBE_INTR_MISC BIT(0) +#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) #define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) #define NGBE_CFG_LAN_SPEED 0x14440 diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index 0ee73a265545c..76d33c042eee5 100644 --- 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -21,7 +21,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues) wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC); + wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); if (queues) wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); } @@ -68,7 +68,6 @@ int txgbe_request_queue_irqs(struct wx *wx) free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } - wx_reset_interrupt_capability(wx); return err; } @@ -148,7 +147,7 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data) nhandled++; } - wx_intr_enable(wx, TXGBE_INTR_MISC); + wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); } @@ -169,6 +168,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe) free_irq(txgbe->link_irq, txgbe); free_irq(txgbe->misc.irq, txgbe); txgbe_del_irq_domain(txgbe); + txgbe->wx->misc_irq_domain = false; } int txgbe_setup_misc_irq(struct txgbe *txgbe) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 7e352837184fa..9ede260b85dcb 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -308,10 +308,14 @@ static int txgbe_open(struct net_device *netdev) wx_configure(wx); - err = txgbe_request_queue_irqs(wx); + err = txgbe_setup_misc_irq(wx->priv); if (err) goto err_free_resources; + err = txgbe_request_queue_irqs(wx); + if (err) + goto err_free_misc_irq; + /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); if (err) @@ -327,6 +331,9 @@ static int txgbe_open(struct net_device *netdev) err_free_irq: wx_free_irq(wx); +err_free_misc_irq: + txgbe_free_misc_irq(wx->priv); + wx_reset_interrupt_capability(wx); err_free_resources: wx_free_resources(wx); err_reset: @@ -365,6 +372,7 @@ static int txgbe_close(struct net_device *netdev) txgbe_down(wx); wx_free_irq(wx); + txgbe_free_misc_irq(wx->priv); wx_free_resources(wx); txgbe_fdir_filter_exit(wx); wx_control_hw(wx, false); @@ -410,7 +418,6 @@ static void txgbe_shutdown(struct pci_dev *pdev) int txgbe_setup_tc(struct net_device *dev, u8 tc) { struct wx *wx = netdev_priv(dev); - struct txgbe *txgbe = wx->priv; /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. 
Unfortunately, the @@ -421,7 +428,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc) else txgbe_reset(wx); - txgbe_free_misc_irq(txgbe); wx_clear_interrupt_scheme(wx); if (tc) @@ -430,7 +436,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc) netdev_reset_tc(dev); wx_init_interrupt_scheme(wx); - txgbe_setup_misc_irq(txgbe); if (netif_running(dev)) txgbe_open(dev); @@ -677,13 +682,9 @@ static int txgbe_probe(struct pci_dev *pdev, txgbe_init_fdir(txgbe); - err = txgbe_setup_misc_irq(txgbe); - if (err) - goto err_release_hw; - err = txgbe_init_phy(txgbe); if (err) - goto err_free_misc_irq; + goto err_release_hw; err = register_netdev(netdev); if (err) @@ -711,8 +712,6 @@ static int txgbe_probe(struct pci_dev *pdev, err_remove_phy: txgbe_remove_phy(txgbe); -err_free_misc_irq: - txgbe_free_misc_irq(txgbe); err_release_hw: wx_clear_interrupt_scheme(wx); wx_control_hw(wx, false); @@ -746,7 +745,6 @@ static void txgbe_remove(struct pci_dev *pdev) unregister_netdev(netdev); txgbe_remove_phy(txgbe); - txgbe_free_misc_irq(txgbe); wx_free_isb_resources(wx); pci_release_selected_regions(pdev, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 8ea413a7abe9d..5fe415f3f2ca9 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -264,8 +264,8 @@ struct txgbe_fdir_filter { #define TXGBE_DEFAULT_RX_WORK 128 #endif -#define TXGBE_INTR_MISC BIT(0) -#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1) +#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) #define TXGBE_MAX_EITR GENMASK(11, 3) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1072e2210aed3..6b93418224e7e 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev, if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending > TX_BD_NUM_MAX) + ering->tx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index fe3438abcd253..2d47b35443af0 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -843,7 +843,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result) dev_consume_skb_any(skbuf_dma->skb); netif_txq_completed_wake(txq, 1, len, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), - 2 * MAX_SKB_FRAGS); + 2); } /** @@ -877,7 +877,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) dma_dev = lp->tx_chan->device; sg_len = skb_shinfo(skb)->nr_frags + 1; - if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) { + if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) { netif_stop_queue(ndev); if (net_ratelimit()) netdev_warn(ndev, "TX ring unexpectedly full\n"); @@ -927,7 +927,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) txq = skb_get_tx_queue(lp->ndev, skb); netdev_tx_sent_queue(txq, skb->len); netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), - MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS); + 1, 2); dmaengine_submit(dma_tx_desc); 
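The axienet threshold change above follows from how the dmaengine TX path accounts for ring slots: the head and all fragments of an skb are mapped into one scatter-gather list and submitted as a single descriptor, so every packet costs exactly one slot and the old MAX_SKB_FRAGS-based stop/wake margins were far too conservative. A self-contained sketch of the arithmetic; CIRC_SPACE() is copied from include/linux/circ_buf.h and needs a power-of-two ring size, which is chosen here arbitrarily:

#include <stdio.h>

#define TX_BD_NUM_MAX 128 /* illustrative power-of-two ring size */
#define CIRC_SPACE(head, tail, size) \
    (((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
    unsigned int head = 0, tail = 0; /* empty ring */

    /* One descriptor per skb: keep queueing while more than one
     * free slot remains, i.e. stop only when the next skb would
     * take the last slot.
     */
    while (CIRC_SPACE(head, tail, TX_BD_NUM_MAX) > 1)
        head = (head + 1) & (TX_BD_NUM_MAX - 1); /* queue one skb */

    printf("stopped with %u slots free\n",
           CIRC_SPACE(head, tail, TX_BD_NUM_MAX)); /* prints 1 */
    return 0;
}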
dma_async_issue_pending(lp->tx_chan); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index ee21592825738..090a56a5e456a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -246,15 +246,39 @@ static sci_t make_sci(const u8 *addr, __be16 port) return sci; } -static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present) +static sci_t macsec_active_sci(struct macsec_secy *secy) { - sci_t sci; + struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc); + + /* Case single RX SC */ + if (rx_sc && !rcu_dereference_bh(rx_sc->next)) + return (rx_sc->active) ? rx_sc->sci : 0; + /* Case no RX SC or multiple */ + else + return 0; +} + +static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present, + struct macsec_rxh_data *rxd) +{ + struct macsec_dev *macsec; + sci_t sci = 0; - if (sci_present) + /* SC = 1 */ + if (sci_present) { memcpy(&sci, hdr->secure_channel_id, sizeof(hdr->secure_channel_id)); - else + /* SC = 0; ES = 0 */ + } else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) && + (list_is_singular(&rxd->secys))) { + /* Only one SECY should exist on this scenario */ + macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev, + secys); + if (macsec) + return macsec_active_sci(&macsec->secy); + } else { sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES); + } return sci; } @@ -1108,7 +1132,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) struct macsec_rxh_data *rxd; struct macsec_dev *macsec; unsigned int len; - sci_t sci; + sci_t sci = 0; u32 hdr_pn; bool cbit; struct pcpu_rx_sc_stats *rxsc_stats; @@ -1155,11 +1179,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; - sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); rcu_read_lock(); rxd = macsec_data_rcu(skb->dev); + sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd); + if (!sci) + goto drop_nosc; + list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); @@ -1282,6 +1309,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_rxsa_put(rx_sa); drop_nosa: macsec_rxsc_put(rx_sc); +drop_nosc: rcu_read_unlock(); drop_direct: kfree_skb(skb); diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 1b29d1d794a20..ee2a7b2f6268d 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "netdevsim.h" @@ -341,6 +342,7 @@ static int nsim_rcv(struct nsim_rq *rq, int budget) break; skb = skb_dequeue(&rq->skb_queue); + skb_mark_napi_id(skb, &rq->napi); netif_receive_skb(skb); } @@ -353,7 +355,8 @@ static int nsim_poll(struct napi_struct *napi, int budget) int done; done = nsim_rcv(rq, budget); - napi_complete(napi); + if (done < budget) + napi_complete_done(napi, done); return done; } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 7e2f10182c0cf..591e8fd33d8ea 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -889,6 +889,9 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) lockdep_assert_held_once(&bus->mdio_lock); + if (addr >= PHY_MAX_ADDR) + return -ENXIO; + if (bus->read) retval = bus->read(bus, addr, regnum); else @@ -918,6 +921,9 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) 
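The mdio_bus guards being added around this point all enforce the same invariant: a PHY address is a 5-bit field on the wire, PHY_MAX_ADDR is 32, and bus code uses the address to index fixed-size per-bus state, so an out-of-range value must be rejected with -ENXIO before it ever reaches bus->read()/bus->write(). A minimal sketch of the check, with PHY_MAX_ADDR defined locally so the snippet compiles on its own:

#include <errno.h>

#define PHY_MAX_ADDR 32 /* mirrors include/linux/phy.h */

/* Valid MDIO device addresses are 0..PHY_MAX_ADDR-1. */
static int mdio_addr_check(int addr)
{
    if (addr < 0 || addr >= PHY_MAX_ADDR)
        return -ENXIO;
    return 0;
}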
lockdep_assert_held_once(&bus->mdio_lock); + if (addr >= PHY_MAX_ADDR) + return -ENXIO; + if (bus->write) err = bus->write(bus, addr, regnum, val); else @@ -979,6 +985,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum) lockdep_assert_held_once(&bus->mdio_lock); + if (addr >= PHY_MAX_ADDR) + return -ENXIO; + if (bus->read_c45) retval = bus->read_c45(bus, addr, devad, regnum); else @@ -1010,6 +1019,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum, lockdep_assert_held_once(&bus->mdio_lock); + if (addr >= PHY_MAX_ADDR) + return -ENXIO; + if (bus->write_c45) err = bus->write_c45(bus, addr, devad, regnum, val); else diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index e3a5961dced9b..ffca1cec4ec99 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -332,7 +332,7 @@ static void lan88xx_link_change_notify(struct phy_device *phydev) * As workaround, set to 10 before setting to 100 * at forced 100 F/H mode. */ - if (!phydev->autoneg && phydev->speed == 100) { + if (phydev->state == PHY_NOLINK && !phydev->autoneg && phydev->speed == 100) { /* disable phy interrupt */ temp = phy_read(phydev, LAN88XX_INT_MASK); temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; @@ -486,6 +486,7 @@ static struct phy_driver microchip_phy_driver[] = { .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, .link_change_notify = lan88xx_link_change_notify, + .soft_reset = genphy_soft_reset, /* Interrupt handling is broken, do not define related * functions to force polling. diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 738a8822fcf01..ce49f3ac6939b 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -943,7 +943,9 @@ static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk, /* UDP checksum offset in IPv4 packet * according to: https://tools.ietf.org/html/rfc768 */ - val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR; + val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26); + if (enable) + val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR; vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM, val); @@ -1163,18 +1165,24 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts, container_of(mii_ts, struct vsc8531_private, mii_ts); if (!vsc8531->ptp->configured) - return; + goto out; - if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) { - kfree_skb(skb); - return; - } + if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) + goto out; + + if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC) + if (ptp_msg_is_sync(skb, type)) + goto out; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; mutex_lock(&vsc8531->ts_lock); __skb_queue_tail(&vsc8531->ptp->tx_queue, skb); mutex_unlock(&vsc8531->ts_lock); + return; + +out: + kfree_skb(skb); } static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8af44224480f1..13dea33d86ffa 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -2010,8 +2010,10 @@ void phy_detach(struct phy_device *phydev) struct module *ndev_owner = NULL; struct mii_bus *bus; - if (phydev->devlink) + if (phydev->devlink) { device_link_del(phydev->devlink); + phydev->devlink = NULL; + } if (phydev->sysfs_links) { if (dev) diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c index 105602581a033..ac909ad8a87b4 100644 --- a/drivers/net/phy/qcom/at803x.c +++ b/drivers/net/phy/qcom/at803x.c @@ 
-26,9 +26,6 @@ #define AT803X_LED_CONTROL 0x18 -#define AT803X_PHY_MMD3_WOL_CTRL 0x8012 -#define AT803X_WOL_EN BIT(5) - #define AT803X_REG_CHIP_CONFIG 0x1f #define AT803X_BT_BX_REG_SEL 0x8000 @@ -866,30 +863,6 @@ static int at8031_config_init(struct phy_device *phydev) return at803x_config_init(phydev); } -static int at8031_set_wol(struct phy_device *phydev, - struct ethtool_wolinfo *wol) -{ - int ret; - - /* First setup MAC address and enable WOL interrupt */ - ret = at803x_set_wol(phydev, wol); - if (ret) - return ret; - - if (wol->wolopts & WAKE_MAGIC) - /* Enable WOL function for 1588 */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, - AT803X_PHY_MMD3_WOL_CTRL, - 0, AT803X_WOL_EN); - else - /* Disable WoL function for 1588 */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, - AT803X_PHY_MMD3_WOL_CTRL, - AT803X_WOL_EN, 0); - - return ret; -} - static int at8031_config_intr(struct phy_device *phydev) { struct at803x_priv *priv = phydev->priv; diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c index 5048304ccc9e8..c3aad0e6b700a 100644 --- a/drivers/net/phy/qcom/qca808x.c +++ b/drivers/net/phy/qcom/qca808x.c @@ -633,7 +633,7 @@ static struct phy_driver qca808x_driver[] = { .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, .set_tunable = at803x_set_tunable, - .set_wol = at803x_set_wol, + .set_wol = at8031_set_wol, .get_wol = at803x_get_wol, .get_features = qca808x_get_features, .config_aneg = qca808x_config_aneg, diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c index d28815ef56bbf..af7d0d8e81be5 100644 --- a/drivers/net/phy/qcom/qcom-phy-lib.c +++ b/drivers/net/phy/qcom/qcom-phy-lib.c @@ -115,6 +115,31 @@ int at803x_set_wol(struct phy_device *phydev, } EXPORT_SYMBOL_GPL(at803x_set_wol); +int at8031_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int ret; + + /* First setup MAC address and enable WOL interrupt */ + ret = at803x_set_wol(phydev, wol); + if (ret) + return ret; + + if (wol->wolopts & WAKE_MAGIC) + /* Enable WOL function for 1588 */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + 0, AT803X_WOL_EN); + else + /* Disable WoL function for 1588 */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + + return ret; +} +EXPORT_SYMBOL_GPL(at8031_set_wol); + void at803x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h index 4bb541728846d..7f7151c8bacaa 100644 --- a/drivers/net/phy/qcom/qcom.h +++ b/drivers/net/phy/qcom/qcom.h @@ -172,6 +172,9 @@ #define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B #define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A +#define AT803X_PHY_MMD3_WOL_CTRL 0x8012 +#define AT803X_WOL_EN BIT(5) + #define AT803X_DEBUG_ADDR 0x1D #define AT803X_DEBUG_DATA 0x1E @@ -215,6 +218,8 @@ int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg, int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data); int at803x_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +int at8031_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol); void at803x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); int at803x_ack_interrupt(struct phy_device *phydev); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 166f6a7283731..8ce5705af69c5 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -92,6 +92,7 @@ #define RTL_GENERIC_PHYID 
0x001cc800 #define RTL_8211FVD_PHYID 0x001cc878 +#define RTL_8221B 0x001cc840 #define RTL_8221B_VB_CG 0x001cc849 #define RTL_8221B_VN_CG 0x001cc84a #define RTL_8251B 0x001cc862 @@ -1040,6 +1041,23 @@ static bool rtlgen_supports_2_5gbps(struct phy_device *phydev) return val >= 0 && val & MDIO_PMA_SPEED_2_5G; } +/* On internal PHY's MMD reads over C22 always return 0. + * Check a MMD register which is known to be non-zero. + */ +static bool rtlgen_supports_mmd(struct phy_device *phydev) +{ + int val; + + phy_lock_mdio_bus(phydev); + __phy_write(phydev, MII_MMD_CTRL, MDIO_MMD_PCS); + __phy_write(phydev, MII_MMD_DATA, MDIO_PCS_EEE_ABLE); + __phy_write(phydev, MII_MMD_CTRL, MDIO_MMD_PCS | MII_MMD_CTRL_NOINCR); + val = __phy_read(phydev, MII_MMD_DATA); + phy_unlock_mdio_bus(phydev); + + return val > 0; +} + static int rtlgen_match_phy_device(struct phy_device *phydev) { return phydev->phy_id == RTL_GENERIC_PHYID && @@ -1049,7 +1067,8 @@ static int rtlgen_match_phy_device(struct phy_device *phydev) static int rtl8226_match_phy_device(struct phy_device *phydev) { return phydev->phy_id == RTL_GENERIC_PHYID && - rtlgen_supports_2_5gbps(phydev); + rtlgen_supports_2_5gbps(phydev) && + rtlgen_supports_mmd(phydev); } static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id, @@ -1061,6 +1080,11 @@ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id, return !is_c45 && (id == phydev->phy_id); } +static int rtl8221b_match_phy_device(struct phy_device *phydev) +{ + return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev); +} + static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev) { return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false); @@ -1081,9 +1105,22 @@ static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev) return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true); } -static int rtl8251b_c22_match_phy_device(struct phy_device *phydev) +static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev) { - return rtlgen_is_c45_match(phydev, RTL_8251B, false); + if (phydev->is_c45) + return false; + + switch (phydev->phy_id) { + case RTL_GENERIC_PHYID: + case RTL_8221B: + case RTL_8251B: + case 0x001cc841: + break; + default: + return false; + } + + return rtlgen_supports_2_5gbps(phydev) && !rtlgen_supports_mmd(phydev); } static int rtl8251b_c45_match_phy_device(struct phy_device *phydev) @@ -1345,10 +1382,8 @@ static struct phy_driver realtek_drvs[] = { .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, - .read_mmd = rtl822x_read_mmd, - .write_mmd = rtl822x_write_mmd, }, { - PHY_ID_MATCH_EXACT(0x001cc840), + .match_phy_device = rtl8221b_match_phy_device, .name = "RTL8226B_RTL8221B 2.5Gbps PHY", .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, @@ -1359,8 +1394,6 @@ static struct phy_driver realtek_drvs[] = { .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, - .read_mmd = rtl822x_read_mmd, - .write_mmd = rtl822x_write_mmd, }, { PHY_ID_MATCH_EXACT(0x001cc838), .name = "RTL8226-CG 2.5Gbps PHY", @@ -1438,8 +1471,9 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - .match_phy_device = rtl8251b_c22_match_phy_device, - .name = "RTL8126A-internal 5Gbps PHY", + .match_phy_device = rtl_internal_nbaset_match_phy_device, + .name = "Realtek Internal NBASE-T PHY", + .flags = PHY_IS_INTERNAL, .get_features = rtl822x_get_features, .config_aneg = 
rtl822x_config_aneg, .read_status = rtl822x_read_status, diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 150aea7c9c367..6a43f6d6e85cb 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -155,10 +155,29 @@ static int smsc_phy_reset(struct phy_device *phydev) static int lan87xx_config_aneg(struct phy_device *phydev) { - int rc; + u8 mdix_ctrl; int val; + int rc; + + /* When auto-negotiation is disabled (forced mode), the PHY's + * Auto-MDIX will continue toggling the TX/RX pairs. + * + * To establish a stable link, we must select a fixed MDI mode. + * If the user has not specified a fixed MDI mode (i.e., mdix_ctrl is + * 'auto'), we default to ETH_TP_MDI. This choice of a ETH_TP_MDI mode + * mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN + * strap were configured for a fixed MDI connection. + */ + if (phydev->autoneg == AUTONEG_DISABLE) { + if (phydev->mdix_ctrl == ETH_TP_MDI_AUTO) + mdix_ctrl = ETH_TP_MDI; + else + mdix_ctrl = phydev->mdix_ctrl; + } else { + mdix_ctrl = phydev->mdix_ctrl; + } - switch (phydev->mdix_ctrl) { + switch (mdix_ctrl) { case ETH_TP_MDI: val = SPECIAL_CTRL_STS_OVRRD_AMDIX_; break; @@ -167,7 +186,8 @@ static int lan87xx_config_aneg(struct phy_device *phydev) SPECIAL_CTRL_STS_AMDIX_STATE_; break; case ETH_TP_MDI_AUTO: - val = SPECIAL_CTRL_STS_AMDIX_ENABLE_; + val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_; break; default: return genphy_config_aneg(phydev); @@ -183,7 +203,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev) rc |= val; phy_write(phydev, SPECIAL_CTRL_STS, rc); - phydev->mdix = phydev->mdix_ctrl; + phydev->mdix = mdix_ctrl; return genphy_config_aneg(phydev); } @@ -261,6 +281,33 @@ int lan87xx_read_status(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(lan87xx_read_status); +static int lan87xx_phy_config_init(struct phy_device *phydev) +{ + int rc; + + /* The LAN87xx PHY's initial MDI-X mode is determined by the AUTOMDIX_EN + * hardware strap, but the driver cannot read the strap's status. This + * creates an unpredictable initial state. + * + * To ensure consistent and reliable behavior across all boards, + * override the strap configuration on initialization and force the PHY + * into a known state with Auto-MDIX enabled, which is the expected + * default for modern hardware. + */ + rc = phy_modify(phydev, SPECIAL_CTRL_STS, + SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_ | + SPECIAL_CTRL_STS_AMDIX_STATE_, + SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_); + if (rc < 0) + return rc; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + return smsc_phy_config_init(phydev); +} + static int lan874x_phy_config_init(struct phy_device *phydev) { u16 val; @@ -694,7 +741,7 @@ static struct phy_driver smsc_phy_driver[] = { /* basic functions */ .read_status = lan87xx_read_status, - .config_init = smsc_phy_config_init, + .config_init = lan87xx_phy_config_init, .soft_reset = smsc_phy_reset, .config_aneg = lan87xx_config_aneg, diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index ff5be2cbf17b9..9201ee10a13f7 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -30,11 +30,14 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); - if (unlikely(ret < 0)) + if (unlikely(ret < size)) { netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); + ret = ret < 0 ? 
ret : -ENODATA; + } + return ret; } @@ -46,11 +49,14 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value, ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); - if (unlikely(ret < 0)) + if (unlikely(ret < size)) { netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); + ret = ret < 0 ? ret : -ENODATA; + } + return ret; } diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index 74162190bccc1..8531b804021aa 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h @@ -224,7 +224,6 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm); u16 asix_read_medium_status(struct usbnet *dev, int in_pm); int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm); -void asix_adjust_link(struct net_device *netdev); int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 72ffc89b477ad..7fd763917ae2c 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -414,28 +414,6 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm) return ret; } -/* set MAC link settings according to information from phylib */ -void asix_adjust_link(struct net_device *netdev) -{ - struct phy_device *phydev = netdev->phydev; - struct usbnet *dev = netdev_priv(netdev); - u16 mode = 0; - - if (phydev->link) { - mode = AX88772_MEDIUM_DEFAULT; - - if (phydev->duplex == DUPLEX_HALF) - mode &= ~AX_MEDIUM_FD; - - if (phydev->speed != SPEED_100) - mode &= ~AX_MEDIUM_PS; - } - - asix_write_medium_mode(dev, mode, 0); - phy_print_status(phydev); - usbnet_link_change(dev, phydev->link, 0); -} - int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm) { int ret; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index da24941a6e444..9b0318fb50b55 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -752,7 +752,6 @@ static void ax88772_mac_link_down(struct phylink_config *config, struct usbnet *dev = netdev_priv(to_net_dev(config->dev)); asix_write_medium_mode(dev, 0, 0); - usbnet_link_change(dev, false, false); } static void ax88772_mac_link_up(struct phylink_config *config, @@ -783,7 +782,6 @@ static void ax88772_mac_link_up(struct phylink_config *config, m |= AX_MEDIUM_RFC; asix_write_medium_mode(dev, m, 0); - usbnet_link_change(dev, true, false); } static const struct phylink_mac_ops ax88772_phylink_mac_ops = { @@ -1350,10 +1348,9 @@ static const struct driver_info ax88772_info = { .description = "ASIX AX88772 USB 2.0 Ethernet", .bind = ax88772_bind, .unbind = ax88772_unbind, - .status = asix_status, .reset = ax88772_reset, .stop = ax88772_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, }; @@ -1362,11 +1359,9 @@ static const struct driver_info ax88772b_info = { .description = "ASIX AX88772B USB 2.0 Ethernet", .bind = ax88772_bind, .unbind = ax88772_unbind, - .status = asix_status, .reset = ax88772_reset, .stop = ax88772_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | - FLAG_MULTI_PACKET, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, .data = FLAG_EEPROM_MAC, @@ -1376,11 +1371,9 @@ static const struct driver_info lxausb_t1l_info = { .description = 
"Linux Automation GmbH USB 10Base-T1L", .bind = ax88772_bind, .unbind = ax88772_unbind, - .status = asix_status, .reset = ax88772_reset, .stop = ax88772_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | - FLAG_MULTI_PACKET, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, .data = FLAG_EEPROM_MAC, @@ -1412,10 +1405,8 @@ static const struct driver_info hg20f9_info = { .description = "HG20F9 USB 2.0 Ethernet", .bind = ax88772_bind, .unbind = ax88772_unbind, - .status = asix_status, .reset = ax88772_reset, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | - FLAG_MULTI_PACKET, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, .data = FLAG_EEPROM_MAC, diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c index f69d9b902da04..a206ffa76f1b9 100644 --- a/drivers/net/usb/ch9200.c +++ b/drivers/net/usb/ch9200.c @@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); unsigned char buff[2]; + int ret; netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n", __func__, phy_id, loc); @@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc) if (phy_id != 0) return -ENODEV; - control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02, - CONTROL_TIMEOUT_MS); + ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02, + CONTROL_TIMEOUT_MS); + if (ret < 0) + return ret; return (buff[0] | buff[1] << 8); } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 531b1b6a37d19..2f8637224b69e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -4229,8 +4229,6 @@ static void lan78xx_disconnect(struct usb_interface *intf) if (!dev) return; - netif_napi_del(&dev->napi); - udev = interface_to_usbdev(intf); net = dev->net; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 944a33361dae5..7e0608f568353 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1426,6 +1426,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ + {QMI_QUIRK_SET_DTR(0x1e0e, 0x9071, 3)}, /* SIMCom 8230C ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6d36cb204f9bc..54c5d9a14c672 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -765,6 +765,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); } +static int check_mergeable_len(struct net_device *dev, void *mrg_ctx, + unsigned int len) +{ + unsigned int headroom, tailroom, room, truesize; + + truesize = mergeable_ctx_to_truesize(mrg_ctx); + headroom = mergeable_ctx_to_headroom(mrg_ctx); + tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; + room = SKB_DATA_ALIGN(headroom + tailroom); + + if (len > truesize - room) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)(truesize - room)); + DEV_STATS_INC(dev, rx_length_errors); + return -1; + } + + return 0; +} + static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen, unsigned int headroom, unsigned int len) @@ -1098,15 +1118,29 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len) sg->length = len; } +/* Note that @len is the length of received data without virtio header */ static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi, - struct receive_queue *rq, void *buf, u32 len) + struct receive_queue *rq, void *buf, + u32 len, bool first_buf) { struct xdp_buff *xdp; u32 bufsize; xdp = (struct xdp_buff *)buf; - bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; + /* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for + * virtio header and ask the vhost to fill data from + * hard_start + XDP_PACKET_HEADROOM - vi->hdr_len + * The first buffer has virtio header so the remaining region for frame + * data is + * xsk_pool_get_rx_frame_size() + * While other buffers than the first one do not have virtio header, so + * the maximum frame data's length can be + * xsk_pool_get_rx_frame_size() + vi->hdr_len + */ + bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool); + if (!first_buf) + bufsize += vi->hdr_len; if (unlikely(len > bufsize)) { pr_debug("%s: rx error: len %u exceeds truesize %u\n", @@ -1231,7 +1265,7 @@ static int xsk_append_merge_buffer(struct virtnet_info *vi, u64_stats_add(&stats->bytes, len); - xdp = buf_to_xdp(vi, rq, buf, len); + xdp = buf_to_xdp(vi, rq, buf, len, false); if (!xdp) goto err; @@ -1329,7 +1363,7 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu u64_stats_add(&stats->bytes, len); - xdp = buf_to_xdp(vi, rq, buf, len); + xdp = buf_to_xdp(vi, rq, buf, len, true); if (!xdp) return; @@ -1649,7 +1683,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi) * across multiple buffers (num_buf > 1), and we make sure buffers * have enough headroom. */ -static struct page *xdp_linearize_page(struct receive_queue *rq, +static struct page *xdp_linearize_page(struct net_device *dev, + struct receive_queue *rq, int *num_buf, struct page *p, int offset, @@ -1669,18 +1704,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, memcpy(page_address(page) + page_off, page_address(p) + offset, *len); page_off += *len; + /* Only mergeable mode can go inside this while loop. In small mode, + * *num_buf == 1, so it cannot go inside. + */ while (--*num_buf) { unsigned int buflen; void *buf; + void *ctx; int off; - buf = virtnet_rq_get_buf(rq, &buflen, NULL); + buf = virtnet_rq_get_buf(rq, &buflen, &ctx); if (unlikely(!buf)) goto err_buf; p = virt_to_head_page(buf); off = buf - page_address(p); + if (check_mergeable_len(dev, ctx, buflen)) { + put_page(p); + goto err_buf; + } + /* guard against a misconfigured or uncooperative backend that * is sending packet larger than the MTU. 
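 * (With check_mergeable_len() called on every continuation buffer
 * above, a length larger than the truesize/headroom recorded in that
 * buffer's mergeable context is now dropped, and counted in
 * rx_length_errors, before anything is copied.)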
*/ @@ -1769,7 +1813,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev, headroom = vi->hdr_len + header_offset; buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - xdp_page = xdp_linearize_page(rq, &num_buf, page, + xdp_page = xdp_linearize_page(dev, rq, &num_buf, page, offset, header_offset, &tlen); if (!xdp_page) @@ -2104,7 +2148,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, */ if (!xdp_prog->aux->xdp_has_frags) { /* linearize data for XDP */ - xdp_page = xdp_linearize_page(rq, num_buf, + xdp_page = xdp_linearize_page(vi->dev, rq, num_buf, *page, offset, XDP_PACKET_HEADROOM, len); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 151d7cdfc4802..c48c2de6f961f 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1560,6 +1560,30 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, return (hlen + (hdr.tcp->doff << 2)); } +static void +vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto) +{ + struct udphdr *uh = NULL; + + if (ip_proto == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } +} + static int vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter, int quota) @@ -1873,6 +1897,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, if (segCnt != 0 && mss != 0) { skb_shinfo(skb)->gso_type = rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; + if (encap_lro) + vmxnet3_lro_tunnel(skb, skb->protocol); skb_shinfo(skb)->gso_size = mss; skb_shinfo(skb)->gso_segs = segCnt; } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) { diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 474faccf75fd9..1a70770938001 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -605,10 +605,10 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, if (rd == NULL) return -ENOMEM; - if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { - kfree(rd); - return -ENOMEM; - } + /* The driver can work correctly without a dst cache, so do not treat + * dst cache initialization errors as fatal. 
+ */ + dst_cache_init(&rd->dst_cache, GFP_ATOMIC | __GFP_NOWARN); rd->remote_ip = *ip; rd->remote_port = port; diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 45e9b908dbfb0..acb9ce7a626af 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -364,6 +364,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (ret < 0) goto err_free_handshake_queue; + dev_set_threaded(dev, true); ret = register_netdevice(dev); if (ret < 0) goto err_uninit_ratelimiter; diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 0fe47d51013c7..59f7ccb33fde3 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -937,7 +937,9 @@ static int ath10k_snoc_hif_start(struct ath10k *ar) dev_set_threaded(ar->napi_dev, true); ath10k_core_napi_enable(ar); - ath10k_snoc_irq_enable(ar); + /* IRQs are left enabled when we restart due to a firmware crash */ + if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) + ath10k_snoc_irq_enable(ar); ath10k_snoc_rx_post(ar); clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags); diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index e66e86bdec20f..9d8efec46508a 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -393,11 +393,10 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe, goto err; } + /* Make sure descriptor is read after the head pointer. */ + dma_rmb(); + *nbytes = ath11k_hal_ce_dst_status_get_length(desc); - if (*nbytes == 0) { - ret = -EIO; - goto err; - } *skb = pipe->dest_ring->skb[sw_index]; pipe->dest_ring->skb[sw_index] = NULL; @@ -430,8 +429,8 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe) dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); - if (unlikely(max_nbytes < nbytes)) { - ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)", + if (unlikely(max_nbytes < nbytes || nbytes == 0)) { + ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 7eba6ee054ffe..2ec1771262fd9 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -811,6 +811,52 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { }, }; +static const struct dmi_system_id ath11k_pm_quirk_table[] = { + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21J4"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K4"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K6"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K8"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21KA"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21F9"), + }, + }, + {} +}; + static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct 
ath11k_base *ab) { WARN_ON(!ab->hw_params.single_pdev_only); @@ -855,6 +901,7 @@ void ath11k_fw_stats_init(struct ath11k *ar) INIT_LIST_HEAD(&ar->fw_stats.bcn); init_completion(&ar->fw_stats_complete); + init_completion(&ar->fw_stats_done); } void ath11k_fw_stats_free(struct ath11k_fw_stats *stats) @@ -1811,6 +1858,20 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) { int ret; + switch (ath11k_crypto_mode) { + case ATH11K_CRYPT_MODE_SW: + set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); + set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); + break; + case ATH11K_CRYPT_MODE_HW: + clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); + clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); + break; + default: + ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode); + return -EINVAL; + } + ret = ath11k_core_start_firmware(ab, ab->fw_mode); if (ret) { ath11k_err(ab, "failed to start firmware: %d\n", ret); @@ -1829,20 +1890,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) goto err_firmware_stop; } - switch (ath11k_crypto_mode) { - case ATH11K_CRYPT_MODE_SW: - set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); - set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); - break; - case ATH11K_CRYPT_MODE_HW: - clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); - clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); - break; - default: - ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode); - return -EINVAL; - } - if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW) set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); @@ -1915,6 +1962,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) void ath11k_core_halt(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; + struct list_head *pos, *n; lockdep_assert_held(&ar->conf_mutex); @@ -1929,7 +1977,12 @@ void ath11k_core_halt(struct ath11k *ar) rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); - INIT_LIST_HEAD(&ar->arvifs); + + spin_lock_bh(&ar->data_lock); + list_for_each_safe(pos, n, &ar->arvifs) + list_del_init(pos); + spin_unlock_bh(&ar->data_lock); + idr_init(&ar->txmgmt_idr); } @@ -2190,8 +2243,17 @@ EXPORT_SYMBOL(ath11k_core_pre_init); int ath11k_core_init(struct ath11k_base *ab) { + const struct dmi_system_id *dmi_id; int ret; + dmi_id = dmi_first_match(ath11k_pm_quirk_table); + if (dmi_id) + ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data; + else + ab->pm_policy = ATH11K_PM_DEFAULT; + + ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm policy %u\n", ab->pm_policy); + ret = ath11k_core_soc_create(ab); if (ret) { ath11k_err(ab, "failed to create soc core: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 09c37e19a1680..09fdb7be0e197 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -599,6 +599,8 @@ struct ath11k_fw_stats { struct list_head pdevs; struct list_head vdevs; struct list_head bcn; + u32 num_vdev_recvd; + u32 num_bcn_recvd; }; struct ath11k_dbg_htt_stats { @@ -780,7 +782,7 @@ struct ath11k { u8 alpha2[REG_ALPHA2_LEN + 1]; struct ath11k_fw_stats fw_stats; struct completion fw_stats_complete; - bool fw_stats_done; + struct completion fw_stats_done; /* protected by conf_mutex */ bool ps_state_enable; @@ -889,6 +891,11 @@ struct ath11k_msi_config { u16 hw_rev; }; +enum ath11k_pm_policy { + ATH11K_PM_DEFAULT, + ATH11K_PM_WOW, +}; + /* Master structure to hold the hw data which may be used in core module */ struct ath11k_base { enum ath11k_hw_rev hw_rev; @@ 
-1051,6 +1058,8 @@ struct ath11k_base { } testmode; #endif + enum ath11k_pm_policy pm_policy; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 57281a135dd7f..5d46f8e4c231f 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -93,57 +93,14 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar, spin_unlock_bh(&dbr_data->lock); } -static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar) -{ - spin_lock_bh(&ar->data_lock); - ar->fw_stats_done = false; - ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); - ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); - spin_unlock_bh(&ar->data_lock); -} - void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats) { struct ath11k_base *ab = ar->ab; - struct ath11k_pdev *pdev; - bool is_end; - static unsigned int num_vdev, num_bcn; - size_t total_vdevs_started = 0; - int i; - - /* WMI_REQUEST_PDEV_STAT request has been already processed */ - - if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { - ar->fw_stats_done = true; - return; - } - - if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { - if (list_empty(&stats->vdevs)) { - ath11k_warn(ab, "empty vdev stats"); - return; - } - /* FW sends all the active VDEV stats irrespective of PDEV, - * hence limit until the count of all VDEVs started - */ - for (i = 0; i < ab->num_radios; i++) { - pdev = rcu_dereference(ab->pdevs_active[i]); - if (pdev && pdev->ar) - total_vdevs_started += ar->num_started_vdevs; - } - - is_end = ((++num_vdev) == total_vdevs_started); - - list_splice_tail_init(&stats->vdevs, - &ar->fw_stats.vdevs); - - if (is_end) { - ar->fw_stats_done = true; - num_vdev = 0; - } - return; - } + bool is_end = true; + /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and + * WMI_REQUEST_VDEV_STAT requests have been already processed. + */ if (stats->stats_id == WMI_REQUEST_BCN_STAT) { if (list_empty(&stats->bcn)) { ath11k_warn(ab, "empty bcn stats"); @@ -152,97 +109,18 @@ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats * /* Mark end until we reached the count of all started VDEVs * within the PDEV */ - is_end = ((++num_bcn) == ar->num_started_vdevs); + if (ar->num_started_vdevs) + is_end = ((++ar->fw_stats.num_bcn_recvd) == + ar->num_started_vdevs); list_splice_tail_init(&stats->bcn, &ar->fw_stats.bcn); - if (is_end) { - ar->fw_stats_done = true; - num_bcn = 0; - } + if (is_end) + complete(&ar->fw_stats_done); } } -static int ath11k_debugfs_fw_stats_request(struct ath11k *ar, - struct stats_request_params *req_param) -{ - struct ath11k_base *ab = ar->ab; - unsigned long timeout, time_left; - int ret; - - lockdep_assert_held(&ar->conf_mutex); - - /* FW stats can get split when exceeding the stats data buffer limit. 
- * In that case, since there is no end marking for the back-to-back - * received 'update stats' event, we keep a 3 seconds timeout in case, - * fw_stats_done is not marked yet - */ - timeout = jiffies + msecs_to_jiffies(3 * 1000); - - ath11k_debugfs_fw_stats_reset(ar); - - reinit_completion(&ar->fw_stats_complete); - - ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); - - if (ret) { - ath11k_warn(ab, "could not request fw stats (%d)\n", - ret); - return ret; - } - - time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); - - if (!time_left) - return -ETIMEDOUT; - - for (;;) { - if (time_after(jiffies, timeout)) - break; - - spin_lock_bh(&ar->data_lock); - if (ar->fw_stats_done) { - spin_unlock_bh(&ar->data_lock); - break; - } - spin_unlock_bh(&ar->data_lock); - } - return 0; -} - -int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id, - u32 vdev_id, u32 stats_id) -{ - struct ath11k_base *ab = ar->ab; - struct stats_request_params req_param; - int ret; - - mutex_lock(&ar->conf_mutex); - - if (ar->state != ATH11K_STATE_ON) { - ret = -ENETDOWN; - goto err_unlock; - } - - req_param.pdev_id = pdev_id; - req_param.vdev_id = vdev_id; - req_param.stats_id = stats_id; - - ret = ath11k_debugfs_fw_stats_request(ar, &req_param); - if (ret) - ath11k_warn(ab, "failed to request fw stats: %d\n", ret); - - ath11k_dbg(ab, ATH11K_DBG_WMI, - "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n", - pdev_id, vdev_id, stats_id); - -err_unlock: - mutex_unlock(&ar->conf_mutex); - - return ret; -} - static int ath11k_open_pdev_stats(struct inode *inode, struct file *file) { struct ath11k *ar = inode->i_private; @@ -268,7 +146,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file) req_param.vdev_id = 0; req_param.stats_id = WMI_REQUEST_PDEV_STAT; - ret = ath11k_debugfs_fw_stats_request(ar, &req_param); + ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) { ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); goto err_free; @@ -339,7 +217,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file) req_param.vdev_id = 0; req_param.stats_id = WMI_REQUEST_VDEV_STAT; - ret = ath11k_debugfs_fw_stats_request(ar, &req_param); + ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) { ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret); goto err_free; @@ -415,7 +293,7 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file) continue; req_param.vdev_id = arvif->vdev_id; - ret = ath11k_debugfs_fw_stats_request(ar, &req_param); + ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) { ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret); goto err_free; diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h index a39e458637b01..ed7fec177588f 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.h +++ b/drivers/net/wireless/ath/ath11k/debugfs.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _ATH11K_DEBUGFS_H_ @@ -273,8 +273,6 @@ void ath11k_debugfs_unregister(struct ath11k *ar); void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats); void ath11k_debugfs_fw_stats_init(struct ath11k *ar); -int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id, - u32 vdev_id, u32 stats_id); static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar) { @@ -381,12 +379,6 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar) return 0; } -static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar, - u32 pdev_id, u32 vdev_id, u32 stats_id) -{ - return 0; -} - static inline void ath11k_debugfs_add_dbring_entry(struct ath11k *ar, enum wmi_direct_buffer_module id, diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index bfb8e7b1a300c..007d869590423 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -2637,7 +2637,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, struct ath11k *ar; struct hal_reo_dest_ring *desc; enum hal_reo_dest_ring_push_reason push_reason; - u32 cookie; + u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0; int i; for (i = 0; i < MAX_RADIOS; i++) @@ -2650,11 +2650,14 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, try_again: ath11k_hal_srng_access_begin(ab, srng); + /* Make sure descriptor is read after the head pointer. */ + dma_rmb(); + while (likely(desc = (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, srng))) { cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, - desc->buf_addr_info.info1); + READ_ONCE(desc->buf_addr_info.info1)); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); @@ -2683,8 +2686,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, num_buffs_reaped[mac_id]++; + info0 = READ_ONCE(desc->info0); push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, - desc->info0); + info0); if (unlikely(push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { dev_kfree_skb_any(msdu); @@ -2692,18 +2696,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, continue; } - rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & + rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0); + rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0); + + rxcb->is_first_msdu = !!(rx_msdu_info0 & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & + rxcb->is_last_msdu = !!(rx_msdu_info0 & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & + rxcb->is_continuation = !!(rx_msdu_info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, - desc->rx_mpdu_info.meta_data); + READ_ONCE(desc->rx_mpdu_info.meta_data)); rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, - desc->rx_mpdu_info.info0); + rx_mpdu_info0); rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, - desc->info0); + info0); rxcb->mac_id = mac_id; __skb_queue_tail(&msdu_list[mac_id], msdu); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index f02599bd1c36b..c445bf5cd8321 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -599,7 +599,7 @@ u32 ath11k_hal_ce_dst_status_get_length(void *buf) struct hal_ce_srng_dst_status_desc *desc = buf; u32 len; - len = 
FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags); + len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags)); desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN; return len; @@ -829,7 +829,7 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; } else { - srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; + srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); /* Try to prefetch the next descriptor in the ring */ if (srng->flags & HAL_SRNG_FLAGS_CACHED) diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index f8068d2e848c3..7ead581f5bfd1 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8938,6 +8938,86 @@ static void ath11k_mac_put_chain_rssi(struct station_info *sinfo, } } +static void ath11k_mac_fw_stats_reset(struct ath11k *ar) +{ + spin_lock_bh(&ar->data_lock); + ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); + ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); + ar->fw_stats.num_vdev_recvd = 0; + ar->fw_stats.num_bcn_recvd = 0; + spin_unlock_bh(&ar->data_lock); +} + +int ath11k_mac_fw_stats_request(struct ath11k *ar, + struct stats_request_params *req_param) +{ + struct ath11k_base *ab = ar->ab; + unsigned long time_left; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath11k_mac_fw_stats_reset(ar); + + reinit_completion(&ar->fw_stats_complete); + reinit_completion(&ar->fw_stats_done); + + ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); + + if (ret) { + ath11k_warn(ab, "could not request fw stats (%d)\n", + ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); + if (!time_left) + return -ETIMEDOUT; + + /* FW stats can get split when exceeding the stats data buffer limit. 
+ * In that case, since there is no end marking for the back-to-back + * received 'update stats' event, we keep a 3 seconds timeout in case, + * fw_stats_done is not marked yet + */ + time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ); + if (!time_left) + return -ETIMEDOUT; + + return 0; +} + +static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id, + u32 vdev_id, u32 stats_id) +{ + struct ath11k_base *ab = ar->ab; + struct stats_request_params req_param; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH11K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + req_param.pdev_id = pdev_id; + req_param.vdev_id = vdev_id; + req_param.stats_id = stats_id; + + ret = ath11k_mac_fw_stats_request(ar, &req_param); + if (ret) + ath11k_warn(ab, "failed to request fw stats: %d\n", ret); + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n", + pdev_id, vdev_id, stats_id); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -8975,8 +9055,8 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) && arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && ar->ab->hw_params.supports_rssi_stats && - !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0, - WMI_REQUEST_RSSI_PER_CHAIN_STAT)) { + !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, + WMI_REQUEST_RSSI_PER_CHAIN_STAT)) { ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true); } @@ -8984,8 +9064,8 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, if (!signal && arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && ar->ab->hw_params.supports_rssi_stats && - !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0, - WMI_REQUEST_VDEV_STAT))) + !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, + WMI_REQUEST_VDEV_STAT))) signal = arsta->rssi_beacon; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, @@ -9331,11 +9411,13 @@ static int ath11k_fw_stats_request(struct ath11k *ar, lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - ar->fw_stats_done = false; ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); + ar->fw_stats.num_vdev_recvd = 0; + ar->fw_stats.num_bcn_recvd = 0; spin_unlock_bh(&ar->data_lock); reinit_completion(&ar->fw_stats_complete); + reinit_completion(&ar->fw_stats_done); ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); if (ret) { diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index f5800fbecff89..5e61eea1bb037 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_MAC_H @@ -179,4 +179,6 @@ int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif, void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx); +int ath11k_mac_fw_stats_request(struct ath11k *ar, + struct stats_request_params *req_param); #endif diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 7a22483b35cd9..a5555c959dec9 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1989,6 +1989,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) chunk->prev_size == chunk->size) continue; + if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { + ath11k_dbg(ab, ATH11K_DBG_QMI, + "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n", + chunk->size, chunk->type, + chunk->prev_size, chunk->prev_type); + ab->qmi.target_mem_delayed = true; + return 0; + } + /* cannot reuse the existing chunk */ dma_free_coherent(ab->dev, chunk->prev_size, chunk->vaddr, chunk->paddr); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 87abfa5475295..5f7edf622de7a 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -8157,6 +8157,11 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_fw_stats stats = {}; + size_t total_vdevs_started = 0; + struct ath11k_pdev *pdev; + bool is_end = true; + int i; + struct ath11k *ar; int ret; @@ -8183,18 +8188,50 @@ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *sk spin_lock_bh(&ar->data_lock); - /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via + /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and + * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via * debugfs fw stats. Therefore, processing it separately. */ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); - ar->fw_stats_done = true; + complete(&ar->fw_stats_done); + goto complete; + } + + if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { + complete(&ar->fw_stats_done); + goto complete; + } + + if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { + if (list_empty(&stats.vdevs)) { + ath11k_warn(ab, "empty vdev stats"); + goto complete; + } + /* FW sends all the active VDEV stats irrespective of PDEV, + * hence limit until the count of all VDEVs started + */ + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) + total_vdevs_started += ar->num_started_vdevs; + } + + if (total_vdevs_started) + is_end = ((++ar->fw_stats.num_vdev_recvd) == + total_vdevs_started); + + list_splice_tail_init(&stats.vdevs, + &ar->fw_stats.vdevs); + + if (is_end) + complete(&ar->fw_stats_done); + goto complete; } - /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT - * are currently requested only via debugfs fw stats. Hence, processing these - * in debugfs context + /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats. 
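 * (The WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and
 * WMI_REQUEST_VDEV_STAT branches above signal ar->fw_stats_done
 * themselves once their last chunk is accounted; for beacon stats the
 * same completion is signalled from ath11k_debugfs_fw_stats_process()
 * when num_bcn_recvd reaches the started-vdev count.)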
+ * Hence, processing it in debugfs context */ ath11k_debugfs_fw_stats_process(ar, &stats); diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c index be0d669d31fcc..740586fe49d1f 100644 --- a/drivers/net/wireless/ath/ath12k/ce.c +++ b/drivers/net/wireless/ath/ath12k/ce.c @@ -343,11 +343,10 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe, goto err; } + /* Make sure descriptor is read after the head pointer. */ + dma_rmb(); + *nbytes = ath12k_hal_ce_dst_status_get_length(desc); - if (*nbytes == 0) { - ret = -EIO; - goto err; - } *skb = pipe->dest_ring->skb[sw_index]; pipe->dest_ring->skb[sw_index] = NULL; @@ -380,8 +379,8 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe) dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); - if (unlikely(max_nbytes < nbytes)) { - ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)", + if (unlikely(max_nbytes < nbytes || nbytes == 0)) { + ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h index 857bc5f9e946a..f9547a3945e44 100644 --- a/drivers/net/wireless/ath/ath12k/ce.h +++ b/drivers/net/wireless/ath/ath12k/ce.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_CE_H @@ -39,8 +39,8 @@ #define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */ /* CE address/mask */ -#define CE_HOST_IE_ADDRESS 0x00A1803C -#define CE_HOST_IE_2_ADDRESS 0x00A18040 +#define CE_HOST_IE_ADDRESS 0x75804C +#define CE_HOST_IE_2_ADDRESS 0x758050 #define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS #define CE_HOST_IE_3_SHIFT 0xC diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 8bb8ee98188bf..c3c76e2680629 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -1004,6 +1004,7 @@ static void ath12k_rfkill_work(struct work_struct *work) void ath12k_core_halt(struct ath12k *ar) { + struct list_head *pos, *n; struct ath12k_base *ab = ar->ab; lockdep_assert_held(&ar->conf_mutex); @@ -1019,7 +1020,12 @@ void ath12k_core_halt(struct ath12k *ar) rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); - INIT_LIST_HEAD(&ar->arvifs); + + spin_lock_bh(&ar->data_lock); + list_for_each_safe(pos, n, &ar->arvifs) + list_del_init(pos); + spin_unlock_bh(&ar->data_lock); + idr_init(&ar->txmgmt_idr); } diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c index f1b7e74aefe42..6f2e7ecc66af7 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c @@ -1646,6 +1646,9 @@ static ssize_t ath12k_write_htt_stats_type(struct file *file, const int size = 32; int num_args; + if (count > size) + return -EINVAL; + char *buf __free(kfree) = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 6a88745369447..7bfd323cdf244 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ 
-1080,6 +1080,8 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct bool is_mcbc = rxcb->is_mcbc; bool is_eapol_tkip = rxcb->is_eapol; + status->link_valid = 0; + if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) && !(status->flag & RX_FLAG_SKIP_MONITOR)) { he = skb_push(msdu, sizeof(known)); diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 4cbba96121a11..eebdcc16e8fc4 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -228,12 +228,6 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); } -static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab, - struct hal_rx_desc *desc) -{ - return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc); -} - static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab, struct hal_rx_desc *desc) { @@ -1768,6 +1762,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, struct hal_rx_desc *ldesc; int space_extra, rem_len, buf_len; u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; + bool is_continuation; /* As the msdu is spread across multiple rx buffers, * find the offset to the start of msdu for computing @@ -1816,7 +1811,8 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, rem_len = msdu_len - buf_first_len; while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { rxcb = ATH12K_SKB_RXCB(skb); - if (rxcb->is_continuation) + is_continuation = rxcb->is_continuation; + if (is_continuation) buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; else buf_len = rem_len; @@ -1834,7 +1830,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, dev_kfree_skb_any(skb); rem_len -= buf_len; - if (!rxcb->is_continuation) + if (!is_continuation) break; } @@ -1872,8 +1868,7 @@ static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu) CHECKSUM_NONE : CHECKSUM_UNNECESSARY; } -static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, - enum hal_encrypt_type enctype) +int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: @@ -2067,10 +2062,13 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, struct hal_rx_desc *rx_desc = rxcb->rx_desc; struct ath12k_base *ab = ar->ab; size_t hdr_len, crypto_len; - struct ieee80211_hdr *hdr; - u16 qos_ctl; - __le16 fc; - u8 *crypto_hdr; + struct ieee80211_hdr hdr; + __le16 qos_ctl; + u8 *crypto_hdr, mesh_ctrl; + + ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr); + hdr_len = ieee80211_hdrlen(hdr.frame_control); + mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc); if (!(status->flag & RX_FLAG_IV_STRIPPED)) { crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); @@ -2078,27 +2076,21 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype); } - fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc)); - hdr_len = ieee80211_hdrlen(fc); skb_push(msdu, hdr_len); - hdr = (struct ieee80211_hdr *)msdu->data; - hdr->frame_control = fc; - - /* Get wifi header from rx_desc */ - ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr); + memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr))); if (rxcb->is_mcbc) status->flag &= ~RX_FLAG_PN_VALIDATED; /* Add QOS header */ - if (ieee80211_is_data_qos(hdr->frame_control)) { - qos_ctl = rxcb->tid; - if 
(ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc)) - qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; + if (ieee80211_is_data_qos(hdr.frame_control)) { + struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data; - /* TODO: Add other QoS ctl fields when required */ - memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN), - &qos_ctl, IEEE80211_QOS_CTL_LEN); + qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK); + if (mesh_ctrl) + qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT); + + memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN); } } @@ -3693,6 +3685,15 @@ static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); + + if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "invalid msdu len in tkip mic err %u\n", msdu_len); + ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc, + sizeof(*desc)); + return true; + } + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index eb1f92559179b..4232091d9e328 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -143,4 +143,7 @@ int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, const void *ptr, void *data), void *data); + +int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype); + #endif /* ATH12K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 201ffdb8c44ae..21e07b5cee570 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -227,7 +227,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); struct hal_tcl_data_cmd *hal_tcl_desc; struct hal_tx_msdu_ext_desc *msg; - struct sk_buff *skb_ext_desc; + struct sk_buff *skb_ext_desc = NULL; struct hal_srng *tcl_ring; struct ieee80211_hdr *hdr = (void *)skb->data; struct dp_tx_ring *tx_ring; @@ -397,17 +397,15 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, if (ret < 0) { ath12k_dbg(ab, ATH12K_DBG_DP_TX, "Failed to add HTT meta data, dropping packet\n"); - goto fail_unmap_dma; + goto fail_free_ext_skb; } } ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data, skb_ext_desc->len, DMA_TO_DEVICE); ret = dma_mapping_error(ab->dev, ti.paddr); - if (ret) { - kfree_skb(skb_ext_desc); - goto fail_unmap_dma; - } + if (ret) + goto fail_free_ext_skb; ti.data_len = skb_ext_desc->len; ti.type = HAL_TCL_DESC_TYPE_EXT_DESC; @@ -443,7 +441,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, ring_selector++; } - goto fail_unmap_dma; + goto fail_unmap_dma_ext; } ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti); @@ -459,13 +457,16 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, return 0; -fail_unmap_dma: - dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); - +fail_unmap_dma_ext: if (skb_cb->paddr_ext_desc) dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); +fail_free_ext_skb: + kfree_skb(skb_ext_desc); + +fail_unmap_dma: + dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); fail_remove_tx_buf: 
ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id); @@ -566,6 +567,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH: ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring); break; case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY: diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c index ca04bfae8bdcc..3afb11c7bf18e 100644 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/dma-mapping.h> #include "hal_tx.h" @@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) { - return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) & - RX_MPDU_START_INFO6_MCAST_BCAST; + return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) & + RX_MSDU_END_INFO5_DA_IS_MCBC; } static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, @@ -511,11 +511,6 @@ static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]); } -static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc) -{ - return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl); -} - static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab) { struct ath12k_hal *hal = &ab->hal; @@ -552,9 +547,9 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab) s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP; s = &hal->srng_config[HAL_TCL_DATA]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; - s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB; + s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab); s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; s = &hal->srng_config[HAL_TCL_CMD]; @@ -566,29 +561,29 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab) s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; s = &hal->srng_config[HAL_CE_SRC]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s = &hal->srng_config[HAL_CE_DST]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP; -
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_CE_DST_STATUS]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_WBM_IDLE_LINK]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); @@ -736,7 +731,6 @@ const struct hal_rx_ops hal_rx_qcn9274_ops = { .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc, .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr, .rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr, - .rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl, .dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done, .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail, .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail, @@ -908,8 +902,8 @@ static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) { - return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) & - RX_MPDU_START_INFO6_MCAST_BCAST; + return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) & + RX_MSDU_END_INFO5_DA_IS_MCBC; } static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, @@ -975,11 +969,6 @@ ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]); } -static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc) -{ - return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl); -} - static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc) { return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14, @@ -1080,8 +1069,6 @@ const struct hal_rx_ops hal_rx_qcn9274_compact_ops = { .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc, .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr, .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr, - .rx_desc_get_mpdu_frame_ctl = - ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl, .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done, .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail, .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail, @@ -1330,11 +1317,6 @@ static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, crypto_hdr[7] = 
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]); } -static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc) -{ - return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl); -} - static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab) { struct ath12k_hal *hal = &ab->hal; @@ -1371,9 +1353,9 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab) s = &hal->srng_config[HAL_TCL_DATA]; s->max_rings = 5; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; - s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB; + s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab); s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; s = &hal->srng_config[HAL_TCL_CMD]; @@ -1386,31 +1368,31 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab) s = &hal->srng_config[HAL_CE_SRC]; s->max_rings = 12; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s = &hal->srng_config[HAL_CE_DST]; s->max_rings = 12; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_CE_DST_STATUS]; s->max_rings = 12; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_WBM_IDLE_LINK]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); @@ -1555,7 +1537,6 @@ const struct hal_rx_ops hal_rx_wcn7850_ops = { .rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc, .rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr, .rx_desc_get_crypto_header = 
ath12k_hw_wcn7850_rx_desc_get_crypto_hdr, - .rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl, .dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done, .dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail, .dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail, @@ -1756,7 +1737,7 @@ static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab, HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) | u32_encode_bits((srng->entry_size * srng->num_entries), HAL_TCL1_RING_BASE_MSB_RING_SIZE); - ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val); + ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val); val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE); ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val); @@ -1962,7 +1943,7 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc { u32 len; - len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN); + len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN); desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN); return len; @@ -2132,7 +2113,7 @@ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng) srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; else - srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; + srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); } /* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin() diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h index 8a78bb9a10bc1..fb7ec6fce07d3 100644 --- a/drivers/net/wireless/ath/ath12k/hal.h +++ b/drivers/net/wireless/ath/ath12k/hal.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
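The READ_ONCE() conversions in hal.c above pair with the dma_rmb() added in ce.c earlier in this series: the consumer must snapshot the device-written head pointer exactly once and order that read before reading the descriptor contents. Below is a minimal consumer-side sketch of the pattern; demo_ring, demo_desc and demo_ring_pop() are illustrative stand-ins, not the real ath12k types.

#include <linux/types.h>
#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/barrier.h>	/* dma_rmb() */

struct demo_desc {
	u32 flags;
};

struct demo_ring {
	u32 *hp_addr;		/* head pointer, DMA-written by the device */
	u32 tp;			/* tail pointer, owned by the CPU */
	u32 num_entries;
	struct demo_desc *desc;	/* descriptors, DMA-written by the device */
};

/* Consume one descriptor; returns false if the ring is empty. */
static bool demo_ring_pop(struct demo_ring *ring, struct demo_desc *out)
{
	/* Read the head pointer exactly once so the compiler cannot
	 * re-load a possibly newer value later in the function.
	 */
	u32 hp = READ_ONCE(*ring->hp_addr);

	if (ring->tp == hp)
		return false;

	/* Make sure the descriptor is read after the head pointer, as
	 * in ath12k_ce_completed_recv_next() above; without the barrier
	 * the CPU may observe a stale descriptor for a valid head value.
	 */
	dma_rmb();

	*out = ring->desc[ring->tp];
	ring->tp = (ring->tp + 1) % ring->num_entries;
	return true;
}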
*/ #ifndef ATH12K_HAL_H @@ -44,10 +44,14 @@ struct ath12k_base; #define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 #define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 #define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000 -#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x01b80000 -#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x01b81000 -#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x01b82000 -#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x01b83000 +#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \ + ((ab)->hw_params->regs->hal_umac_ce0_src_reg_base) +#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \ + ((ab)->hw_params->regs->hal_umac_ce0_dest_reg_base) +#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \ + ((ab)->hw_params->regs->hal_umac_ce1_src_reg_base) +#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \ + ((ab)->hw_params->regs->hal_umac_ce1_dest_reg_base) #define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000 #define HAL_CE_WFSS_CE_REG_BASE 0x01b80000 @@ -57,8 +61,10 @@ struct ath12k_base; /* SW2TCL(x) R0 ring configuration address */ #define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020 #define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240 -#define HAL_TCL1_RING_BASE_LSB 0x00000900 -#define HAL_TCL1_RING_BASE_MSB 0x00000904 +#define HAL_TCL1_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_base_lsb) +#define HAL_TCL1_RING_BASE_MSB(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_base_msb) #define HAL_TCL1_RING_ID(ab) ((ab)->hw_params->regs->hal_tcl1_ring_id) #define HAL_TCL1_RING_MISC(ab) \ ((ab)->hw_params->regs->hal_tcl1_ring_misc) @@ -76,30 +82,31 @@ struct ath12k_base; ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb) #define HAL_TCL1_RING_MSI1_DATA(ab) \ ((ab)->hw_params->regs->hal_tcl1_ring_msi1_data) -#define HAL_TCL2_RING_BASE_LSB 0x00000978 +#define HAL_TCL2_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_tcl2_ring_base_lsb) #define HAL_TCL_RING_BASE_LSB(ab) \ ((ab)->hw_params->regs->hal_tcl_ring_base_lsb) -#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \ - (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \ - (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \ - (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_BASE_MSB_OFFSET \ - (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_ID_OFFSET(ab) \ - (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \ - (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \ - (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \ - (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \ - (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB) -#define HAL_TCL1_RING_MISC_OFFSET(ab) \ - (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_MSI1_BASE_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_MSI1_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_MSI1_DATA(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_ID_OFFSET(ab) ({ typeof(ab) _ab = 
(ab); \ + (HAL_TCL1_RING_ID(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_TP_ADDR_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_TP_ADDR_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) +#define HAL_TCL1_RING_MISC_OFFSET(ab) ({ typeof(ab) _ab = (ab); \ + (HAL_TCL1_RING_MISC(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); }) /* SW2TCL(x) R2 ring pointers (head/tail) address */ #define HAL_TCL1_RING_HP 0x00002000 @@ -1068,7 +1075,6 @@ struct hal_rx_ops { bool (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc); void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc, struct ieee80211_hdr *hdr); - u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc); void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc, u8 *crypto_hdr, enum hal_encrypt_type enctype); diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h index 4f745cfd7d8e7..8cbe28950d0c0 100644 --- a/drivers/net/wireless/ath/ath12k/hal_desc.h +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" @@ -705,7 +705,7 @@ enum hal_rx_msdu_desc_reo_dest_ind { #define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29) #define HAL_RX_MSDU_PKT_LENGTH_GET(val) \ - (u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH)) + (le32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH)) struct rx_msdu_desc { __le32 info0; @@ -1296,6 +1296,7 @@ enum hal_wbm_htt_tx_comp_status { HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ, HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT, HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY, + HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH, HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX, }; diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index ec1bda95e555d..e3eb22bb9e1cb 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
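The offset macros above switch from plain subtraction to GNU statement expressions: ({ typeof(ab) _ab = (ab); ... }) captures the macro argument in a local so it is evaluated only once, even though each offset now expands into two register lookups. An illustrative standalone macro with the same shape; DEMO_REG_SPAN and demo_reg_hi/demo_reg_lo are hypothetical names, not ath12k ones.

#include <linux/types.h>

struct demo_dev;

u32 demo_reg_hi(struct demo_dev *dev);
u32 demo_reg_lo(struct demo_dev *dev);

/* 'dev' is captured in a local first, so DEMO_REG_SPAN(lookup_dev())
 * would call lookup_dev() once, not twice.
 */
#define DEMO_REG_SPAN(dev) ({ typeof(dev) _dev = (dev);	\
	demo_reg_hi(_dev) - demo_reg_lo(_dev); })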
*/ #include @@ -615,6 +615,9 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = { .hal_tcl1_ring_msi1_base_msb = 0x0000094c, .hal_tcl1_ring_msi1_data = 0x00000950, .hal_tcl_ring_base_lsb = 0x00000b58, + .hal_tcl1_ring_base_lsb = 0x00000900, + .hal_tcl1_ring_base_msb = 0x00000904, + .hal_tcl2_ring_base_lsb = 0x00000978, /* TCL STATUS ring address */ .hal_tcl_status_ring_base_lsb = 0x00000d38, @@ -677,6 +680,14 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = { /* REO status ring address */ .hal_reo_status_ring_base = 0x00000a84, + + /* CE base address */ + .hal_umac_ce0_src_reg_base = 0x01b80000, + .hal_umac_ce0_dest_reg_base = 0x01b81000, + .hal_umac_ce1_src_reg_base = 0x01b82000, + .hal_umac_ce1_dest_reg_base = 0x01b83000, + + .gcc_gcc_pcie_hot_rst = 0x1e38338, }; static const struct ath12k_hw_regs qcn9274_v2_regs = { @@ -691,6 +702,9 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = { .hal_tcl1_ring_msi1_base_msb = 0x0000094c, .hal_tcl1_ring_msi1_data = 0x00000950, .hal_tcl_ring_base_lsb = 0x00000b58, + .hal_tcl1_ring_base_lsb = 0x00000900, + .hal_tcl1_ring_base_msb = 0x00000904, + .hal_tcl2_ring_base_lsb = 0x00000978, /* TCL STATUS ring address */ .hal_tcl_status_ring_base_lsb = 0x00000d38, @@ -757,6 +771,14 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = { /* REO status ring address */ .hal_reo_status_ring_base = 0x00000aa0, + + /* CE base address */ + .hal_umac_ce0_src_reg_base = 0x01b80000, + .hal_umac_ce0_dest_reg_base = 0x01b81000, + .hal_umac_ce1_src_reg_base = 0x01b82000, + .hal_umac_ce1_dest_reg_base = 0x01b83000, + + .gcc_gcc_pcie_hot_rst = 0x1e38338, }; static const struct ath12k_hw_regs wcn7850_regs = { @@ -771,6 +793,9 @@ static const struct ath12k_hw_regs wcn7850_regs = { .hal_tcl1_ring_msi1_base_msb = 0x0000094c, .hal_tcl1_ring_msi1_data = 0x00000950, .hal_tcl_ring_base_lsb = 0x00000b58, + .hal_tcl1_ring_base_lsb = 0x00000900, + .hal_tcl1_ring_base_msb = 0x00000904, + .hal_tcl2_ring_base_lsb = 0x00000978, /* TCL STATUS ring address */ .hal_tcl_status_ring_base_lsb = 0x00000d38, @@ -833,6 +858,14 @@ static const struct ath12k_hw_regs wcn7850_regs = { /* REO status ring address */ .hal_reo_status_ring_base = 0x00000a84, + + /* CE base address */ + .hal_umac_ce0_src_reg_base = 0x01b80000, + .hal_umac_ce0_dest_reg_base = 0x01b81000, + .hal_umac_ce1_src_reg_base = 0x01b82000, + .hal_umac_ce1_dest_reg_base = 0x01b83000, + + .gcc_gcc_pcie_hot_rst = 0x1e40304, }; static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = { diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index 8d52182e28aef..862b11325a902 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_HW_H @@ -293,6 +293,9 @@ struct ath12k_hw_regs { u32 hal_tcl1_ring_msi1_base_msb; u32 hal_tcl1_ring_msi1_data; u32 hal_tcl_ring_base_lsb; + u32 hal_tcl1_ring_base_lsb; + u32 hal_tcl1_ring_base_msb; + u32 hal_tcl2_ring_base_lsb; u32 hal_tcl_status_ring_base_lsb; @@ -316,6 +319,11 @@ struct ath12k_hw_regs { u32 pcie_qserdes_sysclk_en_sel; u32 pcie_pcs_osc_dtct_config_base; + u32 hal_umac_ce0_src_reg_base; + u32 hal_umac_ce0_dest_reg_base; + u32 hal_umac_ce1_src_reg_base; + u32 hal_umac_ce1_dest_reg_base; + u32 hal_ppe_rel_ring_base; u32 hal_reo2_ring_base; @@ -347,6 +355,8 @@ struct ath12k_hw_regs { u32 hal_reo_cmd_ring_base; u32 hal_reo_status_ring_base; + + u32 gcc_gcc_pcie_hot_rst; }; static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type) diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index fbf5d57283576..4ca684278c367 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -3864,8 +3864,8 @@ static int ath12k_install_key(struct ath12k_vif *arvif, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: arg.key_cipher = WMI_CIPHER_AES_CCM; - /* TODO: Re-check if flag is valid */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; case WLAN_CIPHER_SUITE_TKIP: @@ -3873,12 +3873,10 @@ static int ath12k_install_key(struct ath12k_vif *arvif, arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; - case WLAN_CIPHER_SUITE_CCMP_256: - arg.key_cipher = WMI_CIPHER_AES_CCM; - break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: arg.key_cipher = WMI_CIPHER_AES_GCM; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; default: ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); @@ -5725,6 +5723,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, struct ath12k_base *ab = ar->ab; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info; + enum hal_encrypt_type enctype; + unsigned int mic_len; dma_addr_t paddr; int buf_id; int ret; @@ -5738,12 +5738,16 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, return -ENOSPC; info = IEEE80211_SKB_CB(skb); - if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { + if ((ATH12K_SKB_CB(skb)->flags & ATH12K_SKB_CIPHER_SET) && + !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { - skb_put(skb, IEEE80211_CCMP_MIC_LEN); + enctype = + ath12k_dp_tx_get_encrypt_type(ATH12K_SKB_CB(skb)->cipher); + mic_len = ath12k_dp_rx_crypto_mic_len(ar, enctype); + skb_put(skb, mic_len); } } diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 45d537066345a..0ac92a606cea0 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -290,10 +290,10 @@ static void ath12k_pci_enable_ltssm(struct ath12k_base *ab) ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val); - val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab)); val |= GCC_GCC_PCIE_HOT_RST_VAL; - ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); - val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST(ab), val); + val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab)); ath12k_dbg(ab, 
ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); @@ -1301,6 +1301,9 @@ void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend) { struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags)) + return; + /* restore aspm in case firmware bootup fails */ ath12k_pci_aspm_restore(ab_pci); @@ -1503,6 +1506,8 @@ static int ath12k_pci_probe(struct pci_dev *pdev, return 0; err_free_irq: + /* __free_irq() expects the caller to have cleared the affinity hint */ + ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); ath12k_pci_free_irq(ab); err_ce_free: @@ -1514,12 +1519,12 @@ static int ath12k_pci_probe(struct pci_dev *pdev, err_mhi_unregister: ath12k_mhi_unregister(ab_pci); -err_pci_msi_free: - ath12k_pci_msi_free(ab_pci); - err_irq_affinity_cleanup: ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); +err_pci_msi_free: + ath12k_pci_msi_free(ab_pci); + err_pci_free_region: ath12k_pci_free_region(ab_pci); diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h index 31584a7ad80eb..9321674eef8b8 100644 --- a/drivers/net/wireless/ath/ath12k/pci.h +++ b/drivers/net/wireless/ath/ath12k/pci.h @@ -28,7 +28,9 @@ #define PCIE_PCIE_PARF_LTSSM 0x1e081b0 #define PARM_LTSSM_VALUE 0x111 -#define GCC_GCC_PCIE_HOT_RST 0x1e38338 +#define GCC_GCC_PCIE_HOT_RST(ab) \ + ((ab)->hw_params->regs->gcc_gcc_pcie_hot_rst) + #define GCC_GCC_PCIE_HOT_RST_VAL 0x10 #define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228 diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 30836a09d5506..5c2130f77dac6 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -980,14 +980,24 @@ int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id) static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan, struct wmi_vdev_start_req_arg *arg) { + u32 center_freq1 = arg->band_center_freq1; + memset(chan, 0, sizeof(*chan)); chan->mhz = cpu_to_le32(arg->freq); - chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1); - if (arg->mode == MODE_11AC_VHT80_80) + chan->band_center_freq1 = cpu_to_le32(center_freq1); + if (arg->mode == MODE_11BE_EHT160) { + if (arg->freq > center_freq1) + chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40); + else + chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40); + + chan->band_center_freq2 = cpu_to_le32(center_freq1); + } else if (arg->mode == MODE_11BE_EHT80_80) { chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2); - else + } else { chan->band_center_freq2 = 0; + } chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE); if (arg->passive) @@ -2157,7 +2167,7 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, for (i = 0; i < arg->peer_eht_mcs_count; i++) { eht_mcs = ptr; - eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET, + eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET, sizeof(*eht_mcs)); eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]); @@ -4372,6 +4382,7 @@ static int ath12k_service_ready_ext_event(struct ath12k_base *ab, return 0; err: + kfree(svc_rdy_ext.mac_phy_caps); ath12k_wmi_free_dbring_caps(ab); return ret; } @@ -5783,7 +5794,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk goto fallback; } - spin_lock(&ab->base_lock); + spin_lock_bh(&ab->base_lock); if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { /* Once mac is registered, ar is valid and all CC events from * fw is considered to be received due to user 
requests @@ -5807,7 +5818,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk ab->default_regd[pdev_idx] = regd; } ab->dfs_region = reg_info->dfs_region; - spin_unlock(&ab->base_lock); + spin_unlock_bh(&ab->base_lock); goto mem_free; diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c index af98e871199d3..5a9e93fd1ef42 100644 --- a/drivers/net/wireless/ath/ath6kl/bmi.c +++ b/drivers/net/wireless/ath/ath6kl/bmi.c @@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, * We need to do some backwards compatibility to make this work. */ if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) { - WARN_ON(1); + ath6kl_err("mismatched byte count %d vs. expected %zd\n", + le32_to_cpu(targ_info->byte_count), + sizeof(*targ_info)); return -EINVAL; } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index 547634f82183d..81fa7cbad8921 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -290,6 +290,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, struct ath_common *common = ath9k_hw_common(priv->ah); int slot; + if (!priv->cur_beacon_conf.enable_beacon) + return; + if (swba->beacon_pending != 0) { priv->beacon.bmisscnt++; if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) { diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index a3e03580cd9ff..564ca6a619856 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -438,14 +438,21 @@ static void carl9170_usb_rx_complete(struct urb *urb) if (atomic_read(&ar->rx_anch_urbs) == 0) { /* - * The system is too slow to cope with - * the enormous workload. We have simply - * run out of active rx urbs and this - * unfortunately leads to an unpredictable - * device. + * At this point, either the system is too slow to + * cope with the enormous workload (so we have simply + * run out of active rx urbs and this unfortunately + * leads to an unpredictable device), or the device + * is not fully functional after an unsuccessful + * firmware loading attempt (so it doesn't pass + * ieee80211_register_hw() and there is no internal + * workqueue at all).
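+ * Either way the ping work must only be queued once
+ * ieee80211_register_hw() has succeeded, which is what the
+ * ar->registered check below distinguishes.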
*/ - ieee80211_queue_work(ar->hw, &ar->ping_work); + if (ar->registered) + ieee80211_queue_work(ar->hw, &ar->ping_work); + else + pr_warn_once("device %s is not registered\n", + dev_name(&ar->udev->dev)); } } else { /* diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 2e2fcb3807efb..10d647fbc971e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -44,6 +44,8 @@ IWL_QU_C_HR_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QU_B_JF_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_QU_C_JF_B_MODULE_FIRMWARE(api) \ + IWL_QU_C_JF_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_CC_A_MODULE_FIRMWARE(api) \ IWL_CC_A_FW_PRE "-" __stringify(api) ".ucode" @@ -423,6 +425,7 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QU_C_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index e96ddaeeeeff5..d013de30e7ed6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -962,7 +962,7 @@ u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx) u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx); bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10; - if (rate_idx <= IWL_FIRST_CCK_RATE) + if (rate_idx <= IWL_LAST_CCK_RATE) flags |= is_new_rate ? IWL_MAC_BEACON_CCK : IWL_MAC_BEACON_CCK_V1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index a8c4e354e2ce7..5f8f245804443 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2005 - 2014, 2018 - 2023 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2025 Intel Corporation * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH *****************************************************************************/ @@ -2709,6 +2710,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, optimal_rate); iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band, &txrc->reported_rate); + txrc->reported_rate.count = 1; } spin_unlock_bh(&lq_sta->pers.lock); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 18d7d59ae5814..462ebe088b3c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2726,6 +2726,8 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + spin_lock_bh(&rxq->lock); + pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", i); pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", @@ -2746,6 +2748,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); } + spin_unlock_bh(&rxq->lock); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); @@ -3410,8 +3413,11 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ + spin_lock_bh(&rxq->lock); num_rbs = iwl_get_closed_rb_stts(trans, rxq); num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; + spin_unlock_bh(&rxq->lock); + len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + (PAGE_SIZE << trans_pcie->rx_page_order)); diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c index 772084a9bd8d7..3baf8ab01e22b 100644 --- a/drivers/net/wireless/intersil/p54/fwio.c +++ b/drivers/net/wireless/intersil/p54/fwio.c @@ -231,6 +231,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf, mutex_lock(&priv->eeprom_mutex); priv->eeprom = buf; + priv->eeprom_slice_size = len; eeprom_hdr = skb_put(skb, eeprom_hdr_size + len); if (priv->fw_var < 0x509) { @@ -253,6 +254,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf, ret = -EBUSY; } priv->eeprom = NULL; + priv->eeprom_slice_size = 0; mutex_unlock(&priv->eeprom_mutex); return ret; } diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h index 522656de41598..aeb5e40cc5ef3 100644 --- a/drivers/net/wireless/intersil/p54/p54.h +++ b/drivers/net/wireless/intersil/p54/p54.h @@ -258,6 +258,7 @@ struct p54_common { /* eeprom handling */ void *eeprom; + size_t eeprom_slice_size; struct completion eeprom_comp; struct mutex eeprom_mutex; }; diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 8414aa208655f..2deb1bb54f24b 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -496,14 +496,19 @@ static void p54_rx_eeprom_readback(struct p54_common *priv, return ; if (priv->fw_var >= 0x509) { - memcpy(priv->eeprom, eeprom->v2.data, - le16_to_cpu(eeprom->v2.len)); + if (le16_to_cpu(eeprom->v2.len) != priv->eeprom_slice_size) + return; + + memcpy(priv->eeprom, eeprom->v2.data, priv->eeprom_slice_size); } else { - memcpy(priv->eeprom, eeprom->v1.data, - le16_to_cpu(eeprom->v1.len)); + if 
(le16_to_cpu(eeprom->v1.len) != priv->eeprom_slice_size) + return; + + memcpy(priv->eeprom, eeprom->v1.data, priv->eeprom_slice_size); } priv->eeprom = NULL; + priv->eeprom_slice_size = 0; tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); complete(&priv->eeprom_comp); diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 738bafc3749b0..66f0f5377ac18 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -403,14 +403,12 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && bss_desc->bcn_ht_oper->ht_param & - IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) { - chan_list->chan_scan_param[0].radio_type |= - CHAN_BW_40MHZ << 2; + IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) SET_SECONDARYCHAN(chan_list->chan_scan_param[0]. radio_type, (bss_desc->bcn_ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET)); - } + *buffer += struct_size(chan_list, chan_scan_param, 1); ret_len += struct_size(chan_list, chan_scan_param, 1); } diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 1f1f6280a0f25..86e20edb593b3 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -477,7 +477,9 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, "auth: receive authentication from %pM\n", ieee_hdr->addr3); } else { - if (!priv->wdev.connected) + if (!priv->wdev.connected || + !ether_addr_equal(ieee_hdr->addr3, + priv->curr_bss_params.bss_descriptor.mac_address)) return 0; if (ieee80211_is_deauth(ieee_hdr->frame_control)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 452579ccc4922..a6324f6ead781 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -1696,8 +1696,8 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, if (!sreq->ssids[i].ssid_len) continue; - req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); - memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid, + req->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(req->ssids[n_ssids].ssid, sreq->ssids[i].ssid, sreq->ssids[i].ssid_len); n_ssids++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 84ef80ab4afbf..96cecc576a986 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -17,6 +17,8 @@ static const struct usb_device_id mt76x2u_device_table[] = { { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ { USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */ + { USB_DEVICE(0x0471, 0x2126) }, /* LiteOn WN4516R module, nonstandard USB connector */ + { USB_DEVICE(0x0471, 0x7600) }, /* LiteOn WN4519R module, nonstandard USB connector */ { USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */ { USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 33a14365ec9b9..3b55628115115 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -191,6 +191,7 @@ 
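The mt76_connac_mcu.c scan-request change above is an index-compaction fix: zero-length SSIDs are skipped, so the destination array must be filled through its own counter or gaps are left in the firmware request. A sketch of the corrected loop shape; demo_pack_ssids, demo_ssid and the bounds are illustrative, not the mt76 definitions.

#include <linux/string.h>
#include <net/cfg80211.h>

#define DEMO_MAX_SSIDS	4
#define DEMO_SSID_LEN	32

struct demo_ssid {
	__le32 ssid_len;
	u8 ssid[DEMO_SSID_LEN];
};

/* Copy only non-empty SSIDs, writing through a separate counter so
 * the destination entries stay dense even when source slots are empty.
 */
static int demo_pack_ssids(struct demo_ssid *dst,
			   const struct cfg80211_scan_request *sreq)
{
	int i, n = 0;

	for (i = 0; i < sreq->n_ssids && n < DEMO_MAX_SSIDS; i++) {
		if (!sreq->ssids[i].ssid_len)
			continue;

		dst[n].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
		memcpy(dst[n].ssid, sreq->ssids[i].ssid,
		       sreq->ssids[i].ssid_len);
		n++;
	}

	return n;	/* number of packed entries */
}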
int mt76x2u_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct mt76_usb *usb = &dev->mt76.usb; + bool vht; int err; INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate); @@ -217,7 +218,17 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) /* check hw sg support in order to enable AMSDU */ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1; - err = mt76_register_device(&dev->mt76, true, mt76x02_rates, + switch (dev->mt76.rev) { + case 0x76320044: + /* these ASIC revisions do not support VHT */ + vht = false; + break; + default: + vht = true; + break; + } + + err = mt76_register_device(&dev->mt76, vht, mt76x02_rates, ARRAY_SIZE(mt76x02_rates)); if (err) goto fail; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 2e7604eed27b0..a6245c3ccef48 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -649,6 +649,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, wed->wlan.base = devm_ioremap(dev->mt76.dev, pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); + if (!wed->wlan.base) + return -ENOMEM; + wed->wlan.phy_base = pci_resource_start(pci_dev, 0); wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) + MT_INT_WED_SOURCE_CSR; @@ -676,6 +679,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, wed->wlan.bus_type = MTK_WED_BUS_AXI; wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start, resource_size(res)); + if (!wed->wlan.base) + return -ENOMEM; + wed->wlan.phy_base = res->start; wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR; wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 6a3629f71caaa..5b832f1aa00d7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -83,6 +83,11 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band, he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + + if (is_mt7922(phy->mt76->dev)) { + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + } break; case NL80211_IFTYPE_STATION: he_cap_elem->mac_cap_info[1] |= @@ -1168,6 +1173,9 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); + if (!msta->deflink.wcid.sta) + return; + mt792x_mutex_acquire(dev); if (enabled) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index 039949b344b98..02899320da5c1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -52,6 +52,8 @@ static int mt7925_thermal_init(struct mt792x_phy *phy) name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7925_hwmon_groups); @@ -204,6 +206,12 @@ static void mt7925_init_work(struct work_struct *work) return; } + ret = mt7925_mcu_set_thermal_protect(dev); + if (ret) { + dev_err(dev->mt76.dev, "thermal protection enable failed\n"); + return; + } + /* we support chip reset now */ dev->hw_init_done = true; diff 
--git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index d2a98c92e1147..ca5f1dc05815f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -1565,6 +1565,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, unsigned long valid = mvif->valid_links; u8 i; + if (!msta->vif) + return; + mt792x_mutex_acquire(dev); valid = ieee80211_vif_is_mld(vif) ? mvif->valid_links : BIT(0); @@ -1579,6 +1582,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, else clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags); + if (!mlink->wcid.sta) + continue; + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 2396e1795fe17..2aeb9ba4256ab 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -769,7 +769,7 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl) int ret; ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(WSYS_CONFIG), - &req, sizeof(req), false, NULL); + &req, sizeof(req), true, NULL); return ret; } @@ -961,6 +961,23 @@ int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable) } EXPORT_SYMBOL_GPL(mt7925_mcu_set_deep_sleep); +int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev) +{ + char cmd[64]; + int ret = 0; + + snprintf(cmd, sizeof(cmd), "ThermalProtGband %d %d %d %d %d %d %d %d %d %d", + 0, 100, 90, 80, 30, 1, 1, 115, 105, 5); + ret = mt7925_mcu_chip_config(dev, cmd); + + snprintf(cmd, sizeof(cmd), "ThermalProtAband %d %d %d %d %d %d %d %d %d %d", + 1, 100, 90, 80, 30, 1, 1, 115, 105, 5); + ret |= mt7925_mcu_chip_config(dev, cmd); + + return ret; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_thermal_protect); + int mt7925_run_firmware(struct mt792x_dev *dev) { int err; @@ -1411,7 +1428,7 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) }; return mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(EFUSE_CTRL), - &req, sizeof(req), false, NULL); + &req, sizeof(req), true, NULL); } EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom); @@ -2087,8 +2104,6 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, }, }; - mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true); - return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true); } @@ -2743,7 +2758,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable) conf->band = 0; /* unused */ err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS), - false); + true); return err; } @@ -2771,6 +2786,9 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct tlv *tlv; int max_len; + if (test_bit(MT76_HW_SCANNING, &phy->state)) + return -EBUSY; + max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) + sizeof(*bssid) + sizeof(*chan_info) + sizeof(*misc) + sizeof(*ie); @@ -2805,8 +2823,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, if (!sreq->ssids[i].ssid_len) continue; - ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); - memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid, + ssid->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(ssid->ssids[n_ssids].ssid, sreq->ssids[i].ssid, sreq->ssids[i].ssid_len); n_ssids++; } @@ -2858,7 +2876,7 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, } err = 
mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), - false); + true); if (err < 0) clear_bit(MT76_HW_SCANNING, &phy->state); @@ -2964,7 +2982,7 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, } return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), - false); + true); } EXPORT_SYMBOL_GPL(mt7925_mcu_sched_scan_req); @@ -3000,7 +3018,7 @@ mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, clear_bit(MT76_HW_SCHED_SCANNING, &phy->state); return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), - false); + true); } int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, @@ -3039,7 +3057,7 @@ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, } return mt76_mcu_send_msg(phy->dev, MCU_UNI_CMD(SCAN_REQ), - &req, sizeof(req), false); + &req, sizeof(req), true); } EXPORT_SYMBOL_GPL(mt7925_mcu_cancel_hw_scan); @@ -3144,7 +3162,7 @@ int mt7925_mcu_set_channel_domain(struct mt76_phy *phy) memcpy(__skb_push(skb, sizeof(req)), &req, sizeof(req)); return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SET_DOMAIN_INFO), - false); + true); } EXPORT_SYMBOL_GPL(mt7925_mcu_set_channel_domain); @@ -3287,7 +3305,8 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, else uni_txd->option = MCU_CMD_UNI_EXT_ACK; - if (cmd == MCU_UNI_CMD(HIF_CTRL)) + if (cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(CHIP_CONFIG)) uni_txd->option &= ~MCU_CMD_ACK; goto exit; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index 887427e0760ae..780c5921679aa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -635,6 +635,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, int mt7925_mcu_set_timing(struct mt792x_phy *phy, struct ieee80211_bss_conf *link_conf); int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable); +int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev); int mt7925_mcu_set_channel_domain(struct mt76_phy *phy); int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable); int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 9aec675450f26..5e428f19f9722 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -482,9 +482,6 @@ static int mt7925_pci_suspend(struct device *device) /* disable interrupt */ mt76_wr(dev, dev->irq_map->host_irq_enable, 0); - mt76_wr(dev, MT_WFDMA0_HOST_INT_DIS, - dev->irq_map->tx.all_complete_mask | - MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h index 985794a40c1a8..341987e47f67a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h @@ -28,7 +28,7 @@ #define MT_MDP_TO_HIF 0 #define MT_MDP_TO_WM 1 -#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x228) +#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x204) #define MT_WFDMA0_HOST_INT_DIS MT_WFDMA0(0x22c) #define HOST_RX_DONE_INT_ENA4 BIT(12) #define HOST_RX_DONE_INT_ENA5 BIT(13) @@ -58,7 +58,7 @@ #define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WM | \ MT_INT_TX_DONE_FWDL) -#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU_WM | \ +#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU | \ MT_INT_TX_DONE_BAND0 | \ GENMASK(18, 4)) diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c index 69a7d9b2e38bd..4b68d2fc5e094 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c @@ -493,7 +493,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], MT_RXQ_ID(MT_RXQ_MCU), MT7996_RX_MCU_RING_SIZE, - MT_RX_BUF_SIZE, + MT7996_RX_MCU_BUF_SIZE, MT_RXQ_RING_BASE(MT_RXQ_MCU)); if (ret) return ret; @@ -502,7 +502,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], MT_RXQ_ID(MT_RXQ_MCU_WA), MT7996_RX_MCU_RING_SIZE_WA, - MT_RX_BUF_SIZE, + MT7996_RX_MCU_BUF_SIZE, MT_RXQ_RING_BASE(MT_RXQ_MCU_WA)); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index d8a013812d1e3..c550385541143 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -1193,6 +1193,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454, IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); + eht_cap_elem->mac_cap_info[1] |= + IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK; + eht_cap_elem->phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index ef2d7eaaaffdd..0990a3d481f2d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -623,6 +623,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; } + /* IEEE 802.11 fragmentation can only be applied to unicast frames. + * Hence, drop fragments with multicast/broadcast RA. + * This check fixes vulnerabilities such as CVE-2020-26145.
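+ * (A fragment is identified either by the more-fragments bit or by a
+ * non-zero fragment number in the sequence control field; the RA type
+ * comes from the MT_RXD3_NORMAL_ADDR_TYPE classification in rxd3.)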
+ */ + if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) && + FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M) + return -EINVAL; + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; if (hdr_trans && ieee80211_has_morefrags(fc)) { if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index b6209ed1cfe01..bffee73b780cb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -323,6 +323,9 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr, wed->wlan.base = devm_ioremap(dev->mt76.dev, pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); + if (!wed->wlan.base) + return -ENOMEM; + wed->wlan.phy_base = pci_resource_start(pci_dev, 0); if (hif2) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index ab8c9070630b0..425fd030bee00 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -28,6 +28,9 @@ #define MT7996_RX_RING_SIZE 1536 #define MT7996_RX_MCU_RING_SIZE 512 #define MT7996_RX_MCU_RING_SIZE_WA 1024 +/* scatter-gather of mcu event is not supported in connac3 */ +#define MT7996_RX_MCU_BUF_SIZE (2048 + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define MT7996_FIRMWARE_WA "mediatek/mt7996/mt7996_wa.bin" #define MT7996_FIRMWARE_WM "mediatek/mt7996/mt7996_wm.bin" diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c index 56d1139ba8bcc..7e7bfa532ed25 100644 --- a/drivers/net/wireless/purelifi/plfxlc/usb.c +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -503,8 +503,10 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer, (void *)buffer, buffer_len, complete_fn, context); r = usb_submit_urb(urb, GFP_ATOMIC); - if (r) + if (r) { + usb_free_urb(urb); dev_err(&udev->dev, "Async write submit failed (%d)\n", r); + } return r; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index eface610178d2..f7f3a2340c392 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c @@ -108,7 +108,7 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops) } EXPORT_SYMBOL_GPL(rt2x00soc_probe); -int rt2x00soc_remove(struct platform_device *pdev) +void rt2x00soc_remove(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct rt2x00_dev *rt2x00dev = hw->priv; @@ -119,8 +119,6 @@ int rt2x00soc_remove(struct platform_device *pdev) rt2x00lib_remove_dev(rt2x00dev); rt2x00soc_free_reg(rt2x00dev); ieee80211_free_hw(hw); - - return 0; } EXPORT_SYMBOL_GPL(rt2x00soc_remove); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h index 021fd06b36272..d6226b8a10e00 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h @@ -17,7 +17,7 @@ * SoC driver handlers. 
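rt2x00soc_remove() is switched below to the void-returning remove callback used by the modern platform bus, where a nonzero return from remove was never acted on by the driver core anyway. A minimal sketch of the pattern, with a hypothetical driver name, not taken from rt2x00:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
        return 0;
    }

    /* remove callbacks return void: cleanup must not fail, only log */
    static void demo_remove(struct platform_device *pdev)
    {
    }

    static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .remove = demo_remove,
        .driver = { .name = "demo" },
    };
    module_platform_driver(demo_driver);
    MODULE_DESCRIPTION("void remove() sketch");
    MODULE_LICENSE("GPL");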
*/ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops); -int rt2x00soc_remove(struct platform_device *pdev); +void rt2x00soc_remove(struct platform_device *pdev); #ifdef CONFIG_PM int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state); int rt2x00soc_resume(struct platform_device *pdev); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 0eafc4d125f91..898f597f70a96 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -155,6 +155,16 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) ((u8)init_aspm) == (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1 | PCI_EXP_LNKCTL_CCC)) ppsc->support_aspm = false; + + /* RTL8723BE found on some ASUSTek laptops, such as F441U and + * X555UQ with subsystem ID 11ad:1723 are known to output large + * amounts of PCIe AER errors during and after boot up, causing + * heavy lags, poor network throughput, and occasional lock-ups. + */ + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8723BE && + (rtlpci->pdev->subsystem_vendor == 0x11ad && + rtlpci->pdev->subsystem_device == 0x1723)) + ppsc->support_aspm = false; } static bool _rtl_pci_platform_switch_device_pci_aspm( diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index a99776af56c27..c476e65c4d71e 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -309,7 +309,7 @@ static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - u8 para[2] = {0}; + u8 para[6] = {}; u8 times; u16 tbtt_interval = coex_stat->wl_beacon_interval; diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h index 96aeda26014e2..d4bee9c3ecfea 100644 --- a/drivers/net/wireless/realtek/rtw88/hci.h +++ b/drivers/net/wireless/realtek/rtw88/hci.h @@ -19,6 +19,8 @@ struct rtw_hci_ops { void (*link_ps)(struct rtw_dev *rtwdev, bool enter); void (*interface_cfg)(struct rtw_dev *rtwdev); void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable); + void (*write_firmware_page)(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size); int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size); int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size); @@ -79,6 +81,12 @@ static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable) rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable); } +static inline void rtw_hci_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size) +{ + rtwdev->hci.ops->write_firmware_page(rtwdev, page, data, size); +} + static inline int rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) { diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index d1c4f5cdcb21d..efb1da198e74c 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -854,8 +854,8 @@ static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en) } } -static void -write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size) +void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size) { u32 val32; u32 block_nr; @@ -885,6 +885,7 @@ write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size) 
rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data)); } } +EXPORT_SYMBOL(rtw_write_firmware_page); static int download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size) @@ -902,11 +903,13 @@ download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size) rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT); for (page = 0; page < total_page; page++) { - write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY); + rtw_hci_write_firmware_page(rtwdev, page, data, + DLFW_PAGE_SIZE_LEGACY); data += DLFW_PAGE_SIZE_LEGACY; } if (last_page_size) - write_firmware_page(rtwdev, page, data, last_page_size); + rtw_hci_write_firmware_page(rtwdev, page, data, + last_page_size); if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) { rtw_err(rtwdev, "failed to check download firmware report\n"); diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h index 58c3dccc14bb5..737c6d5d8da72 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.h +++ b/drivers/net/wireless/realtek/rtw88/mac.h @@ -32,6 +32,8 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw, u8 primary_ch_idx); int rtw_mac_power_on(struct rtw_dev *rtwdev); void rtw_mac_power_off(struct rtw_dev *rtwdev); +void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size); int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw); int rtw_mac_init(struct rtw_dev *rtwdev); void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 0b9b8807af2cb..fab9bb9257dd9 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -12,6 +12,7 @@ #include "fw.h" #include "ps.h" #include "debug.h" +#include "mac.h" static bool rtw_disable_msi; static bool rtw_pci_disable_aspm; @@ -1602,6 +1603,7 @@ static struct rtw_hci_ops rtw_pci_ops = { .link_ps = rtw_pci_link_ps, .interface_cfg = rtw_pci_interface_cfg, .dynamic_rx_agg = NULL, + .write_firmware_page = rtw_write_firmware_page, .read8 = rtw_pci_read8, .read16 = rtw_pci_read16, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 1dbe1cdbc3fd4..3157cd834233d 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -3993,7 +3993,8 @@ static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev) rtw_write32(rtwdev, REG_NCTL0, 0x00001148); rtw_write32(rtwdev, REG_NCTL0, 0x00001149); - check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55); + if (!check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55)) + rtw_warn(rtwdev, "DPK stuck, performance may be suboptimal\n"); rtw_write8(rtwdev, 0x1b10, 0x0); rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x0000000c); diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index 1d62b38526c48..787fa09fd063a 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -10,6 +10,7 @@ #include <linux/mmc/host.h> #include <linux/mmc/sdio_func.h> #include "main.h" +#include "mac.h" #include "debug.h" #include "fw.h" #include "ps.h" @@ -718,10 +719,7 @@ static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb, case RTW_TX_QUEUE_H2C: return TX_DESC_QSEL_H2C; case RTW_TX_QUEUE_MGMT: - if (rtw_chip_wcpu_11n(rtwdev)) - return TX_DESC_QSEL_HIGH; - else - return TX_DESC_QSEL_MGMT; + return TX_DESC_QSEL_MGMT; case 
RTW_TX_QUEUE_HI0: return TX_DESC_QSEL_HIGH; default: @@ -1158,6 +1156,7 @@ static struct rtw_hci_ops rtw_sdio_ops = { .link_ps = rtw_sdio_link_ps, .interface_cfg = rtw_sdio_interface_cfg, .dynamic_rx_agg = NULL, + .write_firmware_page = rtw_write_firmware_page, .read8 = rtw_sdio_read8, .read16 = rtw_sdio_read16, @@ -1228,10 +1227,7 @@ static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev, return; } - if (queue <= RTW_TX_QUEUE_VO) - rtw_sdio_indicate_tx_status(rtwdev, skb); - else - dev_kfree_skb_any(skb); + rtw_sdio_indicate_tx_status(rtwdev, skb); } static void rtw_sdio_tx_handler(struct work_struct *work) diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index 07695294767ac..a446be45f26e7 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -138,7 +138,7 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len) ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE, - addr, 0, data, len, 30000); + addr, 0, data, len, 500); if (ret < 0 && ret != -ENODEV && count++ < 4) rtw_err(rtwdev, "write register 0x%x failed with %d\n", addr, ret); @@ -164,6 +164,60 @@ static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val) rtw_usb_write(rtwdev, addr, val, 4); } +static void rtw_usb_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + struct usb_device *udev = rtwusb->udev; + u32 addr = FW_START_ADDR_LEGACY; + u8 *data_dup, *buf; + u32 n, block_size; + int ret; + + switch (rtwdev->chip->id) { + case RTW_CHIP_TYPE_8723D: + block_size = 254; + break; + default: + block_size = 196; + break; + } + + data_dup = kmemdup(data, size, GFP_KERNEL); + if (!data_dup) + return; + + buf = data_dup; + + rtw_write32_mask(rtwdev, REG_MCUFW_CTRL, BIT_ROM_PGE, page); + + while (size > 0) { + if (size >= block_size) + n = block_size; + else if (size >= 8) + n = 8; + else + n = 1; + + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE, + addr, 0, buf, n, 500); + if (ret != n) { + if (ret != -ENODEV) + rtw_err(rtwdev, + "write 0x%x len %d failed: %d\n", + addr, n, ret); + break; + } + + addr += n; + buf += n; + size -= n; + } + + kfree(data_dup); +} + static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping) { switch (dma_mapping) { @@ -815,6 +869,7 @@ static struct rtw_hci_ops rtw_usb_ops = { .link_ps = rtw_usb_link_ps, .interface_cfg = rtw_usb_interface_cfg, .dynamic_rx_agg = rtw_usb_dynamic_rx_agg, + .write_firmware_page = rtw_usb_write_firmware_page, .write8 = rtw_usb_write8, .write16 = rtw_usb_write16, diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 8d140b94cb440..0c8ea5e629e6a 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -6,6 +6,7 @@ #include "debug.h" #include "fw.h" #include "mac.h" +#include "ps.h" static struct sk_buff * rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev, @@ -447,9 +448,11 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: + rtw89_leave_ips_by_hwflags(rtwdev); hw_key_type = RTW89_SEC_KEY_TYPE_WEP40; break; case WLAN_CIPHER_SUITE_WEP104: + rtw89_leave_ips_by_hwflags(rtwdev); hw_key_type = RTW89_SEC_KEY_TYPE_WEP104; break; case WLAN_CIPHER_SUITE_CCMP: diff --git a/drivers/net/wireless/realtek/rtw89/fw.c 
b/drivers/net/wireless/realtek/rtw89/fw.c index e5c90050e7115..7dbce3b10a7de 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -5016,7 +5016,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, return 0; } -#define RTW89_SCAN_DELAY_TSF_UNIT 104800 +#define RTW89_SCAN_DELAY_TSF_UNIT 1000000 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, struct rtw89_scan_option *option, struct rtw89_vif_link *rtwvif_link, diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 9b09d4b7dea59..2188bca899e39 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -5513,11 +5513,11 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, case RTW89_MAC_C2H_CLASS_FWDBG: return; default: - rtw89_info(rtwdev, "c2h class %d not support\n", class); + rtw89_info(rtwdev, "MAC c2h class %d not support\n", class); return; } if (!handler) { - rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, + rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class, func); return; } diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 0ac84f968994b..e203d3b2a8274 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -228,7 +228,7 @@ int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev, struct sk_buff *skb) { struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); - int rx_tag_retry = 100; + int rx_tag_retry = 1000; int ret; do { diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 5c31639b4cade..355c3f58ab185 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -3062,10 +3062,16 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3 (int)(len - sizeof(report->hdr)), &report->state); } +static void +rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ +} + static void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) = { [RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state, + [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr, }; bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func) @@ -3119,11 +3125,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, return; fallthrough; default: - rtw89_info(rtwdev, "c2h class %d not support\n", class); + rtw89_info(rtwdev, "PHY c2h class %d not support\n", class); return; } if (!handler) { - rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, + rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class, func); return; } diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index 9bb9c9c8e7a1b..961a4bacb02a5 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -151,6 +151,7 @@ enum rtw89_phy_c2h_rfk_log_func { enum rtw89_phy_c2h_rfk_report_func { RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0, + RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6, }; enum rtw89_phy_c2h_dm_func { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c index 28907df7407d5..c958d6ab24d32 100644 --- 
a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c @@ -77,11 +77,6 @@ void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, RR_CFGCH_BAND0 | RR_CFGCH_CH); rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH); - if (band == RTW89_BAND_2G) - rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0); - else - rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1); - switch (band) { case RTW89_BAND_2G: default: diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 4a2b7c9921bc6..6fcc21f596ea7 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -1229,6 +1229,11 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, /* MLD not supported here */ u32 bcn_int = data->link_data[0].beacon_int; u64 delta = abs(tsf - now); + struct ieee80211_bss_conf *conf; + + conf = link_conf_dereference_protected(vif, data->link_data[0].link_id); + if (conf && !conf->enable_beacon) + return; /* adjust after beaconing with new timestamp at old TBTT */ if (tsf > now) { diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index f90c33d19b399..8fd7be37e209c 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -583,7 +583,11 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error) skb_queue_tail(q, skb); while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) { - zd_mac_tx_status(hw, skb_dequeue(q), + skb = skb_dequeue(q); + if (!skb) + break; + + zd_mac_tx_status(hw, skb, mac->ack_pending ? mac->ack_signal : 0, NULL); mac->ack_pending = 0; diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index 8755c5e6a65b3..c814fbd756a1e 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -550,8 +550,8 @@ static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id, struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); struct mhi_mbim_context *mbim = ctxt; - link->session = if_id; link->mbim = mbim; + link->session = mhi_mbim_get_link_mux_id(link->mbim->mdev->mhi_cntrl) + if_id; link->ndev = ndev; u64_stats_init(&link->rx_syncp); u64_stats_init(&link->tx_syncp); @@ -607,7 +607,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id { struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; struct mhi_mbim_context *mbim; - int err, link_id; + int err; mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL); if (!mbim) @@ -628,11 +628,8 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id /* Number of transfer descriptors determines size of the queue */ mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); - /* Get the corresponding mux_id from mhi */ - link_id = mhi_mbim_get_link_mux_id(cntrl); - /* Register wwan link ops with MHI controller representing WWAN instance */ - return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id); + return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0); } static void mhi_mbim_remove(struct mhi_device *mhi_dev) diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c index 91fa082e9cab8..fc0a7cb181df2 100644 --- a/drivers/net/wwan/t7xx/t7xx_netdev.c +++ b/drivers/net/wwan/t7xx/t7xx_netdev.c @@ -302,7 +302,7 @@ static int t7xx_ccmni_wwan_newlink(void *ctxt, struct 
net_device *dev, u32 if_id ccmni->ctlb = ctlb; ccmni->dev = dev; atomic_set(&ccmni->usage, 0); - ctlb->ccmni_inst[if_id] = ccmni; + WRITE_ONCE(ctlb->ccmni_inst[if_id], ccmni); ret = register_netdevice(dev); if (ret) @@ -324,6 +324,7 @@ static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct l if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni)) return; + WRITE_ONCE(ctlb->ccmni_inst[if_id], NULL); unregister_netdevice(dev); } @@ -419,7 +420,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu skb_cb = T7XX_SKB_CB(skb); netif_id = skb_cb->netif_idx; - ccmni = ccmni_ctlb->ccmni_inst[netif_id]; + ccmni = READ_ONCE(ccmni_ctlb->ccmni_inst[netif_id]); if (!ccmni) { dev_kfree_skb(skb); return; @@ -441,7 +442,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) { - struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]); struct netdev_queue *net_queue; if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) { @@ -453,7 +454,7 @@ static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) { - struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]); struct netdev_queue *net_queue; if (atomic_read(&ccmni->usage) > 0) { @@ -471,7 +472,7 @@ static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev, if (ctlb->md_sta != MD_STATE_READY) return; - if (!ctlb->ccmni_inst[0]) { + if (!READ_ONCE(ctlb->ccmni_inst[0])) { dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n"); return; } diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 2b9e6cfaf2a80..1a0058be58210 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -145,7 +145,7 @@ static const char * const nvme_statuses[] = { [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes", [NVME_SC_INVALID_PI] = "Invalid Protection Information", [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range", - [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported", + [NVME_SC_CMD_SIZE_LIM_EXCEEDED ] = "Command Size Limits Exceeded", [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error", [NVME_SC_ZONE_FULL] = "Zone Is Full", [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only", diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 98dad1bdff440..abd42598fc78b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -284,7 +284,6 @@ static blk_status_t nvme_error_status(u16 status) case NVME_SC_NS_NOT_READY: return BLK_STS_TARGET; case NVME_SC_BAD_ATTRIBUTES: - case NVME_SC_ONCS_NOT_SUPPORTED: case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: case NVME_SC_INVALID_NS: @@ -381,7 +380,7 @@ static void nvme_log_err_passthru(struct request *req) nr->cmd->common.cdw12, nr->cmd->common.cdw13, nr->cmd->common.cdw14, - nr->cmd->common.cdw14); + nr->cmd->common.cdw15); } enum nvme_disposition { diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index a1b3c538a4bd2..64ae8af01d9a4 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -442,21 +442,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, pdu->result = le64_to_cpu(nvme_req(req)->result.u64); /* - * For iopoll, complete it directly. 
Note that using the uring_cmd - * helper for this is safe only because we check blk_rq_is_poll(). - * As that returns false if we're NOT on a polled queue, then it's - * safe to use the polled completion helper. - * - * Otherwise, move the completion to task work. + * IOPOLL could potentially complete this request directly, but + * if multiple rings are polling on the same queue, then it's possible + * for one ring to find completions for another ring. Punting the + * completion via task_work will always direct it to the right + * location, rather than potentially complete requests for ringA + * under iopoll invocations from ringB. */ - if (blk_rq_is_poll(req)) { - if (pdu->bio) - blk_rq_unmap_user(pdu->bio); - io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status); - } else { - io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); - } - + io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); return RQ_END_IO_FREE; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index cd8a10f6accff..37fd1a8ace127 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3701,6 +3701,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index dc7922f226004..80dd09aa01a3b 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -82,8 +82,6 @@ static int nvme_status_to_pr_err(int status) return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: return PR_STS_RESERVATION_CONFLICT; - case NVME_SC_ONCS_NOT_SUPPORTED: - return -EOPNOTSUPP; case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 4cc72be28c731..25e486e6e8054 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -453,7 +453,8 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) return NULL; } - list_del(&req->entry); + list_del_init(&req->entry); + init_llist_node(&req->lentry); return req; } @@ -561,6 +562,8 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set, req->queue = queue; nvme_req(rq)->ctrl = &ctrl->ctrl; nvme_req(rq)->cmd = &pdu->cmd; + init_llist_node(&req->lentry); + INIT_LIST_HEAD(&req->entry); return 0; } @@ -765,6 +768,14 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return -EPROTO; } + if (llist_on_list(&req->lentry) || + !list_empty(&req->entry)) { + dev_err(queue->ctrl->ctrl.device, + "req %d unexpected r2t while processing request\n", + rq->tag); + return -EPROTO; + } + req->pdu_len = 0; req->h2cdata_left = r2t_length; req->h2cdata_offset = r2t_offset; @@ -1349,7 +1360,7 @@ static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) queue->nr_cqe = 0; consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); release_sock(sk); - return consumed; + return consumed == -EAGAIN ? 0 : consumed; } static void nvme_tcp_io_work(struct work_struct *w) @@ -1377,6 +1388,11 @@ static void nvme_tcp_io_work(struct work_struct *w) else if (unlikely(result < 0)) return; + /* did we get some space after spending time in recv? 
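The llist/list initialisation added around nvme-tcp requests makes stale queue membership detectable: a node that is merely deleted still looks queued, while init_llist_node() resets it to a recognisable off-list state, which is what the r2t sanity check above relies on. A toy sketch of that idiom, with hypothetical names, assuming only <linux/llist.h>:

    #include <linux/llist.h>

    struct demo_req {
        struct llist_node lentry;
    };

    static void demo_req_init(struct demo_req *req)
    {
        init_llist_node(&req->lentry); /* marked as not on any list */
    }

    static bool demo_queue_once(struct llist_head *list, struct demo_req *req)
    {
        if (llist_on_list(&req->lentry)) /* double queueing: reject */
            return false;

        llist_add(&req->lentry, list);
        return true;
    }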
*/ + if (nvme_tcp_queue_has_pending(queue) && + sk_stream_is_writeable(queue->sock->sk)) + pending = true; + if (!pending || !queue->rd_enabled) return; @@ -2594,6 +2610,8 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) ctrl->async_req.offset = 0; ctrl->async_req.curr_bio = NULL; ctrl->async_req.data_len = 0; + init_llist_node(&ctrl->async_req.lentry); + INIT_LIST_HEAD(&ctrl->async_req.entry); nvme_tcp_queue_request(&ctrl->async_req, true, true); } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ed2424f8a396e..4606c88136669 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -62,14 +62,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; case -EOPNOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); - switch (req->cmd->common.opcode) { - case nvme_cmd_dsm: - case nvme_cmd_write_zeroes: - return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; - default: - return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; - } - break; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; case -ENODATA: req->error_loc = offsetof(struct nvme_rw_command, nsid); return NVME_SC_ACCESS_DENIED; diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index da195d61a9664..f1b5ffc00ce88 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -623,12 +623,13 @@ fcloop_fcp_recv_work(struct work_struct *work) { struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, fcp_rcv_work); - struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; + struct nvmefc_fcp_req *fcpreq; unsigned long flags; int ret = 0; bool aborted = false; spin_lock_irqsave(&tfcp_req->reqlock, flags); + fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_START: tfcp_req->inistate = INI_IO_ACTIVE; @@ -643,16 +644,19 @@ fcloop_fcp_recv_work(struct work_struct *work) } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - if (unlikely(aborted)) - ret = -ECANCELED; - else { - if (likely(!check_for_drop(tfcp_req))) - ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, - &tfcp_req->tgt_fcp_req, - fcpreq->cmdaddr, fcpreq->cmdlen); - else - pr_info("%s: dropped command ********\n", __func__); + if (unlikely(aborted)) { + /* the abort handler will call fcloop_call_host_done */ + return; + } + + if (unlikely(check_for_drop(tfcp_req))) { + pr_info("%s: dropped command ********\n", __func__); + return; } + + ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, + &tfcp_req->tgt_fcp_req, + fcpreq->cmdaddr, fcpreq->cmdlen); if (ret) fcloop_call_host_done(fcpreq, tfcp_req, ret); } @@ -667,9 +671,10 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) unsigned long flags; spin_lock_irqsave(&tfcp_req->reqlock, flags); - fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_ABORTED: + fcpreq = tfcp_req->fcpreq; + tfcp_req->fcpreq = NULL; break; case INI_IO_COMPLETED: completed = true; @@ -691,10 +696,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, &tfcp_req->tgt_fcp_req); - spin_lock_irqsave(&tfcp_req->reqlock, flags); - tfcp_req->fcpreq = NULL; - spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); /* call_host_done releases reference for abort downcall */ } diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index eaf31c823cbe8..73ecbc13c5b23 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ 
b/drivers/nvme/target/io-cmd-bdev.c @@ -145,15 +145,8 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) req->error_loc = offsetof(struct nvme_rw_command, slba); break; case BLK_STS_NOTSUPP: + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); - switch (req->cmd->common.opcode) { - case nvme_cmd_dsm: - case nvme_cmd_write_zeroes: - status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; - break; - default: - status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; - } break; case BLK_STS_MEDIUM: status = NVME_SC_ACCESS_DENIED; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 190f55e6d7532..3062562c096a1 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -714,6 +714,8 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio) { if (bio != &req->b.inline_bio) bio_put(bio); + else + bio_uninit(bio); } #ifdef CONFIG_NVME_TARGET_AUTH diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c index 8682adaacd692..7da717d6c7faf 100644 --- a/drivers/nvmem/zynqmp_nvmem.c +++ b/drivers/nvmem/zynqmp_nvmem.c @@ -213,6 +213,7 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev) econfig.word_size = 1; econfig.size = ZYNQMP_NVMEM_SIZE; econfig.dev = dev; + econfig.priv = dev; econfig.add_legacy_fixed_of_cells = true; econfig.reg_read = zynqmp_nvmem_read; econfig.reg_write = zynqmp_nvmem_write; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 576e9beefc7c8..9a72f75e5c2d8 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -1916,15 +1916,16 @@ static int __init unittest_data_add(void) rc = of_resolve_phandles(unittest_data_node); if (rc) { pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc); - of_overlay_mutex_unlock(); - return -EINVAL; + rc = -EINVAL; + goto unlock; } /* attach the sub-tree to live tree */ if (!of_root) { pr_warn("%s: no live tree to attach sub-tree\n", __func__); kfree(unittest_data); - return -ENODEV; + rc = -ENODEV; + goto unlock; } EXPECT_BEGIN(KERN_INFO, @@ -1943,9 +1944,10 @@ static int __init unittest_data_add(void) EXPECT_END(KERN_INFO, "Duplicate name in testcase-data, renamed to \"duplicate-name#1\""); +unlock: of_overlay_mutex_unlock(); - return 0; + return rc; } #ifdef CONFIG_OF_OVERLAY diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 0bf4cde34f517..f700e8c490822 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -292,13 +292,14 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, struct cdns_pcie *pcie = &ep->pcie; u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; u32 val, reg; + u16 actual_interrupts = interrupts + 1; fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); reg = cap + PCI_MSIX_FLAGS; val = cdns_pcie_ep_fn_readw(pcie, fn, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; - val |= interrupts; + val |= interrupts; /* 0's based value */ cdns_pcie_ep_fn_writew(pcie, fn, reg, val); /* Set MSIX BAR and offset */ @@ -308,7 +309,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, /* Set PBA BAR and offset. 
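This fix and its twin in pcie-designware-ep.c below share one detail of the PCI spec: PCI_MSIX_FLAGS_QSIZE is a 0's based count, so an encoded value N means N + 1 vectors, and the PBA must sit after N + 1 table entries of 16 bytes each. A self-contained sketch of the corrected arithmetic (plain C, hypothetical helper name):

    #include <assert.h>
    #include <stdint.h>

    #define PCI_MSIX_ENTRY_SIZE 16

    static uint32_t pba_offset(uint32_t table_offset, uint16_t qsize_encoded)
    {
        uint32_t nr_vectors = qsize_encoded + 1; /* undo 0's based encoding */

        return table_offset + nr_vectors * PCI_MSIX_ENTRY_SIZE;
    }

    int main(void)
    {
        /* 8 vectors encode as QSIZE = 7; table at 0x0 puts the PBA at 0x80 */
        assert(pba_offset(0x0, 7) == 0x80);
        return 0;
    }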
BAR must match MSIX BAR */ reg = cap + PCI_MSIX_PBA; - val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + val = (offset + (actual_interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; cdns_pcie_ep_fn_writel(pcie, fn, reg, val); return 0; diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 8af95e9da7cec..741e10a575ec7 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -570,14 +570,5 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) if (!bridge->ops) bridge->ops = &cdns_pcie_host_ops; - ret = pci_host_probe(bridge); - if (ret < 0) - goto err_init; - - return 0; - - err_init: - pm_runtime_put_sync(dev); - - return ret; + return pci_host_probe(bridge); } diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index ad3028b755d16..3b24fed3177de 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -48,6 +48,8 @@ #define IMX95_PCIE_SS_RW_REG_0 0xf0 #define IMX95_PCIE_REF_CLKEN BIT(23) #define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9) +#define IMX95_PCIE_SS_RW_REG_1 0xf4 +#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31) #define IMX95_PE0_GEN_CTRL_1 0x1050 #define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0) @@ -206,6 +208,19 @@ static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie) static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie) { + /* + * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready + * Through Beacon or PERST# De-assertion + * + * When the auxiliary power is not available, the controller + * cannot exit from L23 Ready with beacon or PERST# de-assertion + * when main power is not removed. + * + * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1. 
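The ERR051624 workaround below is applied with regmap_set_bits(), which is shorthand for a read-modify-write where mask and value are the same bit set. A sketch of the equivalence, assuming a valid regmap and hypothetical names:

    #include <linux/regmap.h>

    /* regmap_set_bits(map, reg, bits) behaves like this: read the register,
     * OR in "bits", and write back only if the value changed.
     */
    static int demo_set_bits(struct regmap *map, unsigned int reg,
                             unsigned int bits)
    {
        return regmap_update_bits(map, reg, bits, bits);
    }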
+ */ + regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1, + IMX95_PCIE_SYS_AUX_PWR_DET); + regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_0, IMX95_PCIE_PHY_CR_PARA_SEL, diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 9e7e94f32b436..00289948f9c12 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -398,6 +398,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dw_pcie_ep_func *ep_func; u32 val, reg; + u16 actual_interrupts = interrupts + 1; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) @@ -408,7 +409,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, reg = ep_func->msix_cap + PCI_MSIX_FLAGS; val = dw_pcie_ep_readw_dbi(ep, func_no, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; - val |= interrupts; + val |= interrupts; /* 0's based value */ dw_pcie_writew_dbi(pci, reg, val); reg = ep_func->msix_cap + PCI_MSIX_TABLE; @@ -416,7 +417,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, dw_pcie_ep_writel_dbi(ep, func_no, reg, val); reg = ep_func->msix_cap + PCI_MSIX_PBA; - val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + val = (offset + (actual_interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; dw_pcie_ep_writel_dbi(ep, func_no, reg, val); dw_pcie_dbi_ro_wr_dis(pci); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 6d6cbc8b5b2c6..d40afe74ddd1a 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -752,22 +752,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes) /* Set link width speed control register */ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK; + lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES; switch (num_lanes) { case 1: plc |= PORT_LINK_MODE_1_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES; break; case 2: plc |= PORT_LINK_MODE_2_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES; break; case 4: plc |= PORT_LINK_MODE_4_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES; break; case 8: plc |= PORT_LINK_MODE_8_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES; break; default: dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes); diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 1170e1107508b..6b113a1212a92 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -44,7 +44,6 @@ #define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP) #define PCIE_RDLH_LINK_UP_CHGED BIT(1) #define PCIE_LINK_REQ_RST_NOT_INT BIT(2) -#define PCIE_L0S_ENTRY 0x11 #define PCIE_CLIENT_GENERAL_CONTROL 0x0 #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c @@ -177,8 +176,7 @@ static int rockchip_pcie_link_up(struct dw_pcie *pci) struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); u32 val = rockchip_pcie_get_ltssm(rockchip); - if ((val & PCIE_LINKUP) == PCIE_LINKUP && - (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY) + if ((val & PCIE_LINKUP) == PCIE_LINKUP) return 1; return 0; @@ -379,8 +377,8 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip) static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip) { - 
phy_exit(rockchip->phy); phy_power_off(rockchip->phy); + phy_exit(rockchip->phy); } static const struct dw_pcie_ops dw_pcie_ops = { diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 3a5511c3f7d97..5d77a01648606 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -403,6 +403,7 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = { .msix_capable = false, .bar[BAR_1] = { .type = BAR_RESERVED, }, .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 }, .bar[BAR_5] = { .type = BAR_RESERVED, }, .align = SZ_1M, }; diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index fefab2758a064..16725f9536f65 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -541,7 +541,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK); /* Assert PERST# before setting up the clock */ - gpiod_set_value(reset, 1); + gpiod_set_value_cansleep(reset, 1); ret = apple_pcie_setup_refclk(pcie, port); if (ret < 0) @@ -552,7 +552,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, /* Deassert PERST# */ rmw_set(PORT_PERST_OFF, port->base + PORT_PERST); - gpiod_set_value(reset, 0); + gpiod_set_value_cansleep(reset, 0); /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ msleep(100); @@ -585,6 +585,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, list_add_tail(&port->entry, &pcie->ports); init_completion(&pcie->event); + /* In the success path, we keep a reference to np around */ + of_node_get(np); + ret = apple_pcie_port_register_irqs(port); WARN_ON(ret); @@ -764,7 +767,6 @@ static int apple_pcie_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct platform_device *platform = to_platform_device(dev); - struct device_node *of_port; struct apple_pcie *pcie; int ret; @@ -787,11 +789,10 @@ static int apple_pcie_init(struct pci_config_window *cfg) if (ret) return ret; - for_each_child_of_node(dev->of_node, of_port) { + for_each_available_child_of_node_scoped(dev->of_node, of_port) { ret = apple_pcie_setup_port(pcie, of_port); if (ret) { dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret); - of_node_put(of_port); return ret; } } diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 50bc2892a36c5..963d2f3aa5d47 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -236,12 +236,13 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar, } dev = epc->dev.parent; - dma_free_coherent(dev, epf_bar[bar].size, addr, + dma_free_coherent(dev, epf_bar[bar].aligned_size, addr, epf_bar[bar].phys_addr); epf_bar[bar].phys_addr = 0; epf_bar[bar].addr = NULL; epf_bar[bar].size = 0; + epf_bar[bar].aligned_size = 0; epf_bar[bar].barno = 0; epf_bar[bar].flags = 0; } @@ -264,7 +265,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, enum pci_epc_interface_type type) { u64 bar_fixed_size = epc_features->bar[bar].fixed_size; - size_t align = epc_features->align; + size_t aligned_size, align = epc_features->align; struct pci_epf_bar *epf_bar; dma_addr_t phys_addr; struct pci_epc *epc; @@ -281,12 +282,18 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, return NULL; } size = bar_fixed_size; + } else { + /* BAR size 
must be power of two */ + size = roundup_pow_of_two(size); } - if (align) - size = ALIGN(size, align); - else - size = roundup_pow_of_two(size); + /* + * Allocate enough memory to accommodate the iATU alignment + * requirement. In most cases, this will be the same as .size but + * it might be different if, for example, the fixed size of a BAR + * is smaller than align. + */ + aligned_size = align ? ALIGN(size, align) : size; if (type == PRIMARY_INTERFACE) { epc = epf->epc; @@ -297,7 +304,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, } dev = epc->dev.parent; - space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); + space = dma_alloc_coherent(dev, aligned_size, &phys_addr, GFP_KERNEL); if (!space) { dev_err(dev, "failed to allocate mem space\n"); return NULL; @@ -306,6 +313,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, epf_bar[bar].phys_addr = phys_addr; epf_bar[bar].addr = space; epf_bar[bar].size = size; + epf_bar[bar].aligned_size = aligned_size; epf_bar[bar].barno = bar; if (upper_32_bits(size) || epc_features->bar[bar].only_64bit) epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index e9e9aaa91770a..d9996516f49e6 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -65,9 +65,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) rc = zpci_deconfigure_device(zdev); out: - mutex_unlock(&zdev->state_lock); if (pdev) pci_dev_put(pdev); + mutex_unlock(&zdev->state_lock); return rc; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 25c07af1686b9..51a09e48967f2 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4945,7 +4945,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) delay); if (!pcie_wait_for_link_delay(dev, true, delay)) { /* Did not train, no need to wait any further */ - pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); + pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay); return -ENOTTY; } @@ -5643,7 +5643,8 @@ static void pci_slot_unlock(struct pci_slot *slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } } diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 62650a2f00ccc..a392e060ca2f4 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -805,6 +805,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl); pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl); + /* Disable L0s/L1 before updating L1SS config */ + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) { + pcie_capability_write_word(child, PCI_EXP_LNKCTL, + child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, + parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC); + } + /* * Setup L0s state * @@ -829,6 +838,13 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) aspm_l1ss_init(link); + /* Restore L0s/L1 if they were enabled */ + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) { + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl); + pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl); + } + /* Save default state */ 
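The ASPM rework that follows hinges on one sequencing rule from PCIe r6.2, sec 5.5.4: the L1 PM Substates enable bits may only be changed while ASPM L0s/L1 is disabled on the link. A condensed sketch of the save/clear/reconfigure/restore pattern, assuming parent and child are the two ends of one link (hypothetical function, not the driver code):

    #include <linux/bitfield.h>
    #include <linux/pci.h>

    static void demo_l1ss_reconfig(struct pci_dev *parent, struct pci_dev *child)
    {
        u16 plnk, clnk;

        pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnk);
        pcie_capability_read_word(child, PCI_EXP_LNKCTL, &clnk);

        /* disable ASPM first: downstream (child) before upstream (parent) */
        if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnk) ||
            FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnk)) {
            pcie_capability_write_word(child, PCI_EXP_LNKCTL,
                                       clnk & ~PCI_EXP_LNKCTL_ASPMC);
            pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
                                       plnk & ~PCI_EXP_LNKCTL_ASPMC);
        }

        /* ... program the L1SS control registers here ... */

        /* re-enable: upstream before downstream, per sec 7.5.3.7 */
        pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnk);
        pcie_capability_write_word(child, PCI_EXP_LNKCTL, clnk);
    }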
link->aspm_default = link->aspm_enabled; @@ -845,25 +861,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) } } -/* Configure the ASPM L1 substates */ +/* Configure the ASPM L1 substates. Caller must disable L1 first. */ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) { - u32 val, enable_req; + u32 val; struct pci_dev *child = link->downstream, *parent = link->pdev; - enable_req = (link->aspm_enabled ^ state) & state; + val = 0; + if (state & PCIE_LINK_STATE_L1_1) + val |= PCI_L1SS_CTL1_ASPM_L1_1; + if (state & PCIE_LINK_STATE_L1_2) + val |= PCI_L1SS_CTL1_ASPM_L1_2; + if (state & PCIE_LINK_STATE_L1_1_PCIPM) + val |= PCI_L1SS_CTL1_PCIPM_L1_1; + if (state & PCIE_LINK_STATE_L1_2_PCIPM) + val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* - * Here are the rules specified in the PCIe spec for enabling L1SS: - * - When enabling L1.x, enable bit at parent first, then at child - * - When disabling L1.x, disable bit at child first, then at parent - * - When enabling ASPM L1.x, need to disable L1 - * (at child followed by parent). - * - The ASPM/PCIPM L1.2 must be disabled while programming timing + * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates: + * - Clear L1.x enable bits at child first, then at parent + * - Set L1.x enable bits at parent first, then at child + * - ASPM/PCIPM L1.2 must be disabled while programming timing * parameters - * - * To keep it simple, disable all L1SS bits first, and later enable - * what is needed. */ /* Disable all L1 substates */ @@ -871,26 +890,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) PCI_L1SS_CTL1_L1SS_MASK, 0); pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, PCI_L1SS_CTL1_L1SS_MASK, 0); - /* - * If needed, disable L1, and it gets enabled later - * in pcie_config_aspm_link(). - */ - if (enable_req & (PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2)) { - pcie_capability_clear_word(child, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPM_L1); - pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPM_L1); - } - - val = 0; - if (state & PCIE_LINK_STATE_L1_1) - val |= PCI_L1SS_CTL1_ASPM_L1_1; - if (state & PCIE_LINK_STATE_L1_2) - val |= PCI_L1SS_CTL1_ASPM_L1_2; - if (state & PCIE_LINK_STATE_L1_1_PCIPM) - val |= PCI_L1SS_CTL1_PCIPM_L1_1; - if (state & PCIE_LINK_STATE_L1_2_PCIPM) - val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, @@ -937,21 +936,30 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) dwstream |= PCI_EXP_LNKCTL_ASPM_L1; } + /* + * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable + * bits for ASPM L1 PM Substates must be done while ASPM L1 is + * disabled. Disable L1 here and apply new configuration after L1SS + * configuration has been completed. + * + * Per sec 7.5.3.7, when disabling ASPM L1, software must disable + * it in the Downstream component prior to disabling it in the + * Upstream component, and ASPM L1 must be enabled in the Upstream + * component prior to enabling it in the Downstream component. + * + * Sec 7.5.3.7 also recommends programming the same ASPM Control + * value for all functions of a multi-function device. 
+ */ + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_config_aspm_dev(child, 0); + pcie_config_aspm_dev(parent, 0); + if (link->aspm_capable & PCIE_LINK_STATE_L1SS) pcie_config_aspm_l1ss(link, state); - /* - * Spec 2.0 suggests all functions should be configured the - * same setting for ASPM. Enabling ASPM L1 should be done in - * upstream component first and then downstream, and vice - * versa for disabling ASPM L1. Spec doesn't mention L0S. - */ - if (state & PCIE_LINK_STATE_L1) - pcie_config_aspm_dev(parent, upstream); + pcie_config_aspm_dev(parent, upstream); list_for_each_entry(child, &linkbus->devices, bus_list) pcie_config_aspm_dev(child, dwstream); - if (!(state & PCIE_LINK_STATE_L1)) - pcie_config_aspm_dev(parent, upstream); link->aspm_enabled = state; diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index 2b6ef7efa3c11..cdc54315d879f 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -260,40 +260,48 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev, void dpc_process_error(struct pci_dev *pdev) { u16 cap = pdev->dpc_cap, status, source, reason, ext_reason; - struct aer_err_info info; + struct aer_err_info info = {}; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); - pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source); - - pci_info(pdev, "containment event, status:%#06x source:%#06x\n", - status, source); reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN; - ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT; - pci_warn(pdev, "%s detected\n", - (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR) ? - "unmasked uncorrectable error" : - (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE) ? - "ERR_NONFATAL" : - (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ? - "ERR_FATAL" : - (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ? - "RP PIO error" : - (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ? - "software trigger" : - "reserved error"); - - /* show RP PIO error detail information */ - if (pdev->dpc_rp_extensions && - reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT && - ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) - dpc_process_rp_pio_error(pdev); - else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR && - dpc_get_aer_uncorrect_severity(pdev, &info) && - aer_get_device_error_info(pdev, &info)) { - aer_print_error(pdev, &info); - pci_aer_clear_nonfatal_status(pdev); - pci_aer_clear_fatal_status(pdev); + + switch (reason) { + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR: + pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n", + status); + if (dpc_get_aer_uncorrect_severity(pdev, &info) && + aer_get_device_error_info(pdev, &info)) { + aer_print_error(pdev, &info); + pci_aer_clear_nonfatal_status(pdev); + pci_aer_clear_fatal_status(pdev); + } + break; + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE: + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE: + pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, + &source); + pci_warn(pdev, "containment event, status:%#06x, %s received from %04x:%02x:%02x.%d\n", + status, + (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ? + "ERR_FATAL" : "ERR_NONFATAL", + pci_domain_nr(pdev->bus), PCI_BUS_NUM(source), + PCI_SLOT(source), PCI_FUNC(source)); + break; + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT: + ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT; + pci_warn(pdev, "containment event, status:%#06x: %s detected\n", + status, + (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ? 
+ "RP PIO error" : + (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ? + "software trigger" : + "reserved error"); + /* show RP PIO error detail information */ + if (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO && + pdev->dpc_rp_extensions) + dpc_process_rp_pio_error(pdev); + break; } } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 064067d9c8b52..db609d26811ba 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4995,6 +4995,18 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } +static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * Loongson PCIe Root Ports don't advertise an ACS capability, but + * they do not allow peer-to-peer transactions between Root Ports. + * Allow each Root Port to be in a separate IOMMU group by masking + * SV/RR/CR/UF bits. + */ + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); +} + /* * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on * multi-function devices, the hardware isolates the functions by @@ -5128,6 +5140,17 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, + /* Loongson PCIe Root Ports */ + { PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs }, /* Amazon Annapurna Labs */ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, /* Zhaoxin multi-function devices */ diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c index 07446d784a1a6..c1e755c356a33 100644 --- a/drivers/perf/amlogic/meson_ddr_pmu_core.c +++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c @@ -511,7 +511,7 @@ int meson_ddr_pmu_create(struct platform_device *pdev) fmt_attr_fill(pmu->info.hw_info->fmt_attr); - pmu->cpu = smp_processor_id(); + pmu->cpu = raw_smp_processor_id(); name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME); if (!name) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 30506c43776f1..ff17e0f95fbb8 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -727,8 +727,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, if ((chan == 5 && cmn->rsp_vc_num < 2) || (chan == 6 && cmn->dat_vc_num < 2) || - (chan == 7 && cmn->snp_vc_num < 2) || - (chan == 8 && cmn->req_vc_num < 2)) + (chan == 7 && cmn->req_vc_num < 2) || + (chan == 8 && cmn->snp_vc_num < 2)) return 0; } @@ -884,8 +884,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \ _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \ _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \ - _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \ - _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5)) + _CMN_EVENT_XP(req2_##_name, 
(_event) | (7 << 5)), \ + _CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5)) #define CMN_EVENT_XP_DAT(_name, _event) \ _CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)), \ @@ -2557,6 +2557,7 @@ static int arm_cmn_probe(struct platform_device *pdev) cmn->dev = &pdev->dev; cmn->part = (unsigned long)device_get_match_data(cmn->dev); + cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); platform_set_drvdata(pdev, cmn); if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) { @@ -2584,7 +2585,6 @@ static int arm_cmn_probe(struct platform_device *pdev) if (err) return err; - cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); cmn->pmu = (struct pmu) { .module = THIS_MODULE, .parent = cmn->dev, @@ -2650,6 +2650,7 @@ static const struct acpi_device_id arm_cmn_acpi_match[] = { { "ARMHC600", PART_CMN600 }, { "ARMHC650" }, { "ARMHC700" }, + { "ARMHC003" }, {} }; MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c index 90fcfe693439e..b87d3a9ba7d54 100644 --- a/drivers/perf/arm-ni.c +++ b/drivers/perf/arm-ni.c @@ -576,6 +576,23 @@ static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_s return err; } +static void arm_ni_remove(struct platform_device *pdev) +{ + struct arm_ni *ni = platform_get_drvdata(pdev); + + for (int i = 0; i < ni->num_cds; i++) { + struct arm_ni_cd *cd = ni->cds + i; + + if (!cd->pmu_base) + continue; + + writel_relaxed(0, cd->pmu_base + NI_PMCR); + writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR); + perf_pmu_unregister(&cd->pmu); + cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node); + } +} + static void arm_ni_probe_domain(void __iomem *base, struct arm_ni_node *node) { u32 reg = readl_relaxed(base + NI_NODE_TYPE); @@ -644,6 +661,7 @@ static int arm_ni_probe(struct platform_device *pdev) ni->num_cds = num_cds; ni->part = part; ni->id = atomic_fetch_inc(&id); + platform_set_drvdata(pdev, ni); for (int v = 0; v < cfg.num_components; v++) { reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v)); @@ -657,8 +675,11 @@ static int arm_ni_probe(struct platform_device *pdev) reg = readl_relaxed(pd.base + NI_CHILD_PTR(c)); arm_ni_probe_domain(base + reg, &cd); ret = arm_ni_init_cd(ni, &cd, res->start); - if (ret) + if (ret) { + ni->cds[cd.id].pmu_base = NULL; + arm_ni_remove(pdev); return ret; + } } } } @@ -666,23 +687,6 @@ static int arm_ni_probe(struct platform_device *pdev) return 0; } -static void arm_ni_remove(struct platform_device *pdev) -{ - struct arm_ni *ni = platform_get_drvdata(pdev); - - for (int i = 0; i < ni->num_cds; i++) { - struct arm_ni_cd *cd = ni->cds + i; - - if (!cd->pmu_base) - continue; - - writel_relaxed(0, cd->pmu_base + NI_PMCR); - writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR); - perf_pmu_unregister(&cd->pmu); - cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node); - } -} - #ifdef CONFIG_OF static const struct of_device_id arm_ni_of_match[] = { { .compatible = "arm,ni-700" }, diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c index adc6394626ce8..f914f016b3d2c 100644 --- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c +++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c @@ -95,12 +95,12 @@ static u32 phy_tx_preemp_amp_tune_from_property(u32 microamp) static u32 phy_tx_vboost_level_from_property(u32 microvolt) { switch (microvolt) { - case 0 ... 960: - return 0; - case 961 ... 
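The arm-ni rework above is a common probe/remove restructuring: platform_set_drvdata() now happens before any component is registered, and a mid-loop failure funnels into the same arm_ni_remove() used at normal teardown (with the failing component marked so teardown skips it). A hedged sketch of the shape with generic names - foo_* is illustrative, not the driver's API:

    static void foo_remove(struct platform_device *pdev)
    {
            struct foo *foo = platform_get_drvdata(pdev);

            for (int i = 0; i < foo->num_units; i++)
                    if (foo->units[i].live)         /* skip never-initialized units */
                            foo_unit_teardown(&foo->units[i]);
    }

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return -ENOMEM;

            /* publish drvdata first so the error path can reuse foo_remove() */
            platform_set_drvdata(pdev, foo);

            for (int i = 0; i < foo->num_units; i++) {
                    int ret = foo_unit_setup(&foo->units[i]);

                    if (ret) {
                            foo_remove(pdev);       /* unwind the units that succeeded */
                            return ret;
                    }
            }
            return 0;
    }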
1160: - return 2; - default: + case 1156: + return 5; + case 844: return 3; + default: + return 4; } } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index 8e2cd2c178d6b..c12efd127a612 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -2044,12 +2044,16 @@ static void __iomem *qmp_usb_iomap(struct device *dev, struct device_node *np, int index, bool exclusive) { struct resource res; + void __iomem *mem; if (!exclusive) { if (of_address_to_resource(np, index, &res)) return IOMEM_ERR_PTR(-EINVAL); - return devm_ioremap(dev, res.start, resource_size(&res)); + mem = devm_ioremap(dev, res.start, resource_size(&res)); + if (!mem) + return IOMEM_ERR_PTR(-ENOMEM); + return mem; } return devm_of_iomap(dev, np, index, NULL); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index dc6e01dff5c74..5547f8df8e717 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -192,6 +192,7 @@ #define LN3_TX_SER_RATE_SEL_HBR2 BIT(3) #define LN3_TX_SER_RATE_SEL_HBR3 BIT(2) +#define HDMI14_MAX_RATE 340000000 #define HDMI20_MAX_RATE 600000000 struct lcpll_config { @@ -328,6 +329,8 @@ static const struct ropll_config ropll_tmds_cfg[] = { 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 650000, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, + { 502500, 84, 84, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 11, 1, 4, 5, + 4, 11, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 337500, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5, 1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 400000, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0, @@ -778,9 +781,7 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx, { const struct ropll_config *cfg = NULL; struct ropll_config rc = {0}; - int i; - - hdptx->rate = rate * 100; + int ret, i; for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++) if (rate == ropll_tmds_cfg[i].bit_rate) { @@ -839,7 +840,11 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx, regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_CLK_EN, PLL_PCG_CLK_EN); - return rk_hdptx_post_enable_pll(hdptx); + ret = rk_hdptx_post_enable_pll(hdptx); + if (!ret) + hdptx->rate = rate * 100; + + return ret; } static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx, @@ -849,7 +854,7 @@ static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx, regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06); - if (rate >= 3400000) { + if (rate > HDMI14_MAX_RATE / 100) { /* For 1/40 bitrate clk */ rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_lntop_highbr_seq); } else { diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c index cb5454fbe2c8f..b505d89860b43 100644 --- a/drivers/phy/starfive/phy-jh7110-usb.c +++ b/drivers/phy/starfive/phy-jh7110-usb.c @@ -18,6 +18,8 @@ #include #define USB_125M_CLK_RATE 125000000 +#define USB_CLK_MODE_OFF 0x0 +#define USB_CLK_MODE_RX_NORMAL_PWR BIT(1) #define USB_LS_KEEPALIVE_OFF 0x4 #define USB_LS_KEEPALIVE_ENABLE BIT(4) @@ -78,6 +80,7 @@ static int jh7110_usb2_phy_init(struct phy *_phy) { struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy); int ret; + unsigned int val; ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE); if (ret) @@ -87,6 +90,10 @@ static int jh7110_usb2_phy_init(struct phy *_phy) if (ret) return ret; + val = readl(phy->regs + 
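The Rockchip hdptx fix above captures one idea worth isolating: don't update cached driver state until the hardware operation has succeeded. hdptx->rate used to be written before the PLL was brought up, so a failed rk_hdptx_post_enable_pll() left the cache describing a rate that was never programmed. The pattern reduced to its core, with generic, illustrative names:

    static int set_rate(struct dev_state *st, unsigned long rate)
    {
            int ret = hw_program_pll(st, rate);     /* hypothetical helper */

            if (ret)
                    return ret;     /* cache still matches real hardware state */

            st->cur_rate = rate;    /* commit only after success */
            return 0;
    }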
USB_CLK_MODE_OFF); + val |= USB_CLK_MODE_RX_NORMAL_PWR; + writel(val, phy->regs + USB_CLK_MODE_OFF); + return 0; } diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 4c4ada06423d7..53b6eb7486593 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -358,9 +358,7 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev, val = grp->val[func]; - regmap_update_bits(info->regmap, reg, mask, val); - - return 0; + return regmap_update_bits(info->regmap, reg, mask, val); } static int armada_37xx_pmx_set(struct pinctrl_dev *pctldev, @@ -402,10 +400,13 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip, struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = OUTPUT_EN; unsigned int val, mask; + int ret; armada_37xx_update_reg(®, &offset); mask = BIT(offset); - regmap_read(info->regmap, reg, &val); + ret = regmap_read(info->regmap, reg, &val); + if (ret) + return ret; if (val & mask) return GPIO_LINE_DIRECTION_OUT; @@ -417,20 +418,22 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); - unsigned int reg = OUTPUT_EN; + unsigned int en_offset = offset; + unsigned int reg = OUTPUT_VAL; unsigned int mask, val, ret; armada_37xx_update_reg(®, &offset); mask = BIT(offset); + val = value ? mask : 0; - ret = regmap_update_bits(info->regmap, reg, mask, mask); - + ret = regmap_update_bits(info->regmap, reg, mask, val); if (ret) return ret; - reg = OUTPUT_VAL; - val = value ? mask : 0; - regmap_update_bits(info->regmap, reg, mask, val); + reg = OUTPUT_EN; + armada_37xx_update_reg(®, &en_offset); + + regmap_update_bits(info->regmap, reg, mask, mask); return 0; } @@ -440,11 +443,14 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = INPUT_VAL; unsigned int val, mask; + int ret; armada_37xx_update_reg(®, &offset); mask = BIT(offset); - regmap_read(info->regmap, reg, &val); + ret = regmap_read(info->regmap, reg, &val); + if (ret) + return ret; return (val & mask) != 0; } @@ -469,16 +475,17 @@ static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, { struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); struct gpio_chip *chip = range->gc; + int ret; dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n", offset, range->name, offset, input ? "input" : "output"); if (input) - armada_37xx_gpio_direction_input(chip, offset); + ret = armada_37xx_gpio_direction_input(chip, offset); else - armada_37xx_gpio_direction_output(chip, offset, 0); + ret = armada_37xx_gpio_direction_output(chip, offset, 0); - return 0; + return ret; } static int armada_37xx_gpio_request_enable(struct pinctrl_dev *pctldev, diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index a12766b3bc8a7..debf36ce57857 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -933,6 +933,17 @@ static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend pin, is_suspend ? "suspend" : "hibernate"); } + /* + * debounce enabled over suspend has shown issues with a GPIO + * being unable to wake the system, as we're only interested in + * the actual wakeup event, clear it. 
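The armada_37xx direction_output rework above swaps the register order: the output latch (OUTPUT_VAL) is programmed before the driver stage is enabled (OUTPUT_EN), so the pin cannot briefly drive a stale level while switching to output. Reduced to its core (the driver additionally remaps the bit offset per register, omitted here):

    /* set the level first, then turn the output driver on */
    ret = regmap_update_bits(map, OUTPUT_VAL, mask, value ? mask : 0);
    if (ret)
            return ret;
    return regmap_update_bits(map, OUTPUT_EN, mask, mask);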
+ */ + if (gpio_dev->saved_regs[i] & (DB_CNTRl_MASK << DB_CNTRL_OFF)) { + amd_gpio_set_debounce(gpio_dev, pin, 0); + pm_pr_dbg("Clearing debounce for GPIO #%d during %s.\n", + pin, is_suspend ? "suspend" : "hibernate"); + } + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); } diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 93ab277d9943c..fbe74e4ef320c 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1819,12 +1819,16 @@ static int at91_gpio_probe(struct platform_device *pdev) struct at91_gpio_chip *at91_chip = NULL; struct gpio_chip *chip; struct pinctrl_gpio_range *range; + int alias_idx; int ret = 0; int irq, i; - int alias_idx = of_alias_get_id(np, "gpio"); uint32_t ngpio; char **names; + alias_idx = of_alias_get_id(np, "gpio"); + if (alias_idx < 0) + return alias_idx; + BUG_ON(alias_idx >= ARRAY_SIZE(gpio_chips)); if (gpio_chips[alias_idx]) return dev_err_probe(dev, -EBUSY, "%d slot is occupied.\n", alias_idx); diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 70d7485ada364..60fcd53830a7d 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -636,6 +636,14 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, mcp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + /* + * Reset the chip - we don't really know what state it's in, so reset + * all pins to input first to prevent surprises. + */ + ret = mcp_write(mcp, MCP_IODIR, mcp->chip.ngpio == 16 ? 0xFFFF : 0xFF); + if (ret < 0) + return ret; + /* verify MCP_IOCON.SEQOP = 0, so sequential reads work, * and MCP_IOCON.HAEN = 1, so we work with all chips. */ diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 018e96d921c05..5532328097894 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1035,6 +1035,25 @@ static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d, test_bit(d->hwirq, pctrl->skip_wake_irqs); } +static void msm_gpio_irq_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + const struct msm_pingroup *g; + int i; + + bitmap_fill(valid_mask, ngpios); + + for (i = 0; i < ngpios; i++) { + g = &pctrl->soc->groups[i]; + + if (g->intr_detection_width != 1 && + g->intr_detection_width != 2) + clear_bit(i, valid_mask); + } +} + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1438,6 +1457,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; girq->parents[0] = pctrl->irq; + girq->init_valid_mask = msm_gpio_irq_init_valid_mask; ret = gpiochip_add_data(&pctrl->chip, pctrl); if (ret) { diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c index f5c1c427b44e9..61b7c22e963c2 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c +++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c @@ -165,6 +165,10 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = { PINCTRL_PIN(62, "GPIO_62"), PINCTRL_PIN(63, "GPIO_63"), PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), PINCTRL_PIN(69, "GPIO_69"), PINCTRL_PIN(70, "GPIO_70"), PINCTRL_PIN(71, "GPIO_71"), @@ -179,12 +183,17 @@ static const struct 
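msm_gpio_irq_init_valid_mask() above follows the stock gpiolib convention for the girq->init_valid_mask hook: start from an all-valid bitmap, then clear every line the hardware cannot serve. Condensed, with a hypothetical predicate standing in for the intr_detection_width test:

    bitmap_fill(valid_mask, ngpios);        /* assume every line is usable */
    for (i = 0; i < ngpios; i++)
            if (!line_has_irq_logic(i))     /* hypothetical per-line check */
                    clear_bit(i, valid_mask);

gpiolib consults this mask before mapping IRQs, so requests for the cleared lines fail cleanly instead of reaching a line with no detection logic behind it.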
pinctrl_pin_desc qcm2290_pins[] = { PINCTRL_PIN(80, "GPIO_80"), PINCTRL_PIN(81, "GPIO_81"), PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), PINCTRL_PIN(86, "GPIO_86"), PINCTRL_PIN(87, "GPIO_87"), PINCTRL_PIN(88, "GPIO_88"), PINCTRL_PIN(89, "GPIO_89"), PINCTRL_PIN(90, "GPIO_90"), PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), PINCTRL_PIN(94, "GPIO_94"), PINCTRL_PIN(95, "GPIO_95"), PINCTRL_PIN(96, "GPIO_96"), diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index 23b4bc1e5da81..a2ac1702d0dfa 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -809,8 +809,8 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = { .pin_banks = exynosautov920_pin_banks0, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks0), .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, .retention_data = &exynosautov920_retention_data, }, { /* pin-controller instance 1 AUD data */ @@ -821,43 +821,43 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = { .pin_banks = exynosautov920_pin_banks2, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks2), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 3 HSI1 data */ .pin_banks = exynosautov920_pin_banks3, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks3), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 4 HSI2 data */ .pin_banks = exynosautov920_pin_banks4, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks4), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 5 HSI2UFS data */ .pin_banks = exynosautov920_pin_banks5, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks5), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 6 PERIC0 data */ .pin_banks = exynosautov920_pin_banks6, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks6), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 7 PERIC1 data */ .pin_banks = exynosautov920_pin_banks7, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks7), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, }; @@ -1024,15 +1024,15 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = { .pin_banks = gs101_pin_alive, .nr_banks = ARRAY_SIZE(gs101_pin_alive), .eint_wkup_init = exynos_eint_wkup_init, - 
.suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (FAR_ALIVE) */ .pin_banks = gs101_pin_far_alive, .nr_banks = ARRAY_SIZE(gs101_pin_far_alive), .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (GSACORE) */ .pin_banks = gs101_pin_gsacore, @@ -1046,29 +1046,29 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = { .pin_banks = gs101_pin_peric0, .nr_banks = ARRAY_SIZE(gs101_pin_peric0), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (PERIC1) */ .pin_banks = gs101_pin_peric1, .nr_banks = ARRAY_SIZE(gs101_pin_peric1), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (HSI1) */ .pin_banks = gs101_pin_hsi1, .nr_banks = ARRAY_SIZE(gs101_pin_hsi1), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (HSI2) */ .pin_banks = gs101_pin_hsi2, .nr_banks = ARRAY_SIZE(gs101_pin_hsi2), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, }; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index ac6dc22b37c98..7887fd4166511 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -761,153 +761,187 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return 0; } -static void exynos_pinctrl_suspend_bank( - struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +static void exynos_set_wakeup(struct samsung_pin_bank *bank) { - struct exynos_eint_gpio_save *save = bank->soc_priv; - const void __iomem *regs = bank->eint_base; + struct exynos_irq_chip *irq_chip; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for saving state\n"); - return; + if (bank->irq_chip) { + irq_chip = bank->irq_chip; + irq_chip->set_eint_wakeup_mask(bank->drvdata, irq_chip); } - - save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset); - save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset); - save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4); - save->eint_mask = readl(regs + bank->irq_chip->eint_mask - + bank->eint_offset); - - clk_disable(bank->drvdata->pclk); - - pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); - pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); - pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); - pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); } -static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +void exynos_pinctrl_suspend(struct 
samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; const void __iomem *regs = bank->eint_base; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for saving state\n"); - return; + if (bank->eint_type == EINT_TYPE_GPIO) { + save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset); + save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); + save->eint_mask = readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", + bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", + bank->name, save->eint_fltcon0); + pr_debug("%s: save fltcon1 %#010x\n", + bank->name, save->eint_fltcon1); + pr_debug("%s: save mask %#010x\n", + bank->name, save->eint_mask); + } else if (bank->eint_type == EINT_TYPE_WKUP) { + exynos_set_wakeup(bank); } - - save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset); - save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset); - - clk_disable(bank->drvdata->pclk); - - pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); - pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); } -void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) +void gs101_pinctrl_suspend(struct samsung_pin_bank *bank) { - struct samsung_pin_bank *bank = drvdata->pin_banks; - struct exynos_irq_chip *irq_chip = NULL; - int i; + struct exynos_eint_gpio_save *save = bank->soc_priv; + const void __iomem *regs = bank->eint_base; - for (i = 0; i < drvdata->nr_banks; ++i, ++bank) { - if (bank->eint_type == EINT_TYPE_GPIO) { - if (bank->eint_con_offset) - exynosauto_pinctrl_suspend_bank(drvdata, bank); - else - exynos_pinctrl_suspend_bank(drvdata, bank); - } - else if (bank->eint_type == EINT_TYPE_WKUP) { - if (!irq_chip) { - irq_chip = bank->irq_chip; - irq_chip->set_eint_wakeup_mask(drvdata, - irq_chip); - } - } + if (bank->eint_type == EINT_TYPE_GPIO) { + save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + + save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + bank->eint_fltcon_offset); + + /* fltcon1 register only exists for pins 4-7 */ + if (bank->nr_pins > 4) + save->eint_fltcon1 = readl(regs + + EXYNOS_GPIO_EFLTCON_OFFSET + + bank->eint_fltcon_offset + 4); + + save->eint_mask = readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", + bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", + bank->name, save->eint_fltcon0); + if (bank->nr_pins > 4) + pr_debug("%s: save fltcon1 %#010x\n", + bank->name, save->eint_fltcon1); + pr_debug("%s: save mask %#010x\n", + bank->name, save->eint_mask); + } else if (bank->eint_type == EINT_TYPE_WKUP) { + exynos_set_wakeup(bank); } } -static void exynos_pinctrl_resume_bank( - struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; - void __iomem *regs = bank->eint_base; + const void __iomem *regs = bank->eint_base; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for restoring state\n"); - return; + if (bank->eint_type == EINT_TYPE_GPIO) { + save->eint_con = readl(regs + bank->pctl_offset + + bank->eint_con_offset); + save->eint_mask = 
readl(regs + bank->pctl_offset + + bank->eint_mask_offset); + pr_debug("%s: save con %#010x\n", + bank->name, save->eint_con); + pr_debug("%s: save mask %#010x\n", + bank->name, save->eint_mask); + } else if (bank->eint_type == EINT_TYPE_WKUP) { + exynos_set_wakeup(bank); } +} - pr_debug("%s: con %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset), save->eint_con); - pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset), save->eint_fltcon0); - pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4), save->eint_fltcon1); - pr_debug("%s: mask %#010x => %#010x\n", bank->name, - readl(regs + bank->irq_chip->eint_mask - + bank->eint_offset), save->eint_mask); - - writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset); - writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset); - writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4); - writel(save->eint_mask, regs + bank->irq_chip->eint_mask - + bank->eint_offset); +void gs101_pinctrl_resume(struct samsung_pin_bank *bank) +{ + struct exynos_eint_gpio_save *save = bank->soc_priv; - clk_disable(bank->drvdata->pclk); + void __iomem *regs = bank->eint_base; + void __iomem *eint_fltcfg0 = regs + EXYNOS_GPIO_EFLTCON_OFFSET + + bank->eint_fltcon_offset; + + if (bank->eint_type == EINT_TYPE_GPIO) { + pr_debug("%s: con %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset), save->eint_con); + + pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, + readl(eint_fltcfg0), save->eint_fltcon0); + + /* fltcon1 register only exists for pins 4-7 */ + if (bank->nr_pins > 4) + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(eint_fltcfg0 + 4), save->eint_fltcon1); + + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + writel(save->eint_fltcon0, eint_fltcfg0); + + if (bank->nr_pins > 4) + writel(save->eint_fltcon1, eint_fltcfg0 + 4); + writel(save->eint_mask, regs + bank->irq_chip->eint_mask + + bank->eint_offset); + } } -static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +void exynos_pinctrl_resume(struct samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; void __iomem *regs = bank->eint_base; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for restoring state\n"); - return; + if (bank->eint_type == EINT_TYPE_GPIO) { + pr_debug("%s: con %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset), save->eint_con); + pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset), save->eint_fltcon0); + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4), + save->eint_fltcon1); + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * 
bank->eint_offset); + writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); + writel(save->eint_mask, regs + bank->irq_chip->eint_mask + + bank->eint_offset); } - - pr_debug("%s: con %#010x => %#010x\n", bank->name, - readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con); - pr_debug("%s: mask %#010x => %#010x\n", bank->name, - readl(regs + bank->pctl_offset + bank->eint_mask_offset), save->eint_mask); - - writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset); - writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset); - - clk_disable(bank->drvdata->pclk); } -void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) +void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank) { - struct samsung_pin_bank *bank = drvdata->pin_banks; - int i; + struct exynos_eint_gpio_save *save = bank->soc_priv; + void __iomem *regs = bank->eint_base; - for (i = 0; i < drvdata->nr_banks; ++i, ++bank) - if (bank->eint_type == EINT_TYPE_GPIO) { - if (bank->eint_con_offset) - exynosauto_pinctrl_resume_bank(drvdata, bank); - else - exynos_pinctrl_resume_bank(drvdata, bank); - } + if (bank->eint_type == EINT_TYPE_GPIO) { + /* exynosautov920 has eint_con_offset for all but one bank */ + if (!bank->eint_con_offset) + exynos_pinctrl_resume(bank); + + pr_debug("%s: con %#010x => %#010x\n", bank->name, + readl(regs + bank->pctl_offset + bank->eint_con_offset), + save->eint_con); + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->pctl_offset + + bank->eint_mask_offset), save->eint_mask); + + writel(save->eint_con, + regs + bank->pctl_offset + bank->eint_con_offset); + writel(save->eint_mask, + regs + bank->pctl_offset + bank->eint_mask_offset); + } } static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata) diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index 97a43fa4dfc56..c70b8ead56b4b 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h @@ -211,8 +211,12 @@ struct exynos_muxed_weint_data { int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d); int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d); -void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata); -void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata); +void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank); +void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank); +void exynos_pinctrl_suspend(struct samsung_pin_bank *bank); +void exynos_pinctrl_resume(struct samsung_pin_bank *bank); +void gs101_pinctrl_suspend(struct samsung_pin_bank *bank); +void gs101_pinctrl_resume(struct samsung_pin_bank *bank); struct samsung_retention_ctrl * exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata, const struct samsung_retention_data *data); diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 63ac89a802d30..210534586c0c0 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1333,6 +1333,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) { struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev); + struct samsung_pin_bank *bank; int i; i = clk_enable(drvdata->pclk); @@ -1343,7 +1344,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device 
*dev) } for (i = 0; i < drvdata->nr_banks; i++) { - struct samsung_pin_bank *bank = &drvdata->pin_banks[i]; + bank = &drvdata->pin_banks[i]; const void __iomem *reg = bank->pctl_base + bank->pctl_offset; const u8 *offs = bank->type->reg_offset; const u8 *widths = bank->type->fld_width; @@ -1371,10 +1372,14 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) } } + for (i = 0; i < drvdata->nr_banks; i++) { + bank = &drvdata->pin_banks[i]; + if (drvdata->suspend) + drvdata->suspend(bank); + } + clk_disable(drvdata->pclk); - if (drvdata->suspend) - drvdata->suspend(drvdata); if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable) drvdata->retention_ctrl->enable(drvdata); @@ -1392,6 +1397,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) static int __maybe_unused samsung_pinctrl_resume(struct device *dev) { struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev); + struct samsung_pin_bank *bank; int ret; int i; @@ -1406,11 +1412,14 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev) return ret; } - if (drvdata->resume) - drvdata->resume(drvdata); + for (i = 0; i < drvdata->nr_banks; i++) { + bank = &drvdata->pin_banks[i]; + if (drvdata->resume) + drvdata->resume(bank); + } for (i = 0; i < drvdata->nr_banks; i++) { - struct samsung_pin_bank *bank = &drvdata->pin_banks[i]; + bank = &drvdata->pin_banks[i]; void __iomem *reg = bank->pctl_base + bank->pctl_offset; const u8 *offs = bank->type->reg_offset; const u8 *widths = bank->type->fld_width; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index 14c3b6b965851..7ffd2e193e425 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -285,8 +285,8 @@ struct samsung_pin_ctrl { int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); void (*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata); - void (*suspend)(struct samsung_pinctrl_drv_data *); - void (*resume)(struct samsung_pinctrl_drv_data *); + void (*suspend)(struct samsung_pin_bank *bank); + void (*resume)(struct samsung_pin_bank *bank); }; /** @@ -335,8 +335,8 @@ struct samsung_pinctrl_drv_data { struct samsung_retention_ctrl *retention_ctrl; - void (*suspend)(struct samsung_pinctrl_drv_data *); - void (*resume)(struct samsung_pinctrl_drv_data *); + void (*suspend)(struct samsung_pin_bank *bank); + void (*resume)(struct samsung_pin_bank *bank); }; /** diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c index 99203584949da..61b18ac206c9e 100644 --- a/drivers/platform/loongarch/loongson-laptop.c +++ b/drivers/platform/loongarch/loongson-laptop.c @@ -56,8 +56,7 @@ static struct input_dev *generic_inputdev; static acpi_handle hotkey_handle; static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX]; -int loongson_laptop_turn_on_backlight(void); -int loongson_laptop_turn_off_backlight(void); +static bool bl_powered; static int loongson_laptop_backlight_update(struct backlight_device *bd); /* 2. 
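The Samsung pinctrl refactor running through the last several files changes the suspend/resume callback granularity from the whole controller to a single bank: the callbacks now take a struct samsung_pin_bank *, and the samsung-pinctrl core walks the banks itself while the controller clock is held once around the loop (previously each bank helper enabled and disabled pclk on its own). The resulting call shape:

    /* core; clock already enabled for the whole pass */
    for (i = 0; i < drvdata->nr_banks; i++) {
            bank = &drvdata->pin_banks[i];
            if (drvdata->suspend)
                    drvdata->suspend(bank); /* void (*)(struct samsung_pin_bank *) */
    }

This is also what lets each SoC variant (exynos, exynosautov920, gs101) supply its own per-bank save/restore without duplicating the bank walk.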
ACPI Helpers and device model */ @@ -354,16 +353,42 @@ static int ec_backlight_level(u8 level) return level; } +static int ec_backlight_set_power(bool state) +{ + int status; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list args = { 1, &arg0 }; + + arg0.integer.value = state; + status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL); + if (ACPI_FAILURE(status)) { + pr_info("Loongson lvds error: 0x%x\n", status); + return -EIO; + } + + return 0; +} + static int loongson_laptop_backlight_update(struct backlight_device *bd) { - int lvl = ec_backlight_level(bd->props.brightness); + bool target_powered = !backlight_is_blank(bd); + int ret = 0, lvl = ec_backlight_level(bd->props.brightness); if (lvl < 0) return -EIO; + if (ec_set_brightness(lvl)) return -EIO; - return 0; + if (target_powered != bl_powered) { + ret = ec_backlight_set_power(target_powered); + if (ret < 0) + return ret; + + bl_powered = target_powered; + } + + return ret; } static int loongson_laptop_get_brightness(struct backlight_device *bd) @@ -384,7 +409,7 @@ static const struct backlight_ops backlight_laptop_ops = { static int laptop_backlight_register(void) { - int status = 0; + int status = 0, ret; struct backlight_properties props; memset(&props, 0, sizeof(props)); @@ -392,44 +417,20 @@ static int laptop_backlight_register(void) if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d")) return -EIO; - props.brightness = 1; + ret = ec_backlight_set_power(true); + if (ret) + return ret; + + bl_powered = true; + props.max_brightness = status; + props.brightness = ec_get_brightness(); + props.power = BACKLIGHT_POWER_ON; props.type = BACKLIGHT_PLATFORM; backlight_device_register("loongson_laptop", NULL, NULL, &backlight_laptop_ops, &props); - return 0; -} - -int loongson_laptop_turn_on_backlight(void) -{ - int status; - union acpi_object arg0 = { ACPI_TYPE_INTEGER }; - struct acpi_object_list args = { 1, &arg0 }; - - arg0.integer.value = 1; - status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL); - if (ACPI_FAILURE(status)) { - pr_info("Loongson lvds error: 0x%x\n", status); - return -ENODEV; - } - - return 0; -} - -int loongson_laptop_turn_off_backlight(void) -{ - int status; - union acpi_object arg0 = { ACPI_TYPE_INTEGER }; - struct acpi_object_list args = { 1, &arg0 }; - - arg0.integer.value = 0; - status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL); - if (ACPI_FAILURE(status)) { - pr_info("Loongson lvds error: 0x%x\n", status); - return -ENODEV; - } return 0; } @@ -611,11 +612,17 @@ static int __init generic_acpi_laptop_init(void) static void __exit generic_acpi_laptop_exit(void) { + int i; + if (generic_inputdev) { - if (input_device_registered) - input_unregister_device(generic_inputdev); - else + if (!input_device_registered) { input_free_device(generic_inputdev); + } else { + input_unregister_device(generic_inputdev); + + for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++) + generic_subdriver_exit(&generic_sub_drivers[i]); + } } } diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index 9ff7b487dc489..fbb8128d19de4 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -710,7 +710,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_events[] = { {101, "GDC_BANK0_HIT_DCL_PARTIAL"}, {102, "GDC_BANK0_EVICT_DCL"}, {103, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA0"}, - {103, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA1"}, + {104, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA1"}, {105, "GDC_BANK0_ARB_STRB"}, {106, "GDC_BANK0_ARB_WAIT"}, 
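The loongson-laptop rework above folds the exported turn_on/turn_off helpers into a static ec_backlight_set_power() and drives it from the blanking state, so the \BLSW ACPI method is only evaluated when the desired power state actually changes. A condensed sketch of the update_status flow - the ec_* helpers are the driver's own, treated here as given:

    static int bl_update_status(struct backlight_device *bd)
    {
            bool want_on = !backlight_is_blank(bd);
            int lvl = ec_backlight_level(bd->props.brightness);

            if (lvl < 0 || ec_set_brightness(lvl))
                    return -EIO;

            if (want_on != bl_powered) {    /* touch ACPI only on a real change */
                    int ret = ec_backlight_set_power(want_on);

                    if (ret)
                            return ret;
                    bl_powered = want_on;
            }
            return 0;
    }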
{107, "GDC_BANK0_GGA_STRB"}, diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 6c834e39352d6..d2c27cc0733bb 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -281,7 +281,8 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo, vring->align = SMP_CACHE_BYTES; vring->index = i; vring->vdev_id = tm_vdev->vdev.id.device; - vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN; + vring->drop_desc.len = cpu_to_virtio32(&tm_vdev->vdev, + VRING_DROP_DESC_MAX_LEN); dev = &tm_vdev->vdev.dev; size = vring_size(vring->num, vring->align); diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c index 43d119e3a4734..99152676dbd28 100644 --- a/drivers/platform/mellanox/mlxreg-lc.c +++ b/drivers/platform/mellanox/mlxreg-lc.c @@ -688,7 +688,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent, if (regval & mlxreg_lc->data->mask) { mlxreg_lc->state |= MLXREG_LC_SYNCED; mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1); - if (mlxreg_lc->state & ~MLXREG_LC_POWERED) { + if (!(mlxreg_lc->state & MLXREG_LC_POWERED)) { err = mlxreg_lc_power_on_off(mlxreg_lc, 1); if (err) goto mlxreg_lc_regmap_power_on_off_fail; diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c index abe7be602f846..e708521e52740 100644 --- a/drivers/platform/mellanox/nvsw-sn2201.c +++ b/drivers/platform/mellanox/nvsw-sn2201.c @@ -1088,7 +1088,7 @@ static int nvsw_sn2201_i2c_completion_notify(void *handle, int id) if (!nvsw_sn2201->main_mux_devs->adapter) { err = -ENODEV; dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n", - nvsw_sn2201->cpld_devs->nr); + nvsw_sn2201->main_mux_devs->nr); goto i2c_get_adapter_main_fail; } diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 2e3f6fc67c568..7ed12c1d3b34c 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -224,6 +224,15 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), } }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */ + { + .ident = "PCSpecialist Lafite Pro V 14M", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), + DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"), + } + }, {} }; diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index dc071b4257d7b..357a46fdffeda 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -393,6 +393,8 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) return -ENOMEM; } + memset_io(dev->smu_virt_addr, 0, sizeof(struct smu_metrics)); + /* Start the logging */ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false); amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false); diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c index b6bcc1d57f968..a9b195ec6f33f 100644 --- a/drivers/platform/x86/amd/pmf/tee-if.c +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -422,12 +422,12 @@ static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_ rc = tee_client_open_session(ctx, &sess_arg, NULL); if (rc < 0 || sess_arg.ret != 0) { pr_err("Failed to open TEE session err:%#x, rc:%d\n", sess_arg.ret, rc); - return rc; + return rc ?: -EINVAL; } *id = sess_arg.session; - return rc; + return 0; 
} static int amd_pmf_register_input_device(struct amd_pmf_dev *dev) @@ -462,7 +462,9 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid) dev->tee_ctx = tee_client_open_context(NULL, amd_pmf_amdtee_ta_match, NULL, NULL); if (IS_ERR(dev->tee_ctx)) { dev_err(dev->dev, "Failed to open TEE context\n"); - return PTR_ERR(dev->tee_ctx); + ret = PTR_ERR(dev->tee_ctx); + dev->tee_ctx = NULL; + return ret; } ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid); @@ -502,9 +504,12 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid) static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev) { + if (!dev->tee_ctx) + return; tee_shm_free(dev->fw_shm_pool); tee_client_close_session(dev->tee_ctx, dev->session_id); tee_client_close_context(dev->tee_ctx); + dev->tee_ctx = NULL; } int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h index 3ad33a094588c..817ee7ba07ca0 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h +++ b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h @@ -89,6 +89,11 @@ extern struct wmi_sysman_priv wmi_priv; enum { ENUM, INT, STR, PO }; +#define ENUM_MIN_ELEMENTS 8 +#define INT_MIN_ELEMENTS 9 +#define STR_MIN_ELEMENTS 8 +#define PO_MIN_ELEMENTS 4 + enum { ATTR_NAME, DISPL_NAME_LANG_CODE, diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c index 8cc212c852668..fc2f58b4cbc6e 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c @@ -23,9 +23,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); if (!obj) return -EIO; - if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < ENUM_MIN_ELEMENTS || + obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c index 951e75b538fad..7352480642391 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c @@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); if (!obj) return -EIO; - if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < INT_MIN_ELEMENTS || + obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[CURRENT_VAL].integer.value); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index d8f1bf5e58a0f..3167e06d416ed 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -26,9 +26,10 @@ static ssize_t 
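amd_pmf_ta_open_session() above could reach its error branch with rc == 0 - tee_client_open_session() succeeded at the transport level while sess_arg.ret reported a TEE-side failure - and returning that 0 made the caller treat the failure as success. The fix uses the GNU `?:` shorthand, where `a ?: b` evaluates to a if a is nonzero and to b otherwise:

    if (rc < 0 || sess_arg.ret != 0)
            return rc ?: -EINVAL;   /* shorthand for: rc ? rc : -EINVAL */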
is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); if (!obj) return -EIO; - if (obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < PO_MIN_ELEMENTS || + obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[IS_PASS_SET].integer.value); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c index c392f0ecf8b55..0d2c74f8d1aad 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c @@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); if (!obj) return -EIO; - if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < STR_MIN_ELEMENTS || + obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c index 40ddc6eb75624..f5402b7146572 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c @@ -25,7 +25,6 @@ struct wmi_sysman_priv wmi_priv = { /* reset bios to defaults */ static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"}; static int reset_option = -1; -static const struct class *fw_attr_class; /** @@ -408,10 +407,10 @@ static int init_bios_attributes(int attr_type, const char *guid) return retval; switch (attr_type) { - case ENUM: min_elements = 8; break; - case INT: min_elements = 9; break; - case STR: min_elements = 8; break; - case PO: min_elements = 4; break; + case ENUM: min_elements = ENUM_MIN_ELEMENTS; break; + case INT: min_elements = INT_MIN_ELEMENTS; break; + case STR: min_elements = STR_MIN_ELEMENTS; break; + case PO: min_elements = PO_MIN_ELEMENTS; break; default: pr_err("Error: Unknown attr_type: %d\n", attr_type); return -EINVAL; @@ -541,15 +540,11 @@ static int __init sysman_init(void) goto err_exit_bios_attr_pass_interface; } - ret = fw_attributes_class_get(&fw_attr_class); - if (ret) - goto err_exit_bios_attr_pass_interface; - - wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), + wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", DRIVER_NAME); if (IS_ERR(wmi_priv.class_dev)) { ret = PTR_ERR(wmi_priv.class_dev); - goto err_unregister_class; + goto err_exit_bios_attr_pass_interface; } wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL, @@ -602,10 +597,7 @@ static int __init sysman_init(void) release_attributes_data(); err_destroy_classdev: - device_destroy(fw_attr_class, MKDEV(0, 0)); - -err_unregister_class: - fw_attributes_class_put(); + device_unregister(wmi_priv.class_dev); err_exit_bios_attr_pass_interface: exit_bios_attr_pass_interface(); @@ -619,8 +611,7 @@ static int __init sysman_init(void) static void __exit sysman_exit(void) { release_attributes_data(); - 
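All four dell-wmi-sysman show() paths above gain the same guard before touching package elements, since a buggy BIOS may hand back a non-package object or a package with fewer elements than the GUID's schema promises. Generalized - MIN_ELEMENTS and IDX stand in for the per-type constants such as STR_MIN_ELEMENTS and CURRENT_VAL:

    obj = get_wmiobj_pointer(instance_id, guid);
    if (!obj)
            return -EIO;
    if (obj->type != ACPI_TYPE_PACKAGE ||                   /* 1: object kind   */
        obj->package.count < MIN_ELEMENTS ||                /* 2: element count */
        obj->package.elements[IDX].type != ACPI_TYPE_STRING) {      /* 3: element kind */
            kfree(obj);
            return -EIO;
    }

The accompanying switch from -EINVAL to -EIO reflects that the bad data comes from firmware, not from the caller.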
device_destroy(fw_attr_class, MKDEV(0, 0)); - fw_attributes_class_put(); + device_unregister(wmi_priv.class_dev); exit_bios_attr_set_interface(); exit_bios_attr_pass_interface(); } diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c index 9f51e0fcab04e..fee20866b41e4 100644 --- a/drivers/platform/x86/dell/dell_rbu.c +++ b/drivers/platform/x86/dell/dell_rbu.c @@ -292,7 +292,7 @@ static int packet_read_list(char *data, size_t * pread_length) remaining_bytes = *pread_length; bytes_read = rbu_data.packet_read_count; - list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) { + list_for_each_entry(newpacket, &packet_data_head.list, list) { bytes_copied = do_packet_read(pdest, newpacket, remaining_bytes, bytes_read, &temp_count); remaining_bytes -= bytes_copied; @@ -315,14 +315,14 @@ static void packet_empty_list(void) { struct packet_data *newpacket, *tmp; - list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) { + list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) { list_del(&newpacket->list); /* * zero out the RBU packet memory before freeing * to make sure there are no stale RBU packets left in memory */ - memset(newpacket->data, 0, rbu_data.packetsize); + memset(newpacket->data, 0, newpacket->length); set_memory_wb((unsigned long)newpacket->data, 1 << newpacket->ordernum); free_pages((unsigned long) newpacket->data, diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c index 182a07d8ae3df..87672c49e86ae 100644 --- a/drivers/platform/x86/firmware_attributes_class.c +++ b/drivers/platform/x86/firmware_attributes_class.c @@ -2,48 +2,35 @@ /* Firmware attributes class helper module */ -#include -#include #include #include "firmware_attributes_class.h" -static DEFINE_MUTEX(fw_attr_lock); -static int fw_attr_inuse; - -static const struct class firmware_attributes_class = { +const struct class firmware_attributes_class = { .name = "firmware-attributes", }; +EXPORT_SYMBOL_GPL(firmware_attributes_class); + +static __init int fw_attributes_class_init(void) +{ + return class_register(&firmware_attributes_class); +} +module_init(fw_attributes_class_init); + +static __exit void fw_attributes_class_exit(void) +{ + class_unregister(&firmware_attributes_class); +} +module_exit(fw_attributes_class_exit); int fw_attributes_class_get(const struct class **fw_attr_class) { - int err; - - mutex_lock(&fw_attr_lock); - if (!fw_attr_inuse) { /*first time class is being used*/ - err = class_register(&firmware_attributes_class); - if (err) { - mutex_unlock(&fw_attr_lock); - return err; - } - } - fw_attr_inuse++; *fw_attr_class = &firmware_attributes_class; - mutex_unlock(&fw_attr_lock); return 0; } EXPORT_SYMBOL_GPL(fw_attributes_class_get); int fw_attributes_class_put(void) { - mutex_lock(&fw_attr_lock); - if (!fw_attr_inuse) { - mutex_unlock(&fw_attr_lock); - return -EINVAL; - } - fw_attr_inuse--; - if (!fw_attr_inuse) /* No more consumers */ - class_unregister(&firmware_attributes_class); - mutex_unlock(&fw_attr_lock); return 0; } EXPORT_SYMBOL_GPL(fw_attributes_class_put); diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h index 363c75f1ac1b8..ef6c3764a8349 100644 --- a/drivers/platform/x86/firmware_attributes_class.h +++ b/drivers/platform/x86/firmware_attributes_class.h @@ -5,6 +5,9 @@ #ifndef FW_ATTR_CLASS_H #define FW_ATTR_CLASS_H +#include + +extern const struct class firmware_attributes_class; 
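The dell_rbu change above fixes a list API misuse: list_for_each_entry() expects the list *head* and internally starts from head->next. Passing `(&packet_data_head.list)->next` shifted the whole traversal by one node, so the first packet was skipped and the bare head sentinel was eventually treated as a packet_data entry. Side by side:

    /* wrong: iteration is offset by one; the head itself gets visited */
    list_for_each_entry(newpacket, (&packet_data_head.list)->next, list)
            process(newpacket);     /* hypothetical per-packet work */

    /* right: hand over the head; the macro begins at head->next */
    list_for_each_entry(newpacket, &packet_data_head.list, list)
            process(newpacket);

The companion memset fix scrubs newpacket->length bytes - the size recorded for that packet - rather than the global rbu_data.packetsize.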
int fw_attributes_class_get(const struct class **fw_attr_class); int fw_attributes_class_put(void); diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index ae992ac1ab4ac..6d5300c54a421 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -17,13 +17,13 @@ /* * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional * features made available on a range of Fujitsu laptops including the - * P2xxx/P5xxx/S6xxx/S7xxx series. + * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series. * * This driver implements a vendor-specific backlight control interface for * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu * laptops. * - * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and + * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and * P8010. It should work on most P-series and S-series Lifebooks, but * YMMV. * @@ -107,7 +107,11 @@ #define KEY2_CODE 0x411 #define KEY3_CODE 0x412 #define KEY4_CODE 0x413 -#define KEY5_CODE 0x420 +#define KEY5_CODE 0x414 +#define KEY6_CODE 0x415 +#define KEY7_CODE 0x416 +#define KEY8_CODE 0x417 +#define KEY9_CODE 0x420 /* Hotkey ringbuffer limits */ #define MAX_HOTKEY_RINGBUFFER_SIZE 100 @@ -560,7 +564,7 @@ static const struct key_entry keymap_default[] = { { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, { KE_KEY, KEY3_CODE, { KEY_PROG3 } }, { KE_KEY, KEY4_CODE, { KEY_PROG4 } }, - { KE_KEY, KEY5_CODE, { KEY_RFKILL } }, + { KE_KEY, KEY9_CODE, { KEY_RFKILL } }, /* Soft keys read from status flags */ { KE_KEY, FLAG_RFKILL, { KEY_RFKILL } }, { KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } }, @@ -584,6 +588,18 @@ static const struct key_entry keymap_p8010[] = { { KE_END, 0 } }; +static const struct key_entry keymap_s2110[] = { + { KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */ + { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */ + { KE_KEY, KEY3_CODE, { KEY_WWW } }, /* "Internet" */ + { KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */ + { KE_KEY, KEY5_CODE, { KEY_STOPCD } }, + { KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } }, + { KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } }, + { KE_KEY, KEY8_CODE, { KEY_NEXTSONG } }, + { KE_END, 0 } +}; + static const struct key_entry *keymap = keymap_default; static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id) @@ -621,6 +637,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = { }, .driver_data = (void *)keymap_p8010 }, + { + .callback = fujitsu_laptop_dmi_keymap_override, + .ident = "Fujitsu LifeBook S2110", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"), + }, + .driver_data = (void *)keymap_s2110 + }, {} }; diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index 2dc50152158a3..00b04adb4f191 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = { .mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex), }; -static const struct class *fw_attr_class; - ssize_t display_name_language_code_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -972,11 +970,7 @@ static int __init hp_init(void) if (ret) return ret; - ret = fw_attributes_class_get(&fw_attr_class); - if (ret) - goto err_unregister_class; - - bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), + bioscfg_drv.class_dev = device_create(&firmware_attributes_class, 
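The firmware_attributes_class conversion above replaces the use-counted register-on-first-get scheme with a class that is registered once at module init and exported directly; fw_attributes_class_get()/fw_attributes_class_put() survive only as stubs for not-yet-converted callers. Consumers, as the dell-wmi-sysman and hp-bioscfg hunks show, shrink to:

    #include "firmware_attributes_class.h"

    dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
                        NULL, "%s", DRIVER_NAME);
    if (IS_ERR(dev))
            return PTR_ERR(dev);
    /* ... */
    device_unregister(dev); /* one call on both error and exit paths */

One subtlety: device_unregister() replaces device_destroy(), which looked the device up by its devt - unnecessary here since these drivers already hold the device pointer.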
NULL, MKDEV(0, 0), NULL, "%s", DRIVER_NAME); if (IS_ERR(bioscfg_drv.class_dev)) { ret = PTR_ERR(bioscfg_drv.class_dev); @@ -1043,10 +1037,9 @@ static int __init hp_init(void) release_attributes_data(); err_destroy_classdev: - device_destroy(fw_attr_class, MKDEV(0, 0)); + device_unregister(bioscfg_drv.class_dev); err_unregister_class: - fw_attributes_class_put(); hp_exit_attr_set_interface(); return ret; @@ -1055,9 +1048,8 @@ static int __init hp_init(void) static void __exit hp_exit(void) { release_attributes_data(); - device_destroy(fw_attr_class, MKDEV(0, 0)); + device_unregister(bioscfg_drv.class_dev); - fw_attributes_class_put(); hp_exit_attr_set_interface(); } diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index bdb4cbee42058..93aa72bff3f00 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -267,6 +268,20 @@ static void ideapad_shared_exit(struct ideapad_private *priv) */ #define IDEAPAD_EC_TIMEOUT 200 /* in ms */ +/* + * Some models (e.g., ThinkBook since 2024) have a low tolerance for being + * polled too frequently. Doing so may break the state machine in the EC, + * resulting in a hard shutdown. + * + * It is also observed that frequent polls may disturb the ongoing operation + * and notably delay the availability of EC response. + * + * These values are used as the delay before the first poll and the interval + * between subsequent polls to solve the above issues. + */ +#define IDEAPAD_EC_POLL_MIN_US 150 +#define IDEAPAD_EC_POLL_MAX_US 300 + static int eval_int(acpi_handle handle, const char *name, unsigned long *res) { unsigned long long result; @@ -383,7 +398,7 @@ static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *da end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; while (time_before(jiffies, end_jiffies)) { - schedule(); + usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US); err = eval_vpcr(handle, 1, &val); if (err) @@ -414,7 +429,7 @@ static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long dat end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; while (time_before(jiffies, end_jiffies)) { - schedule(); + usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US); err = eval_vpcr(handle, 1, &val); if (err) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 0591053813a28..5ab45b7516662 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -467,10 +467,13 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ /* Get the package ID from the TPMI core */ plat_info = tpmi_get_platform_data(auxdev); - if (plat_info) - pkg = plat_info->package_id; - else + if (unlikely(!plat_info)) { dev_info(&auxdev->dev, "Platform information is NULL\n"); + ret = -ENODEV; + goto err_rem_common; + } + + pkg = plat_info->package_id; for (i = 0; i < num_resources; ++i) { struct tpmi_uncore_power_domain_info *pd_info; diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 1abd8378f158d..6ad2af46248b9 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -192,7 +192,6 @@ static const char * const level_options[] = { [TLMI_LEVEL_MASTER] = 
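The ideapad-laptop change above converts a schedule()-based spin into timed sleeps: per the new comment, some ECs (ThinkBook models since 2024) can be driven into a hard shutdown by back-to-back polling, so each iteration now yields 150-300 us before re-reading the VPC register. The loop shape, with the completion test simplified:

    end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
    while (time_before(jiffies, end_jiffies)) {
            usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US);

            err = eval_vpcr(handle, 1, &val);
            if (err)
                    return err;
            if (ec_done(val))       /* simplified: the real check differs per command */
                    return 0;
    }
    return -ETIMEDOUT;

usleep_range() also lets the timer subsystem coalesce wakeups, unlike a tight schedule() loop that re-runs as soon as the CPU is free.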
"master", }; static struct think_lmi tlmi_priv; -static const struct class *fw_attr_class; static DEFINE_MUTEX(tlmi_mutex); static inline struct tlmi_pwd_setting *to_tlmi_pwd_setting(struct kobject *kobj) @@ -907,6 +906,7 @@ static const struct attribute_group auth_attr_group = { .is_visible = auth_attr_is_visible, .attrs = auth_attrs, }; +__ATTRIBUTE_GROUPS(auth_attr); /* ---- Attributes sysfs --------------------------------------------------------- */ static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr, @@ -1122,6 +1122,7 @@ static const struct attribute_group tlmi_attr_group = { .is_visible = attr_is_visible, .attrs = tlmi_attrs, }; +__ATTRIBUTE_GROUPS(tlmi_attr); static void tlmi_attr_setting_release(struct kobject *kobj) { @@ -1141,11 +1142,13 @@ static void tlmi_pwd_setting_release(struct kobject *kobj) static const struct kobj_type tlmi_attr_setting_ktype = { .release = &tlmi_attr_setting_release, .sysfs_ops = &kobj_sysfs_ops, + .default_groups = tlmi_attr_groups, }; static const struct kobj_type tlmi_pwd_setting_ktype = { .release = &tlmi_pwd_setting_release, .sysfs_ops = &kobj_sysfs_ops, + .default_groups = auth_attr_groups, }; static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, @@ -1314,21 +1317,18 @@ static struct kobj_attribute debug_cmd = __ATTR_WO(debug_cmd); /* ---- Initialisation --------------------------------------------------------- */ static void tlmi_release_attr(void) { - int i; + struct kobject *pos, *n; /* Attribute structures */ - for (i = 0; i < TLMI_SETTINGS_COUNT; i++) { - if (tlmi_priv.setting[i]) { - sysfs_remove_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group); - kobject_put(&tlmi_priv.setting[i]->kobj); - } - } sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr); sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &save_settings.attr); if (tlmi_priv.can_debug_cmd && debug_support) sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr); + list_for_each_entry_safe(pos, n, &tlmi_priv.attribute_kset->list, entry) + kobject_put(pos); + kset_unregister(tlmi_priv.attribute_kset); /* Free up any saved signatures */ @@ -1336,19 +1336,8 @@ static void tlmi_release_attr(void) kfree(tlmi_priv.pwd_admin->save_signature); /* Authentication structures */ - sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_admin->kobj); - sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_power->kobj); - - if (tlmi_priv.opcode_support) { - sysfs_remove_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_system->kobj); - sysfs_remove_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_hdd->kobj); - sysfs_remove_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_nvme->kobj); - } + list_for_each_entry_safe(pos, n, &tlmi_priv.authentication_kset->list, entry) + kobject_put(pos); kset_unregister(tlmi_priv.authentication_kset); } @@ -1375,11 +1364,7 @@ static int tlmi_sysfs_init(void) { int i, ret; - ret = fw_attributes_class_get(&fw_attr_class); - if (ret) - return ret; - - tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), + tlmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", "thinklmi"); if (IS_ERR(tlmi_priv.class_dev)) { ret = PTR_ERR(tlmi_priv.class_dev); @@ -1393,6 +1378,14 @@ static int tlmi_sysfs_init(void) goto fail_device_created; } + 
tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL, + &tlmi_priv.class_dev->kobj); + if (!tlmi_priv.authentication_kset) { + kset_unregister(tlmi_priv.attribute_kset); + ret = -ENOMEM; + goto fail_device_created; + } + for (i = 0; i < TLMI_SETTINGS_COUNT; i++) { /* Check if index is a valid setting - skip if it isn't */ if (!tlmi_priv.setting[i]) @@ -1409,12 +1402,8 @@ static int tlmi_sysfs_init(void) /* Build attribute */ tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset; - ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL, - "%s", tlmi_priv.setting[i]->display_name); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group); + ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype, + NULL, "%s", tlmi_priv.setting[i]->display_name); if (ret) goto fail_create_attr; } @@ -1434,55 +1423,34 @@ static int tlmi_sysfs_init(void) } /* Create authentication entries */ - tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL, - &tlmi_priv.class_dev->kobj); - if (!tlmi_priv.authentication_kset) { - ret = -ENOMEM; - goto fail_create_attr; - } tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "Admin"); if (ret) goto fail_create_attr; tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "Power-on"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "Power-on"); if (ret) goto fail_create_attr; if (tlmi_priv.opcode_support) { tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_system->kobj, NULL, "%s", "System"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_system->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "System"); if (ret) goto fail_create_attr; tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_hdd->kobj, NULL, "%s", "HDD"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_hdd->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "HDD"); if (ret) goto fail_create_attr; tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_nvme->kobj, NULL, "%s", "NVMe"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_nvme->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "NVMe"); if (ret) goto fail_create_attr; } @@ -1492,9 +1460,8 @@ static int tlmi_sysfs_init(void) fail_create_attr: tlmi_release_attr(); fail_device_created: - device_destroy(fw_attr_class, MKDEV(0, 0)); + device_unregister(tlmi_priv.class_dev); fail_class_created: - fw_attributes_class_put(); return ret; } @@ -1516,8 +1483,6 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type, new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length; 
new_pwd->index = 0; - kobject_init(&new_pwd->kobj, &tlmi_pwd_setting_ktype); - return new_pwd; } @@ -1621,7 +1586,6 @@ static int tlmi_analyze(void) if (setting->possible_values) strreplace(setting->possible_values, ',', ';'); - kobject_init(&setting->kobj, &tlmi_attr_setting_ktype); tlmi_priv.setting[i] = setting; kfree(item); } @@ -1717,8 +1681,7 @@ static int tlmi_analyze(void) static void tlmi_remove(struct wmi_device *wdev) { tlmi_release_attr(); - device_destroy(fw_attr_class, MKDEV(0, 0)); - fw_attributes_class_put(); + device_unregister(tlmi_priv.class_dev); } static int tlmi_probe(struct wmi_device *wdev, const void *context) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index dea40da867552..0528af4ed8d69 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -232,6 +232,7 @@ enum tpacpi_hkey_event_t { /* Thermal events */ TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */ + TP_HKEY_EV_ALARM_BAT_LIM_CHANGE = 0x6013, /* battery charge limit changed */ TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */ TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */ TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */ @@ -3778,6 +3779,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, bool *send_acpi_ev) pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n"); /* recommended action: immediate sleep/hibernate */ break; + case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE: + pr_debug("Battery Info: battery charge threshold changed\n"); + /* User changed charging threshold. No action needed */ + return true; case TP_HKEY_EV_ALARM_SENSOR_HOT: pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n"); /* recommended action: warn user through gui, that */ @@ -11472,6 +11477,8 @@ static int __must_check __init get_thinkpad_model_data( tp->vendor = PCI_VENDOR_ID_IBM; else if (dmi_name_in_vendors("LENOVO")) tp->vendor = PCI_VENDOR_ID_LENOVO; + else if (dmi_name_in_vendors("NEC")) + tp->vendor = PCI_VENDOR_ID_LENOVO; else return 0; diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 05913e9fe0821..2643525a572bb 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -697,6 +697,37 @@ bool dev_pm_genpd_get_hwmode(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode); +/** + * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off. + * + * @dev: Device whose PM domain may need to stay powered on. + * @on: True to keep the PM domain powered on, false to allow powering it off. + * + * For some use cases a consumer driver requires its device to remain powered + * on, from the PM domain perspective, during runtime. This function allows + * that behaviour to be dynamically controlled for a device attached to a + * genpd. + * + * It is assumed that users guarantee that the genpd isn't detached while this + * routine is being called. + * + * Return: 0 on success, or a negative error value on failure.
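+ *
+ * A minimal usage sketch from a hypothetical consumer driver, assuming @dev
+ * is already attached to a genpd (do_work_that_needs_domain_power() is an
+ * illustrative helper, not a real API)::
+ *
+ *	err = dev_pm_genpd_rpm_always_on(dev, true);
+ *	if (err)
+ *		return err;
+ *	do_work_that_needs_domain_power(dev);
+ *	dev_pm_genpd_rpm_always_on(dev, false);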
+ */ +int dev_pm_genpd_rpm_always_on(struct device *dev, bool on) +{ + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return -ENODEV; + + genpd_lock(genpd); + dev_gpd_data(dev)->rpm_always_on = on; + genpd_unlock(genpd); + + return 0; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on); + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -868,6 +899,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, if (!pm_runtime_suspended(pdd->dev) || irq_safe_dev_in_sleep_domain(pdd->dev, genpd)) not_suspended++; + + /* The device may need its PM domain to stay powered on. */ + if (to_gpd_data(pdd)->rpm_always_on) + return -EBUSY; } if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) @@ -2193,8 +2228,10 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd) return 0; put: put_device(&genpd->dev); - if (genpd->free_states == genpd_free_default_power_state) + if (genpd->free_states == genpd_free_default_power_state) { kfree(genpd->states); + genpd->states = NULL; + } free: if (genpd_is_cpu_domain(genpd)) free_cpumask_var(genpd->cpus); diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index 16512654295f5..f1e0a0857a90c 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -129,12 +129,11 @@ static int at91_reset(struct notifier_block *this, unsigned long mode, " str %4, [%0, %6]\n\t" /* Disable SDRAM1 accesses */ "1: tst %1, #0\n\t" - " beq 2f\n\t" " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" /* Power down SDRAM1 */ " strne %4, [%1, %6]\n\t" /* Reset CPU */ - "2: str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t" + " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t" " b .\n\t" : @@ -145,7 +144,7 @@ static int at91_reset(struct notifier_block *this, unsigned long mode, "r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN), "r" (reset->data->reset_args), "r" (reset->ramc_lpr) - : "r4"); + ); return NOTIFY_DONE; } diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 1a20c775489c7..871f03d160c53 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -2062,7 +2062,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, mutex_unlock(&di->lock); if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) - return -ENODEV; + return di->cache.flags; switch (psp) { case POWER_SUPPLY_PROP_STATUS: diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index ba0d22d904295..868e95f0887e1 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -6,6 +6,7 @@ * Andrew F. 
Davis */ +#include #include #include #include @@ -31,6 +32,7 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg, struct i2c_msg msg[2]; u8 data[2]; int ret; + int retry = 0; if (!client->adapter) return -ENODEV; @@ -47,7 +49,16 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg, else msg[1].len = 2; - ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + do { + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (ret == -EBUSY && ++retry < 3) { + /* sleep 10 milliseconds when busy */ + usleep_range(10000, 11000); + continue; + } + break; + } while (1); + if (ret < 0) return ret; diff --git a/drivers/power/supply/collie_battery.c b/drivers/power/supply/collie_battery.c index 68390bd1004f0..3daf7befc0bf6 100644 --- a/drivers/power/supply/collie_battery.c +++ b/drivers/power/supply/collie_battery.c @@ -440,6 +440,7 @@ static int collie_bat_probe(struct ucb1x00_dev *dev) static void collie_bat_remove(struct ucb1x00_dev *dev) { + device_init_wakeup(&ucb->dev, 0); free_irq(gpiod_to_irq(collie_bat_main.gpio_full), &collie_bat_main); power_supply_unregister(collie_bat_bu.psy); power_supply_unregister(collie_bat_main.psy); diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 51310f6e4803b..c1640bc6accd2 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -410,8 +410,9 @@ static int max17040_get_property(struct power_supply *psy, if (!chip->channel_temp) return -ENODATA; - iio_read_channel_processed_scale(chip->channel_temp, - &val->intval, 10); + iio_read_channel_processed(chip->channel_temp, &val->intval); + val->intval /= 100; /* Convert from milli- to deci-degree */ + break; default: return -EINVAL; diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 5e793b80fd6b6..c3de52e78e016 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -340,12 +340,28 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode) { struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); struct rapl_defaults *defaults = get_defaults(rd->rp); + u64 val; int ret; cpus_read_lock(); ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode); - if (!ret && defaults->set_floor_freq) + if (ret) + goto end; + + ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, false, &val); + if (ret) + goto end; + + if (mode != val) { + pr_debug("%s cannot be %s\n", power_zone->name, + str_enabled_disabled(mode)); + goto end; + } + + if (defaults->set_floor_freq) defaults->set_floor_freq(rd, mode); + +end: cpus_read_unlock(); return ret; diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 1a1edd87122d3..b892a7323084d 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -121,7 +121,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) struct ptp_clock_info *ops; int err = -EOPNOTSUPP; - if (ptp_clock_freerun(ptp)) { + if (tx->modes & (ADJ_SETOFFSET | ADJ_FREQUENCY | ADJ_OFFSET) && + ptp_clock_freerun(ptp)) { pr_err("ptp: physical clock is free running\n"); return -EBUSY; } diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 18934e28469ee..a6aad743c282f 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -100,10 +100,20 @@ static inline bool ptp_vclock_in_use(struct ptp_clock *ptp) { bool in_use = false; + /* Virtual clocks can't be stacked on top of virtual 
clocks. + * Avoid acquiring the n_vclocks_mux on virtual clocks, to allow this + * function to be called from code paths where the n_vclocks_mux of the + * parent physical clock is already held. Functionally that's not an + * issue, but lockdep would complain, because they have the same lock + * class. + */ + if (ptp->is_virtual_clock) + return false; + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) return true; - if (!ptp->is_virtual_clock && ptp->n_vclocks) + if (ptp->n_vclocks) in_use = true; mutex_unlock(&ptp->n_vclocks_mux); diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 174939359ae3e..3697781c01793 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -148,7 +148,7 @@ static bool pwm_state_valid(const struct pwm_state *state) * and supposed to be ignored. So also ignore any strange values and * consider the state ok. */ - if (state->enabled) + if (!state->enabled) return true; if (!state->period) diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c index b5477659ba186..73c68f494e7f3 100644 --- a/drivers/pwm/pwm-axi-pwmgen.c +++ b/drivers/pwm/pwm-axi-pwmgen.c @@ -174,7 +174,7 @@ static int axi_pwmgen_probe(struct platform_device *pdev) struct regmap *regmap; struct pwm_chip *chip; struct axi_pwmgen_ddata *ddata; - struct clk *clk; + struct clk *axi_clk, *clk; void __iomem *io_base; int ret; @@ -197,9 +197,26 @@ static int axi_pwmgen_probe(struct platform_device *pdev) ddata = pwmchip_get_drvdata(chip); ddata->regmap = regmap; - clk = devm_clk_get_enabled(dev, NULL); + /* + * Using NULL here instead of "axi" for backwards compatibility. There + * are some dtbs that don't give clock-names and have the "ext" clock + * as the one and only clock (due to a mistake in the original bindings). + */ + axi_clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(axi_clk)) + return dev_err_probe(dev, PTR_ERR(axi_clk), "failed to get axi clock\n"); + + clk = devm_clk_get_optional_enabled(dev, "ext"); if (IS_ERR(clk)) - return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n"); + return dev_err_probe(dev, PTR_ERR(clk), "failed to get ext clock\n"); + + /* + * If there is no "ext" clock, it means the HDL was compiled with + * ASYNC_CLK_EN=0. In this case, the AXI clock is also used for the + * PWM output clock.
+ */ + if (!clk) + clk = axi_clk; ret = devm_clk_rate_exclusive_get(dev, clk); if (ret) diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 7eaab58314995..33d3554b9197a 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -130,8 +130,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, return ret; clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]); - if (!clk_rate) - return -EINVAL; + if (!clk_rate) { + ret = -EINVAL; + goto out; + } /* Make sure we use the bus clock and not the 26MHz clock */ if (pc->soc->has_ck_26m_sel) @@ -150,9 +152,9 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, } if (clkdiv > PWM_CLK_DIV_MAX) { - pwm_mediatek_clk_disable(chip, pwm); dev_err(pwmchip_parent(chip), "period of %d ns not supported\n", period_ns); - return -EINVAL; + ret = -EINVAL; + goto out; } if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) { @@ -169,9 +171,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period); pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty); +out: pwm_mediatek_clk_disable(chip, pwm); - return 0; + return ret; } static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index 9135227301c8d..e548edf64eca0 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c @@ -789,6 +789,9 @@ static int riocm_ch_send(u16 ch_id, void *buf, int len) if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE) return -EINVAL; + if (len < sizeof(struct rio_ch_chan_hdr)) + return -EINVAL; /* insufficient data from user */ + ch = riocm_get_channel(ch_id); if (!ch) { riocm_error("%s(%d) ch_%d not found", current->comm, diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index bd9447dac5967..c282236959b18 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -147,6 +147,7 @@ struct fan53555_device_info { unsigned int slew_mask; const unsigned int *ramp_delay_table; unsigned int n_ramp_values; + unsigned int enable_time; unsigned int slew_rate; }; @@ -282,6 +283,7 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 250; di->vsel_count = FAN53526_NVOLTAGES; return 0; @@ -296,10 +298,12 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) case FAN53555_CHIP_REV_00: di->vsel_min = 600000; di->vsel_step = 10000; + di->enable_time = 400; break; case FAN53555_CHIP_REV_13: di->vsel_min = 800000; di->vsel_step = 10000; + di->enable_time = 400; break; default: dev_err(di->dev, @@ -311,13 +315,19 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) case FAN53555_CHIP_ID_01: case FAN53555_CHIP_ID_03: case FAN53555_CHIP_ID_05: + di->vsel_min = 600000; + di->vsel_step = 10000; + di->enable_time = 400; + break; case FAN53555_CHIP_ID_08: di->vsel_min = 600000; di->vsel_step = 10000; + di->enable_time = 175; break; case FAN53555_CHIP_ID_04: di->vsel_min = 603000; di->vsel_step = 12826; + di->enable_time = 400; break; default: dev_err(di->dev, @@ -350,6 +360,7 @@ static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 
360; di->vsel_count = FAN53555_NVOLTAGES; return 0; @@ -372,6 +383,7 @@ static int rk8602_voltages_setup_rockchip(struct fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 360; di->vsel_count = RK8602_NVOLTAGES; return 0; @@ -395,6 +407,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 400; di->vsel_count = FAN53555_NVOLTAGES; return 0; @@ -594,6 +607,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, rdesc->ramp_mask = di->slew_mask; rdesc->ramp_delay_table = di->ramp_delay_table; rdesc->n_ramp_values = di->n_ramp_values; + rdesc->enable_time = di->enable_time; rdesc->owner = THIS_MODULE; rdev = devm_regulator_register(di->dev, &di->desc, config); diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 65927fa2ef161..1bdd494cf8821 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -260,8 +260,10 @@ static int gpio_regulator_probe(struct platform_device *pdev) return -ENOMEM; } - drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *), - GFP_KERNEL); + drvdata->gpiods = devm_kcalloc(dev, config->ngpios, + sizeof(struct gpio_desc *), GFP_KERNEL); + if (!drvdata->gpiods) + return -ENOMEM; if (config->input_supply) { drvdata->desc.supply_name = devm_kstrdup(&pdev->dev, @@ -274,8 +276,6 @@ static int gpio_regulator_probe(struct platform_device *pdev) } } - if (!drvdata->gpiods) - return -ENOMEM; for (i = 0; i < config->ngpios; i++) { drvdata->gpiods[i] = devm_gpiod_get_index(dev, NULL, diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c index 5e7171b9065ae..41fd15adfd1fd 100644 --- a/drivers/regulator/max14577-regulator.c +++ b/drivers/regulator/max14577-regulator.c @@ -40,11 +40,14 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev) struct max14577 *max14577 = rdev_get_drvdata(rdev); const struct maxim_charger_current *limits = &maxim_charger_currents[max14577->dev_type]; + int ret; if (rdev_get_id(rdev) != MAX14577_CHARGER) return -EINVAL; - max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, ®_data); + ret = max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, ®_data); + if (ret < 0) + return ret; if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0) return limits->min; diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c index 198d45f8e8849..fcdd2d0317a57 100644 --- a/drivers/regulator/max20086-regulator.c +++ b/drivers/regulator/max20086-regulator.c @@ -5,6 +5,7 @@ // Copyright (C) 2022 Laurent Pinchart // Copyright (C) 2018 Avnet, Inc. 
+#include #include #include #include @@ -28,7 +29,7 @@ #define MAX20086_REG_ADC4 0x09 /* DEVICE IDs */ -#define MAX20086_DEVICE_ID_MAX20086 0x40 +#define MAX20086_DEVICE_ID_MAX20086 0x30 #define MAX20086_DEVICE_ID_MAX20087 0x20 #define MAX20086_DEVICE_ID_MAX20088 0x10 #define MAX20086_DEVICE_ID_MAX20089 0x00 @@ -133,11 +134,11 @@ static int max20086_regulators_register(struct max20086 *chip) static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) { struct of_regulator_match *matches; - struct device_node *node; unsigned int i; int ret; - node = of_get_child_by_name(chip->dev->of_node, "regulators"); + struct device_node *node __free(device_node) = + of_get_child_by_name(chip->dev->of_node, "regulators"); if (!node) { dev_err(chip->dev, "regulators node not found\n"); return -ENODEV; @@ -153,7 +154,6 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) ret = of_regulator_match(chip->dev, node, matches, chip->info->num_outputs); - of_node_put(node); if (ret < 0) { dev_err(chip->dev, "Failed to match regulators\n"); return -EINVAL; @@ -264,7 +264,7 @@ static int max20086_i2c_probe(struct i2c_client *i2c) * shutdown. */ flags = boot_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; - chip->ena_gpiod = devm_gpiod_get(chip->dev, "enable", flags); + chip->ena_gpiod = devm_gpiod_get_optional(chip->dev, "enable", flags); if (IS_ERR(chip->ena_gpiod)) { ret = PTR_ERR(chip->ena_gpiod); dev_err(chip->dev, "Failed to get enable GPIO: %d\n", ret); diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c index dd36fd077911a..1e197f7734742 100644 --- a/drivers/remoteproc/qcom_wcnss_iris.c +++ b/drivers/remoteproc/qcom_wcnss_iris.c @@ -197,6 +197,7 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo) err_device_del: device_del(&iris->dev); + put_device(&iris->dev); return ERR_PTR(ret); } @@ -204,4 +205,5 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo) void qcom_iris_remove(struct qcom_iris *iris) { device_del(&iris->dev); + put_device(&iris->dev); } diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index d2308c2f97eb9..b7011eb384a5d 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1617,7 +1617,7 @@ static int rproc_attach(struct rproc *rproc) ret = rproc_set_rsc_table(rproc); if (ret) { dev_err(dev, "can't load resource table: %d\n", ret); - goto unprepare_device; + goto clean_up_resources; } /* reset max_notifyid */ @@ -1634,7 +1634,7 @@ static int rproc_attach(struct rproc *rproc) ret = rproc_handle_resources(rproc, rproc_loading_handlers); if (ret) { dev_err(dev, "Failed to process resources: %d\n", ret); - goto unprepare_device; + goto clean_up_resources; } /* Allocate carveout resources associated to rproc */ @@ -1653,9 +1653,9 @@ static int rproc_attach(struct rproc *rproc) clean_up_resources: rproc_resource_cleanup(rproc); -unprepare_device: /* release HW resources if needed */ rproc_unprepare_device(rproc); + kfree(rproc->clean_table); disable_iommu: rproc_disable_iommu(rproc); return ret; diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index 8be3f631c1920..73be3d2167914 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -115,10 +115,6 @@ static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data) const char *name = kproc->rproc->name; u32 msg = 
omap_mbox_message(data); - /* Do not forward messages from a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - dev_dbg(dev, "mbox msg: 0x%x\n", msg); switch (msg) { @@ -159,10 +155,6 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid) mbox_msg_t msg = (mbox_msg_t)vqid; int ret; - /* Do not forward messages to a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - /* send the index of the triggered virtqueue in the mailbox payload */ ret = mbox_send_message(kproc->mbox, (void *)msg); if (ret < 0) @@ -576,11 +568,9 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc) return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - return -EINVAL; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* 64-bit address regions currently not supported */ diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c index 09f0484a90e10..6cd50b16a8e82 100644 --- a/drivers/remoteproc/ti_k3_m4_remoteproc.c +++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c @@ -228,7 +228,7 @@ static int k3_m4_rproc_unprepare(struct rproc *rproc) int ret; /* If the core is going to be detached do not assert the module reset */ - if (rproc->state == RPROC_ATTACHED) + if (rproc->state == RPROC_DETACHED) return 0; ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, @@ -433,11 +433,9 @@ static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc) return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - return -EINVAL; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* 64-bit address regions currently not supported */ diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 747ee467da88c..941bb130c85c4 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -194,10 +194,6 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data) const char *name = kproc->rproc->name; u32 msg = omap_mbox_message(data); - /* Do not forward message from a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - dev_dbg(dev, "mbox msg: 0x%x\n", msg); switch (msg) { @@ -233,10 +229,6 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid) mbox_msg_t msg = (mbox_msg_t)vqid; int ret; - /* Do not forward message to a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - /* send the index of the triggered virtqueue in the mailbox payload */ ret = mbox_send_message(kproc->mbox, (void *)msg); if (ret < 0) @@ -448,13 +440,36 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_r5_core *core = kproc->core, *core0, *core1; struct device *dev = kproc->dev; u32 ctrl = 0, cfg = 0, stat = 0; u64 boot_vec = 0; bool mem_init_dis; int ret; + /* + * R5 cores must be powered on sequentially; core0 should be in a + * higher power state than core1 in a cluster. So, wait for core0 to + * power up before proceeding to core1, with a timeout of 2 seconds. This + * waiting mechanism is necessary because rproc_auto_boot_callback() for + * core1 can be called before core0 due to thread execution order.
+ * + * By placing the wait mechanism here in .prepare() ops, this condition + * is enforced for rproc boot requests from sysfs as well. + */ + core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); + if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 && + !core0->released_from_reset) { + ret = wait_event_interruptible_timeout(cluster->core_transition, + core0->released_from_reset, + msecs_to_jiffies(2000)); + if (ret <= 0) { + dev_err(dev, "can not power up core1 before core0"); + return -EPERM; + } + } + ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat); if (ret < 0) return ret; @@ -470,6 +485,14 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) return ret; } + /* + * Notify all threads in the wait queue when core0 state has changed so + * that threads waiting for this condition can be executed. + */ + core->released_from_reset = true; + if (core == core0) + wake_up_interruptible(&cluster->core_transition); + /* * Newer IP revisions like on J7200 SoCs support h/w auto-initialization * of TCMs, so there is no need to perform the s/w memzero. This bit is @@ -515,10 +538,30 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_r5_core *core = kproc->core, *core0, *core1; struct device *dev = kproc->dev; int ret; + /* + * Ensure power-down of cores is sequential in split mode. Core1 must + * power down before Core0 to maintain the expected state. By placing + * the wait mechanism here in .unprepare() ops, this condition is + * enforced for rproc stop or shutdown requests from sysfs and device + * removal as well. + */ + core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); + if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 && + core1->released_from_reset) { + ret = wait_event_interruptible_timeout(cluster->core_transition, + !core1->released_from_reset, + msecs_to_jiffies(2000)); + if (ret <= 0) { + dev_err(dev, "can not power down core0 before core1"); + return -EPERM; + } + } + /* Re-use LockStep-mode reset logic for Single-CPU mode */ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || cluster->mode == CLUSTER_MODE_SINGLECPU) ? @@ -526,6 +569,14 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc) if (ret) dev_err(dev, "unable to disable cores, ret = %d\n", ret); + /* + * Notify all threads in the wait queue when core1 state has changed so + * that threads waiting for this condition can be executed. 
+ */ + core->released_from_reset = false; + if (core == core1) + wake_up_interruptible(&cluster->core_transition); + return ret; } @@ -551,7 +602,7 @@ static int k3_r5_rproc_start(struct rproc *rproc) struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; struct device *dev = kproc->dev; - struct k3_r5_core *core0, *core; + struct k3_r5_core *core; u32 boot_addr; int ret; @@ -573,21 +624,9 @@ static int k3_r5_rproc_start(struct rproc *rproc) goto unroll_core_run; } } else { - /* do not allow core 1 to start before core 0 */ - core0 = list_first_entry(&cluster->cores, struct k3_r5_core, - elem); - if (core != core0 && core0->rproc->state == RPROC_OFFLINE) { - dev_err(dev, "%s: can not start core 1 before core 0\n", - __func__); - return -EPERM; - } - ret = k3_r5_core_run(core); if (ret) return ret; - - core->released_from_reset = true; - wake_up_interruptible(&cluster->core_transition); } return 0; @@ -628,8 +667,7 @@ static int k3_r5_rproc_stop(struct rproc *rproc) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; - struct device *dev = kproc->dev; - struct k3_r5_core *core1, *core = kproc->core; + struct k3_r5_core *core = kproc->core; int ret; /* halt all applicable cores */ @@ -642,16 +680,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc) } } } else { - /* do not allow core 0 to stop before core 1 */ - core1 = list_last_entry(&cluster->cores, struct k3_r5_core, - elem); - if (core != core1 && core1->rproc->state != RPROC_OFFLINE) { - dev_err(dev, "%s: can not stop core 0 before core 1\n", - __func__); - ret = -EPERM; - goto out; - } - ret = k3_r5_core_halt(core); if (ret) goto out; @@ -955,6 +983,13 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc) return ret; } +static void k3_r5_mem_release(void *data) +{ + struct device *dev = data; + + of_reserved_mem_device_release(dev); +} + static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) { struct device *dev = kproc->dev; @@ -985,28 +1020,25 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) return ret; } + ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev); + if (ret) + return ret; + num_rmems--; - kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); - if (!kproc->rmem) { - ret = -ENOMEM; - goto release_rmem; - } + kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); + if (!kproc->rmem) + return -ENOMEM; /* use remaining reserved memory regions for static carveouts */ for (i = 0; i < num_rmems; i++) { rmem_np = of_parse_phandle(np, "memory-region", i + 1); - if (!rmem_np) { - ret = -EINVAL; - goto unmap_rmem; - } + if (!rmem_np) + return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - ret = -EINVAL; - goto unmap_rmem; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* @@ -1021,12 +1053,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) */ kproc->rmem[i].dev_addr = (u32)rmem->base; kproc->rmem[i].size = rmem->size; - kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size); + kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size); if (!kproc->rmem[i].cpu_addr) { dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", i + 1, &rmem->base, &rmem->size); - ret = -ENOMEM; - goto unmap_rmem; + return -ENOMEM; } dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", @@ -1037,25 +1068,6 @@ static int k3_r5_reserved_mem_init(struct 
k3_r5_rproc *kproc) kproc->num_rmems = num_rmems; return 0; - -unmap_rmem: - for (i--; i >= 0; i--) - iounmap(kproc->rmem[i].cpu_addr); - kfree(kproc->rmem); -release_rmem: - of_reserved_mem_device_release(dev); - return ret; -} - -static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc) -{ - int i; - - for (i = 0; i < kproc->num_rmems; i++) - iounmap(kproc->rmem[i].cpu_addr); - kfree(kproc->rmem); - - of_reserved_mem_device_release(kproc->dev); } /* @@ -1282,10 +1294,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) goto out; } - ret = rproc_add(rproc); + ret = devm_rproc_add(dev, rproc); if (ret) { - dev_err(dev, "rproc_add failed, ret = %d\n", ret); - goto err_add; + dev_err_probe(dev, ret, "rproc_add failed\n"); + goto out; } /* create only one rproc in lockstep, single-cpu or @@ -1295,26 +1307,6 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) cluster->mode == CLUSTER_MODE_SINGLECPU || cluster->mode == CLUSTER_MODE_SINGLECORE) break; - - /* - * R5 cores require to be powered on sequentially, core0 - * should be in higher power state than core1 in a cluster - * So, wait for current core to power up before proceeding - * to next core and put timeout of 2sec for each core. - * - * This waiting mechanism is necessary because - * rproc_auto_boot_callback() for core1 can be called before - * core0 due to thread execution order. - */ - ret = wait_event_interruptible_timeout(cluster->core_transition, - core->released_from_reset, - msecs_to_jiffies(2000)); - if (ret <= 0) { - dev_err(dev, - "Timed out waiting for %s core to power up!\n", - rproc->name); - goto err_powerup; - } } return 0; @@ -1329,10 +1321,6 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) } } -err_powerup: - rproc_del(rproc); -err_add: - k3_r5_reserved_mem_exit(kproc); out: /* undo core0 upon any failures on core1 in split-mode */ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) { @@ -1375,10 +1363,6 @@ static void k3_r5_cluster_rproc_exit(void *data) } mbox_free_channel(kproc->mbox); - - rproc_del(rproc); - - k3_r5_reserved_mem_exit(kproc); } } diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 43f601c84b4fc..79d35ab43729e 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -746,7 +746,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data, __le32 hdr[5] = { cpu_to_le32(len), }; int tlen = sizeof(hdr) + len; unsigned long flags; - int ret; + int ret = 0; /* Word aligned channels only accept word size aligned data */ if (channel->info_word && len % 4) diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index e31fa0ad127e9..a0afdeaac270f 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -327,7 +327,7 @@ static void rtc_device_get_offset(struct rtc_device *rtc) * * Otherwise the offset seconds should be 0. */ - if (rtc->start_secs > rtc->range_max || + if ((rtc->start_secs >= 0 && rtc->start_secs > rtc->range_max) || rtc->start_secs + range_secs - 1 < rtc->range_min) rtc->offset_secs = rtc->start_secs - rtc->range_min; else if (rtc->start_secs > rtc->range_min) diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c index fe361652727a3..13b5b1f204651 100644 --- a/drivers/rtc/lib.c +++ b/drivers/rtc/lib.c @@ -46,24 +46,38 @@ EXPORT_SYMBOL(rtc_year_days); * rtc_time64_to_tm - converts time64_t to rtc_time. * * @time: The number of seconds since 01-01-1970 00:00:00. - * (Must be positive.) + * Works for values since at least 1900 * @tm: Pointer to the struct rtc_time. 
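+ *
+ * Worked example (assuming the negative-time handling added below): time =
+ * -86400 is one day before the epoch, i.e. 1969-12-31 00:00:00 UTC, so the
+ * result is tm_year = 69, tm_mon = 11 (December), tm_mday = 31 and
+ * tm_wday = 3 (Wednesday, as 1970-01-01 was a Thursday).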
*/ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm) { - unsigned int secs; - int days; + int days, secs; u64 u64tmp; u32 u32tmp, udays, century, day_of_century, year_of_century, year, day_of_year, month, day; bool is_Jan_or_Feb, is_leap_year; - /* time must be positive */ + /* + * Get days and seconds while preserving the sign to + * handle negative time values (dates before 1970-01-01) + */ days = div_s64_rem(time, 86400, &secs); + /* + * We need 0 <= secs < 86400 which isn't given for negative + * values of time. Fixup accordingly. + */ + if (secs < 0) { + days -= 1; + secs += 86400; + } + /* day of the week, 1970-01-01 was a Thursday */ tm->tm_wday = (days + 4) % 7; + /* Ensure tm_wday is always positive */ + if (tm->tm_wday < 0) + tm->tm_wday += 7; /* * The following algorithm is, basically, Proposition 6.3 of Neri @@ -93,7 +107,7 @@ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm) * thus, is slightly different from [1]. */ - udays = ((u32) days) + 719468; + udays = days + 719468; u32tmp = 4 * udays + 3; century = u32tmp / 146097; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 5849d2970bba4..095de4e0e4f38 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -697,8 +697,12 @@ static irqreturn_t cmos_interrupt(int irq, void *p) { u8 irqstat; u8 rtc_control; + unsigned long flags; - spin_lock(&rtc_lock); + /* We cannot use spin_lock() here, as cmos_interrupt() is also called + * in a non-irq context. + */ + spin_lock_irqsave(&rtc_lock, flags); /* When the HPET interrupt handler calls us, the interrupt * status is passed as arg1 instead of the irq number. But @@ -732,7 +736,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); } - spin_unlock(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); if (is_intr(irqstat)) { rtc_update_irq(p, 1, irqstat); @@ -1300,9 +1304,7 @@ static void cmos_check_wkalrm(struct device *dev) * ACK the rtc irq here */ if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { - local_irq_disable(); cmos_interrupt(0, (void *)cmos->rtc); - local_irq_enable(); return; } diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c index 90e9d97a86b48..c9d5b91a6544d 100644 --- a/drivers/rtc/rtc-loongson.c +++ b/drivers/rtc/rtc-loongson.c @@ -129,6 +129,14 @@ static u32 loongson_rtc_handler(void *id) { struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id; + rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF); + + /* + * The TOY_MATCH0_REG should be cleared 0 here, + * otherwise the interrupt cannot be cleared. 
+ */ + regmap_write(priv->regmap, TOY_MATCH0_REG, 0); + spin_lock(&priv->lock); /* Disable RTC alarm wakeup and interrupt */ writel(readl(priv->pm_base + PM1_EN_REG) & ~RTC_EN, diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 9c04c4e1a49c3..fc079b9dcf719 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -1383,6 +1383,11 @@ static int pcf2127_i2c_probe(struct i2c_client *client) variant = &pcf21xx_cfg[type]; } + if (variant->type == PCF2131) { + config.read_flag_mask = 0x0; + config.write_flag_mask = 0x0; + } + config.max_register = variant->max_register, regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap, @@ -1456,7 +1461,7 @@ static int pcf2127_spi_probe(struct spi_device *spi) variant = &pcf21xx_cfg[type]; } - config.max_register = variant->max_register, + config.max_register = variant->max_register; regmap = devm_regmap_init_spi(spi, &config); if (IS_ERR(regmap)) { diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 27a191fa3704c..e66c9c6fd3724 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -485,9 +485,15 @@ static int __init sh_rtc_probe(struct platform_device *pdev) return -ENOENT; } - rtc->periodic_irq = ret; - rtc->carry_irq = platform_get_irq(pdev, 1); - rtc->alarm_irq = platform_get_irq(pdev, 2); + if (!pdev->dev.of_node) { + rtc->periodic_irq = ret; + rtc->carry_irq = platform_get_irq(pdev, 1); + rtc->alarm_irq = platform_get_irq(pdev, 2); + } else { + rtc->alarm_irq = ret; + rtc->periodic_irq = platform_get_irq(pdev, 1); + rtc->carry_irq = platform_get_irq(pdev, 2); + } res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3a39e167bdbff..d62fea0fbdfc1 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -85,7 +85,7 @@ static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) if (!uapqns || nr_apqns == 0) return NULL; - return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn)); + return memdup_array_user(uapqns, nr_apqns, sizeof(struct pkey_apqn)); } static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs) diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index cb67fa80fb12c..a95da6768f665 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -450,6 +450,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun)) return -EINVAL; + flush_work(&port->rport_work); + retval = zfcp_unit_add(port, fcp_lun); if (retval) return retval; diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c index 5a5525054d71c..5b079b8b7a082 100644 --- a/drivers/scsi/elx/efct/efct_hw.c +++ b/drivers/scsi/elx/efct/efct_hw.c @@ -1120,7 +1120,7 @@ int efct_hw_parse_filter(struct efct_hw *hw, void *value) { int rc = 0; - char *p = NULL; + char *p = NULL, *pp = NULL; char *token; u32 idx = 0; @@ -1132,6 +1132,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value) efc_log_err(hw->os, "p is NULL\n"); return -ENOMEM; } + pp = p; idx = 0; while ((token = strsep(&p, ",")) && *token) { @@ -1144,7 +1145,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value) if (idx == ARRAY_SIZE(hw->config.filter_def)) break; } - kfree(p); + kfree(pp); return rc; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index e98e6b2b9f570..d9500b7306905 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ 
b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1850,33 +1850,14 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) } hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_debug_I_T_nexus_reset(device); - if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { - struct sas_phy *local_phy; - + if (dev_is_sata(device)) { rc = hisi_sas_softreset_ata_disk(device); - switch (rc) { - case -ECOMM: - rc = -ENODEV; - break; - case TMF_RESP_FUNC_FAILED: - case -EMSGSIZE: - case -EIO: - local_phy = sas_get_local_phy(device); - rc = sas_phy_enable(local_phy, 0); - if (!rc) { - local_phy->enabled = 0; - dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", - SAS_ADDR(device->sas_addr), rc); - rc = -ENODEV; - } - sas_put_local_phy(local_phy); - break; - default: - break; - } + if (rc == TMF_RESP_FUNC_FAILED) + dev_err(dev, "ata disk %016llx reset (%d)\n", + SAS_ADDR(device->sas_addr), rc); } + rc = hisi_sas_debug_I_T_nexus_reset(device); if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) hisi_sas_release_task(hisi_hba, device); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 85059b83ea6b4..1c6b024160da7 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -398,7 +398,11 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; - if (ndlp->nlp_flag & NLP_ELS_SND_MASK) + if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) || + test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) || + test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_RNID_SND, &ndlp->nlp_flag)) return -ENODEV; /* allocate our bsg tracking structure */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d4e46a08f94da..36470bd716173 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -571,7 +571,7 @@ int lpfc_issue_reg_vfi(struct lpfc_vport *); int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *); -void lpfc_sli4_node_prep(struct lpfc_hba *); +void lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba); int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index ce3a1f42713dd..30891ad17e2a4 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " - "Data: x%x x%x x%x x%lx x%x\n", Did, + "Data: x%lx x%x x%x x%lx x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); @@ -744,7 +744,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) * state of ndlp hit devloss, change state to * allow rediscovery. 
*/ - if (ndlp->nlp_flag & NLP_NPR_2B_DISC && + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -832,12 +832,10 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) if (ndlp->nlp_type != NLP_NVME_INITIATOR || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) continue; - spin_lock_irq(&ndlp->lock); if (ndlp->nlp_DID == Did) - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); else - ndlp->nlp_flag |= NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } } @@ -894,13 +892,11 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, */ if (vport->phba->nvmet_support) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) + if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag)) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } @@ -1440,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0242 Process x%x GFF " - "NameServer Rsp Data: x%x x%lx x%x\n", + "NameServer Rsp Data: x%lx x%lx x%x\n", did, ndlp->nlp_flag, vport->fc_flag, vport->fc_rscn_id_cnt); } else { diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a2d2b02b34187..3fd1aa5cc78cc 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -870,8 +870,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) wwn_to_u64(ndlp->nlp_nodename.u.wwn)); len += scnprintf(buf+len, size-len, "RPI:x%04x ", ndlp->nlp_rpi); - len += scnprintf(buf+len, size-len, "flag:x%08x ", - ndlp->nlp_flag); + len += scnprintf(buf+len, size-len, "flag:x%08lx ", + ndlp->nlp_flag); if (!ndlp->nlp_type) len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f5ae8cc158205..af5d5bd75642c 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -102,7 +102,7 @@ struct lpfc_nodelist { spinlock_t lock; /* Node management lock */ - uint32_t nlp_flag; /* entry flags */ + unsigned long nlp_flag; /* entry flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ uint16_t nlp_type; @@ -182,37 +182,37 @@ struct lpfc_node_rrq { #define lpfc_ndlp_check_qdepth(phba, ndlp) \ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) -/* Defines for nlp_flag (uint32) */ -#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ -#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ -#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ -#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ -#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ -#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ -#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ -#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ -#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ -#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ 
-#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ -#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ -#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */ -#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ -#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ -#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ -#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ -#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ -#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ -#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */ -#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful +/* nlp_flag mask bits */ +enum lpfc_nlp_flag { + NLP_IGNR_REG_CMPL = 0, /* Rcvd rscn before we cmpl reg login */ + NLP_REG_LOGIN_SEND = 1, /* sent reglogin to adapter */ + NLP_SUPPRESS_RSP = 4, /* Remote NPort supports suppress rsp */ + NLP_PLOGI_SND = 5, /* sent PLOGI request for this entry */ + NLP_PRLI_SND = 6, /* sent PRLI request for this entry */ + NLP_ADISC_SND = 7, /* sent ADISC request for this entry */ + NLP_LOGO_SND = 8, /* sent LOGO request for this entry */ + NLP_RNID_SND = 10, /* sent RNID request for this entry */ + NLP_NVMET_RECOV = 12, /* NVMET auditing node for recovery. */ + NLP_UNREG_INP = 15, /* UNREG_RPI cmd is in progress */ + NLP_DROPPED = 16, /* Init ref count has been dropped */ + NLP_DELAY_TMO = 17, /* delay timeout is running for node */ + NLP_NPR_2B_DISC = 18, /* node is included in num_disc_nodes */ + NLP_RCV_PLOGI = 19, /* Rcv'ed PLOGI from remote system */ + NLP_LOGO_ACC = 20, /* Process LOGO after ACC completes */ + NLP_TGT_NO_SCSIID = 21, /* good PRLI but no binding for scsid */ + NLP_ISSUE_LOGO = 22, /* waiting to issue a LOGO */ + NLP_IN_DEV_LOSS = 23, /* devloss in progress */ + NLP_ACC_REGLOGIN = 24, /* Issue Reg Login after successful ACC */ -#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from + NLP_NPR_ADISC = 25, /* Issue ADISC when dq'ed from NPR list */ -#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ -#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ -#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ -#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ -#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ -#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ + NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */ + NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */ + NLP_TARGET_REMOVE = 28, /* Target remove in process */ + NLP_SC_REQ = 29, /* Target requires authentication */ + NLP_FIRSTBURST = 30, /* Target supports FirstBurst */ + NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */ +}; /* There are 4 different double linked lists nodelist entries can reside on. 
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index d737b897ddd82..b5fa5054e952e 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -725,11 +725,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&np->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&np->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -864,9 +862,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sizeof(struct lpfc_name)); /* Set state will put ndlp onto node list if not already done */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -1018,7 +1014,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * registered with the SCSI transport, remove the initial * reference to trigger node release. */ - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) lpfc_nlp_put(ndlp); @@ -1548,7 +1544,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1597,7 +1593,7 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1675,9 +1671,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *new_ndlp; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; - uint32_t keepDID = 0, keep_nlp_flag = 0; + uint32_t keepDID = 0; int rc; - uint32_t keep_new_nlp_flag = 0; + unsigned long keep_nlp_flag = 0, keep_new_nlp_flag = 0; uint16_t keep_nlp_state; u32 keep_nlp_fc4_type = 0; struct lpfc_nvme_rport *keep_nrport = NULL; @@ -1704,8 +1700,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3178 PLOGI confirm: ndlp x%x x%x x%x: " - "new_ndlp x%x x%x x%x\n", + "3178 PLOGI confirm: ndlp x%x x%lx x%x: " + "new_ndlp x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, (new_ndlp ? new_ndlp->nlp_DID : 0), (new_ndlp ? 
new_ndlp->nlp_flag : 0), @@ -1769,48 +1765,48 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, new_ndlp->nlp_flag = ndlp->nlp_flag; /* if new_ndlp had NLP_UNREG_INP set, keep it */ - if (keep_new_nlp_flag & NLP_UNREG_INP) - new_ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_new_nlp_flag)) + set_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_new_nlp_flag & NLP_RPI_REGISTERED) - new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_new_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_new_nlp_flag & NLP_DROPPED) - new_ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_new_nlp_flag)) + set_bit(NLP_DROPPED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &new_ndlp->nlp_flag); ndlp->nlp_flag = keep_new_nlp_flag; /* if ndlp had NLP_UNREG_INP set, keep it */ - if (keep_nlp_flag & NLP_UNREG_INP) - ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_nlp_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); /* if ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_nlp_flag & NLP_RPI_REGISTERED) - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_nlp_flag & NLP_DROPPED) - ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_nlp_flag)) + set_bit(NLP_DROPPED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); spin_unlock_irq(&new_ndlp->lock); spin_unlock_irq(&ndlp->lock); @@ -1888,7 +1884,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, phba->active_rrq_pool); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", + "3173 PLOGI confirm exit: new_ndlp x%x x%lx x%x\n", new_ndlp->nlp_DID, new_ndlp->nlp_flag, new_ndlp->nlp_fc4_type); @@ -2009,7 +2005,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_nodelist *ndlp, *free_ndlp; struct lpfc_dmabuf *prsp; - int disc; + bool disc; struct serv_parm *sp = NULL; u32 ulp_status, ulp_word4, did, iotag; bool release_node = false; @@ -2044,10 +2040,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
*/ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* PLOGI completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -2060,9 +2053,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2070,11 +2061,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ - if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); - } + if (disc) + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } /* Warn PLOGI status Don't print the vport to vport rjts */ @@ -2097,7 +2085,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * with the reglogin process. */ spin_lock_irq(&ndlp->lock); - if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && + if ((test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) || + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) && ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { spin_unlock_irq(&ndlp->lock); goto out; @@ -2108,8 +2097,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * start the device remove process. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2212,12 +2201,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) * outstanding UNREG_RPI mbox command completes, unless we * are going offline. This logic does not apply for Fabric DIDs */ - if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && + if ((test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || + test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) && ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4110 Issue PLOGI x%x deferred " - "on NPort x%x rpi x%x flg x%x Data:" + "on NPort x%x rpi x%x flg x%lx Data:" " x%px\n", ndlp->nlp_defer_did, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); @@ -2335,10 +2325,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_PRLI_SND; + clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag); /* Driver supports multiple FC4 types. Counters matter. 
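Two recurring shapes in the hunks above are worth calling out. A locked read-then-clear collapses into a single atomic read-modify-write, which is also why the local variable disc becomes a bool; and a mask test over several flags becomes one test_bit() per flag, since bit numbers can no longer be OR-ed together into a mask. Condensed before/after, excerpted from the surrounding hunks rather than compilable on its own:

	/* Before: three statements under the node lock */
	spin_lock_irq(&ndlp->lock);
	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
	spin_unlock_irq(&ndlp->lock);

	/* After: one atomic RMW, returning the old bit value */
	disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);

	/* Before: one AND against a combined mask */
	if (ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI))
		/* ... */;

	/* After: each bit position tested individually */
	if (test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) ||
	    test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))
		/* ... */;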
*/ + spin_lock_irq(&ndlp->lock); vport->fc_prli_sent--; ndlp->fc4_prli_sent--; spin_unlock_irq(&ndlp->lock); @@ -2379,7 +2369,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Warn PRLI status */ lpfc_printf_vlog(vport, mode, LOG_ELS, "2754 PRLI DID:%06X Status:x%x/x%x, " - "data: x%x x%x x%x\n", + "data: x%x x%x x%lx\n", ndlp->nlp_DID, ulp_status, ulp_word4, ndlp->nlp_state, ndlp->fc4_prli_sent, ndlp->nlp_flag); @@ -2396,10 +2386,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || (ndlp->nlp_state == NLP_STE_NPR_NODE && - ndlp->nlp_flag & NLP_DELAY_TMO)) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "2784 PRLI cmpl: Allow Node recovery " - "DID x%06x nstate x%x nflag x%x\n", + "DID x%06x nstate x%x nflag x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag); goto out; @@ -2420,8 +2410,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !ndlp->fc4_prli_sent) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2496,7 +2486,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nvme_fb_size = 0; send_next_prli: @@ -2627,8 +2618,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * the ndlp is used to track outstanding PRLIs for different * FC4 types. */ + set_bit(NLP_PRLI_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_PRLI_SND; vport->fc_prli_sent++; ndlp->fc4_prli_sent++; spin_unlock_irq(&ndlp->lock); @@ -2789,7 +2780,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; - int disc; + bool disc; u32 ulp_status, ulp_word4, tmo, iotag; bool release_node = false; @@ -2818,10 +2809,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
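Note what the node lock still covers in the PRLI hunks just above: only the flag bit moves to an atomic op, while the fc_prli_sent counters are plain integers and therefore keep ndlp->lock. The resulting shape, excerpted from the completion path above:

	clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag);	/* atomic, lock-free */

	spin_lock_irq(&ndlp->lock);	/* lock now guards only the */
	vport->fc_prli_sent--;		/* non-atomic counters */
	ndlp->fc4_prli_sent--;
	spin_unlock_irq(&ndlp->lock);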
*/ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); /* ADISC completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0104 ADISC completes to NPort x%x " @@ -2832,9 +2821,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2843,9 +2830,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_set_disctmo(vport); } goto out; @@ -2864,8 +2849,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2938,9 +2923,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitADISC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ADISC_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -2961,9 +2944,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); return 1; } @@ -2985,7 +2966,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = ndlp->vport; IOCB_t *irsp; - unsigned long flags; uint32_t skip_recovery = 0; int wake_up_waiter = 0; u32 ulp_status; @@ -3007,8 +2987,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, iotag = irsp->ulpIoTag; } + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { wake_up_waiter = 1; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; @@ -3023,7 +3003,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* LOGO completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0105 LOGO completes to NPort x%x " - "IoTag x%x refcnt %d nflags x%x xflags x%x " + "IoTag x%x refcnt %d nflags x%lx xflags x%x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, iotag, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -3061,12 +3041,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* The driver sets this flag for an NPIV instance that doesn't want to * log into the remote port. 
*/ - if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { - spin_lock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); goto out_rsrc_free; @@ -3089,9 +3065,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && skip_recovery == 0) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irqsave(&ndlp->lock, flags); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3187 LOGO completes to NPort x%x: Start " @@ -3113,9 +3087,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * register with the transport. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } @@ -3156,12 +3128,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t cmdsize; int rc; - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_LOGO_SND) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag)) return 0; - } - spin_unlock_irq(&ndlp->lock); cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, @@ -3180,10 +3148,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitLOGO++; elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -3208,9 +3174,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -3286,13 +3250,13 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, static int lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) { - int rc = 0; + int rc; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ns_ndlp; LPFC_MBOXQ_t *mbox; - if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) - return rc; + if (test_bit(NLP_RPI_REGISTERED, &fc_ndlp->nlp_flag)) + return 0; ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ns_ndlp) @@ -3309,7 +3273,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0936 %s: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return -ENOMEM; @@ -3321,7 +3285,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) goto out; } - fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &fc_ndlp->nlp_flag); mbox->mbox_cmpl = 
lpfc_mbx_cmpl_fc_reg_login; mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); if (!mbox->ctx_ndlp) { @@ -3345,7 +3309,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0938 %s: failed to format reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return rc; @@ -4384,11 +4348,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { struct lpfc_work_evt *evtp; - if (!(nlp->nlp_flag & NLP_DELAY_TMO)) + if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag)) return; - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&nlp->lock); del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; if (!list_empty(&nlp->els_retry_evt.evt_listp)) { @@ -4397,10 +4358,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) evtp = &nlp->els_retry_evt; lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); } - if (nlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&nlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &nlp->nlp_flag)) { if (vport->num_disc_nodes) { if (vport->port_state < LPFC_VPORT_READY) { /* Check if there are more ADISCs to be sent */ @@ -4480,14 +4438,11 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) spin_lock_irq(&ndlp->lock); cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; + spin_unlock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - spin_unlock_irq(&ndlp->lock); + if (!test_and_clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return; - } - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); /* * If a discovery event readded nlp_delayfunc after timer * firing and before processing the timer, cancel the @@ -5010,9 +4965,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* delay is specified in milliseconds */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; if ((cmd == ELS_CMD_PRLI) || @@ -5072,7 +5025,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0108 No retry ELS command x%x to remote " "NPORT x%x Retried:%d Error:x%x/%x " - "IoTag x%x nflags x%x\n", + "IoTag x%x nflags x%lx\n", cmd, did, cmdiocb->retry, ulp_status, ulp_word4, cmdiocb->iotag, (ndlp ? ndlp->nlp_flag : 0)); @@ -5239,7 +5192,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ACC to LOGO completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0109 ACC to LOGO completes to NPort x%x refcnt %d " - "last els x%x Data: x%x x%x x%x\n", + "last els x%x Data: x%lx x%x x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -5254,16 +5207,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); /* If came from PRLO, then PRLO_ACC is done. * Start rediscovery now. 
*/ if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -5300,7 +5251,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0006 rpi x%x DID:%x flg:%x %d x%px " + "0006 rpi x%x DID:%x flg:%lx %d x%px " "mbx_cmd x%x mbx_flag x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp, mbx_cmd, @@ -5311,11 +5262,9 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * first on an UNREG_LOGIN and then release the final * references. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (mbx_cmd == MBX_UNREG_LOGIN) - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); lpfc_drop_node(ndlp->vport, ndlp); } @@ -5381,23 +5330,23 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS response tag completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0110 ELS response tag x%x completes " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", + "Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n", iotag, ulp_status, ulp_word4, tmo, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); if (mbox) { - if (ulp_status == 0 - && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { + if (ulp_status == 0 && + test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { if (!lpfc_unreg_rpi(vport, ndlp) && !test_bit(FC_PT2PT, &vport->fc_flag)) { - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0314 PLOGI recov " "DID x%x " - "Data: x%x x%x x%x\n", + "Data: x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_rpi, @@ -5414,18 +5363,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out_free_mbox; mbox->vport = vport; - if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { + if (test_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag)) { mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; - } - else { + } else { mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) goto out; @@ -5434,12 +5382,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * set for this failed mailbox command. 
*/ lpfc_nlp_put(ndlp); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* ELS rsp: Cannot issue reg_login for */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0138 ELS rsp: Cannot issue reg_login for x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } @@ -5448,32 +5396,20 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } out: if (ndlp && shost) { - spin_lock_irq(&ndlp->lock); if (mbox) - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; - ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + clear_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); } /* An SLI4 NPIV instance wants to drop the node at this point under - * these conditions and release the RPI. + * these conditions because it doesn't need the login. */ if (phba->sli_rev == LPFC_SLI_REV4 && vport && vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - lpfc_drop_node(vport, ndlp); - } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && - ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { /* Drop ndlp if there is no planned or outstanding * issued PRLI. * @@ -5540,9 +5476,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } @@ -5570,7 +5504,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, pcmd += sizeof(uint32_t); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC: did:x%x flg:x%x", + "Issue ACC: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_FLOGI: @@ -5649,7 +5583,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: @@ -5687,7 +5621,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLO: did:x%x flg:x%x", + "Issue ACC PRLO: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_RDF: @@ -5732,12 +5666,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, default: return 1; } - if (ndlp->nlp_flag & NLP_LOGO_ACC) { - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) { + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) && + !test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; } 
else { elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5760,7 +5692,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, /* Xmit ELS ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx refcnt %d\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -5835,13 +5767,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, /* Xmit ELS RJT response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0129 Xmit ELS RJT x%x response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", rejectError, elsiocb->iotag, get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue LS_RJT: did:x%x flg:x%x err:x%x", + "Issue LS_RJT: did:x%x flg:x%lx err:x%x", ndlp->nlp_DID, ndlp->nlp_flag, rejectError); phba->fc_stat.elsXmitLSRJT++; @@ -5852,18 +5784,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 1; } - /* The NPIV instance is rejecting this unsolicited ELS. Make sure the - * node's assigned RPI gets released provided this node is not already - * registered with the transport. - */ - if (phba->sli_rev == LPFC_SLI_REV4 && - vport->port_type == LPFC_NPIV_PORT && - !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -5944,7 +5864,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_format_edc_lft_desc(phba, tlv); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue EDC ACC: did:x%x flg:x%x refcnt %d", + "Issue EDC ACC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5966,7 +5886,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6035,7 +5955,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit ADISC ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0130 Xmit ADISC ACC response iotag x%x xri: " - "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", + "x%x, did x%x, nlp_flag x%lx, nlp_state x%x rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6051,7 +5971,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ap->DID = be32_to_cpu(vport->fc_myDID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", + "Issue ACC ADISC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6157,7 +6077,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit PRLI ACC 
response tag */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0131 Xmit PRLI ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6228,7 +6148,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6015 NVME issue PRLI ACC word1 x%08x " - "word4 x%08x word5 x%08x flag x%x, " + "word4 x%08x word5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", npr_nvme->word1, npr_nvme->word4, npr_nvme->word5, ndlp->nlp_flag, @@ -6243,7 +6163,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLI: did:x%x flg:x%x", + "Issue ACC PRLI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6357,7 +6277,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC RNID: did:x%x flg:x%x refcnt %d", + "Issue ACC RNID: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6414,7 +6334,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, get_job_ulpcontext(phba, iocb)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", + "Clear RRQ: did:x%x flg:x%lx exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) xri = bf_get(rrq_oxid, rrq); @@ -6491,7 +6411,7 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", + "Issue ACC ECHO: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6541,14 +6461,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE || - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); - if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* This node was marked for ADISC but was not picked * for discovery. This is possible if the node was * missing in gidft response. 
@@ -6606,9 +6524,9 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS PLOGIs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + !test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -7104,7 +7022,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2171 Xmit RDP response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8078,7 +7996,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, */ if (vport->port_state <= LPFC_NS_QRY) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", + "RCV RSCN ignore: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); @@ -8108,7 +8026,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_flag, payload_len, *lp, vport->fc_rscn_id_cnt); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", + "RCV RSCN vport: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); @@ -8145,7 +8063,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", + "RCV RSCN defer: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); @@ -8201,7 +8119,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 0; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN: did:x%x/ste:x%x flg:x%x", + "RCV RSCN: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_MODE, &vport->fc_flag); @@ -8707,7 +8625,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Xmit ELS RLS ACC response tag */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8869,7 +8787,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS RLS ACC response tag */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x, " "Data: x%x x%x x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9066,7 +8984,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag */ lpfc_printf_vlog(vport, KERN_INFO, 
LOG_ELS, "0120 Xmit ELS RPL ACC response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -10411,14 +10329,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) { if (newnode) lpfc_nlp_put(ndlp); goto dropit; } - spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) @@ -10447,7 +10362,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PLOGI: did:x%x/ste:x%x flg:x%x", + "RCV PLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPLOGI++; @@ -10486,9 +10401,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); @@ -10496,7 +10409,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FLOGI: did:x%x/ste:x%x flg:x%x", + "RCV FLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFLOGI++; @@ -10523,7 +10436,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LOGO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LOGO: did:x%x/ste:x%x flg:x%x", + "RCV LOGO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLOGO++; @@ -10540,7 +10453,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLO: did:x%x/ste:x%x flg:x%x", + "RCV PRLO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLO++; @@ -10569,7 +10482,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ADISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ADISC: did:x%x/ste:x%x flg:x%x", + "RCV ADISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_send_els_event(vport, ndlp, payload); @@ -10584,7 +10497,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PDISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PDISC: did:x%x/ste:x%x flg:x%x", + "RCV PDISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPDISC++; @@ -10598,7 +10511,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARPR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARPR: did:x%x/ste:x%x flg:x%x", + "RCV FARPR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARPR++; @@ -10606,7 +10519,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARP: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARP: 
did:x%x/ste:x%x flg:x%x", + "RCV FARP: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARP++; @@ -10614,7 +10527,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FAN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FAN: did:x%x/ste:x%x flg:x%x", + "RCV FAN: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFAN++; @@ -10623,7 +10536,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLI: did:x%x/ste:x%x flg:x%x", + "RCV PRLI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLI++; @@ -10637,7 +10550,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LIRR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LIRR: did:x%x/ste:x%x flg:x%x", + "RCV LIRR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLIRR++; @@ -10648,7 +10561,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RLS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RLS: did:x%x/ste:x%x flg:x%x", + "RCV RLS: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRLS++; @@ -10659,7 +10572,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RPL: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RPL: did:x%x/ste:x%x flg:x%x", + "RCV RPL: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRPL++; @@ -10670,7 +10583,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RNID: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RNID: did:x%x/ste:x%x flg:x%x", + "RCV RNID: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRNID++; @@ -10681,7 +10594,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RTV: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RTV: did:x%x/ste:x%x flg:x%x", + "RCV RTV: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRTV++; lpfc_els_rcv_rtv(vport, elsiocb, ndlp); @@ -10691,7 +10604,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RRQ: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RRQ: did:x%x/ste:x%x flg:x%x", + "RCV RRQ: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRRQ++; @@ -10702,7 +10615,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ECHO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ECHO: did:x%x/ste:x%x flg:x%x", + "RCV ECHO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvECHO++; @@ -10718,7 +10631,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FPIN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FPIN: did:x%x/ste:x%x flg:x%x", + "RCV FPIN: did:x%x/ste:x%x " + "flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, @@ -11226,9 +11140,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; 
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; phba->pport->port_state = LPFC_FLOGI; return; @@ -11359,11 +11271,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -11566,7 +11476,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NPIV LOGO completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2928 NPIV LOGO completes to NPort x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%lx x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -11582,8 +11492,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Wake up lpfc_vport_delete if waiting...*/ if (ndlp->logo_waitq) wake_up(ndlp->logo_waitq); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); } @@ -11633,13 +11544,11 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "Issue LOGO npiv did:x%x flg:x%x", + "Issue LOGO npiv did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -11655,9 +11564,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -12138,7 +12045,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " - "flags 0x%x\n", + "flag 0x%lx\n", shost->host_no, ndlp->nlp_DID, vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); @@ -12148,8 +12055,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, */ spin_lock_irqsave(&ndlp->lock, flags); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index f2e4237ff3d99..b5dd17eecf82d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -137,7 +137,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) ndlp = rdata->pnode; vport = ndlp->vport; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport terminate: 
sid:x%x did:x%x flg:x%x", + "rport terminate: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) @@ -155,7 +155,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_hba *phba; struct lpfc_work_evt *evtp; unsigned long iflags; - bool nvme_reg = false; + bool drop_initial_node_ref = false; ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; if (!ndlp) @@ -165,11 +165,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport devlosscb: sid:x%x did:x%x flg:x%x", + "rport devlosscb: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3181 dev_loss_callbk x%06x, rport x%px flg x%x " + "3181 dev_loss_callbk x%06x, rport x%px flg x%lx " "load_flag x%lx refcnt %u state %d xpt x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref), @@ -182,8 +182,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; - if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) - nvme_reg = true; + /* Only 1 thread can drop the initial node reference. + * If not registered for NVME and NLP_DROPPED flag is + * clear, remove the initial reference. + */ + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) + drop_initial_node_ref = true; /* The scsi_transport is done with the rport so lpfc cannot * call to unregister. @@ -194,13 +199,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node, * unregister calls were made to the scsi and nvme * transports and refcnt was already decremented. Clear - * the NLP_XPT_REGD flag only if the NVME Rport is + * the NLP_XPT_REGD flag only if the NVME nrport is * confirmed unregistered. */ - if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) { - ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); /* may free ndlp */ + + /* Release scsi transport reference */ + lpfc_nlp_put(ndlp); } else { spin_unlock_irqrestore(&ndlp->lock, iflags); } @@ -208,19 +216,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) spin_unlock_irqrestore(&ndlp->lock, iflags); } - spin_lock_irqsave(&ndlp->lock, iflags); - - /* Only 1 thread can drop the initial node reference. If - * another thread has set NLP_DROPPED, this thread is done. - */ - if (nvme_reg || (ndlp->nlp_flag & NLP_DROPPED)) { - spin_unlock_irqrestore(&ndlp->lock, iflags); - return; - } - - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); + if (drop_initial_node_ref) + lpfc_nlp_put(ndlp); return; } @@ -253,14 +250,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_IN_DEV_LOSS; + set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); + spin_lock_irqsave(&ndlp->lock, iflags); /* If there is a PLOGI in progress, and we are in a * NLP_NPR_2B_DISC state, don't turn off the flag. 
*/ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* * The backend does not expect any more calls associated with this @@ -289,15 +286,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) } else { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3188 worker thread is stopped %s x%06x, " - " rport x%px flg x%x load_flag x%lx refcnt " + " rport x%px flg x%lx load_flag x%lx refcnt " "%d\n", __func__, ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref)); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { - spin_lock_irqsave(&ndlp->lock, iflags); /* Node is in dev loss. No further transaction. */ - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -430,7 +425,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, lpfc_nlp_get(ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8438 Devloss timeout reversed on DID x%x " - "refcnt %d ndlp %p flag x%x " + "refcnt %d ndlp %p flag x%lx " "port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -473,7 +468,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n", + "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n", __func__, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); @@ -487,9 +482,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); return fcf_inuse; } @@ -517,7 +510,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) } break; case Fabric_Cntl_DID: - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) recovering = true; break; case FDMI_DID: @@ -545,15 +538,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) * the following lpfc_nlp_put is necessary after fabric node is * recovered. 
*/ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); if (recovering) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8436 Devloss timeout marked on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -570,7 +561,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) LOG_DISCOVERY | LOG_NODE, "8437 Devloss timeout ignored on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -590,7 +581,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x refcnt %d\n", + "NPort x%06x Data: x%lx x%x x%x refcnt %d\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, @@ -600,15 +591,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x\n", + "NPort x%06x Data: x%lx x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM event. @@ -1373,7 +1362,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device is * marked for PLOGI. */ @@ -3882,14 +3871,13 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->ctx_ndlp = NULL; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, - "0002 rpi:%x DID:%x flg:%x %d x%px\n", + "0002 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); - if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || + if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { /* We rcvd a rscn after issuing this * mbox reg login, we may have cycled @@ -3899,16 +3887,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * there is another reg login in * process. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); /* * We cannot leave the RPI registered because * if we go thru discovery again for this ndlp * a subsequent REG_RPI will fail. 
*/ - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } @@ -4221,7 +4207,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4352,9 +4338,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * reference. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } @@ -4375,11 +4359,11 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0003 rpi:%x DID:%x flg:%x %d x%px\n", + "0003 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -4471,8 +4455,8 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) __func__, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_state); - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4506,7 +4490,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport add: did:x%x flg:x%x type x%x", + "rport add: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); /* Don't add the remote port if unloading. 
*/ @@ -4574,7 +4558,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport delete: did:x%x flg:x%x type x%x", + "rport delete: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -4690,7 +4674,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "0999 %s Not regd: ndlp x%px rport x%px DID " - "x%x FLG x%x XPT x%x\n", + "x%x FLG x%lx XPT x%x\n", __func__, ndlp, ndlp->rport, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags); return; @@ -4706,7 +4690,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } else if (!ndlp->rport) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, - "1999 %s NDLP in devloss x%px DID x%x FLG x%x" + "1999 %s NDLP in devloss x%px DID x%x FLG x%lx" " XPT x%x refcnt %u\n", __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, @@ -4751,7 +4735,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type |= NLP_FC_NODE; fallthrough; case NLP_STE_MAPPED_NODE: - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); lpfc_nlp_reg_node(vport, ndlp); break; @@ -4762,7 +4746,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * backend, attempt it now */ case NLP_STE_NPR_NODE: - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); fallthrough; default: lpfc_nlp_unreg_node(vport, ndlp); @@ -4783,13 +4767,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (new_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FC_NODE; } if (new_state == NLP_STE_MAPPED_NODE) - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); if (new_state == NLP_STE_NPR_NODE) - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || @@ -4797,7 +4781,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* For nodes marked for ADISC, Handle unreg in ADISC cmpl * if linkup. 
In linkdown do unreg_node */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) || !lpfc_is_link_up(vport->phba)) lpfc_nlp_unreg_node(vport, ndlp); } @@ -4817,9 +4801,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } @@ -4851,7 +4833,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int state) { int old_state = ndlp->nlp_state; - int node_dropped = ndlp->nlp_flag & NLP_DROPPED; + bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag); char name1[16], name2[16]; unsigned long iflags; @@ -4867,7 +4849,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (node_dropped && old_state == NLP_STE_UNUSED_NODE && state != NLP_STE_UNUSED_NODE) { - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_get(ndlp); } @@ -4875,7 +4857,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, state != NLP_STE_NPR_NODE) lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; + clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); ndlp->nlp_type &= ~NLP_FC_NODE; } @@ -4972,14 +4954,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference from lpfc_nlp_init. If set, don't drop it again and * introduce an imbalance. */ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); - return; - } - spin_unlock_irq(&ndlp->lock); } /* @@ -5082,7 +5058,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, case CMD_GEN_REQUEST64_CR: if (iocb->ndlp == ndlp) return 1; - fallthrough; + break; case CMD_ELS_REQUEST64_CR: if (remote_id == ndlp->nlp_DID) return 1; @@ -5094,9 +5070,9 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (ndlp->nlp_flag & NLP_DELAY_TMO)) { + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return 0; - } + if (ulp_context == ndlp->nlp_rpi) return 1; } @@ -5166,7 +5142,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. 
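The lpfc_drop_node() hunk here shows the payoff most clearly: a four-step lock/test/set/unlock sequence collapses into one test_and_set_bit(), which atomically sets NLP_DROPPED and reports whether it was already set, so the initial node reference can only ever be put once even under racing callers. A hedged userspace sketch of the same idiom — demo_test_and_set_bit is a stand-in for the kernel helper, and the refcount is simulated:

#include <stdio.h>

#define DEMO_DROPPED 0	/* illustrative bit number */

static unsigned long nlp_flag;
static int kref = 1;	/* simulated node reference count */

/* Stand-in for test_and_set_bit(): atomically set the bit and
 * return its previous value. */
static int demo_test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long old = __atomic_fetch_or(addr, 1UL << nr,
					      __ATOMIC_SEQ_CST);
	return (old >> nr) & 1;
}

static void drop_node(void)
{
	/* Only the caller that wins the bit drops the reference; any
	 * later call sees the bit already set and does nothing, so the
	 * reference cannot be double-dropped. */
	if (!demo_test_and_set_bit(DEMO_DROPPED, &nlp_flag))
		kref--;
}

int main(void)
{
	drop_node();
	drop_node();	/* second drop is a no-op */
	printf("kref=%d flg=x%lx\n", kref, nlp_flag);	/* kref=0 flg=x1 */
	return 0;
}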
*/ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { if (phba->sli_rev != LPFC_SLI_REV4) lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); else @@ -5200,29 +5176,19 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_issue_els_logo(vport, ndlp, 0); /* Check to see if there are any deferred events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1434 UNREG cmpl deferred logo x%x " "on NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - /* NLP_RELEASE_RPI is only set for SLI4 ports. */ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irq(&ndlp->lock); - } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The node has an outstanding reference for the unreg. Now @@ -5242,8 +5208,6 @@ static void lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { - unsigned long iflags; - /* Driver always gets a reference on the mailbox job * in support of async jobs. */ @@ -5251,9 +5215,8 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, if (!mbox->ctx_ndlp) return; - if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) { mbox->mbox_cmpl = lpfc_nlp_logo_unreg; - } else if (phba->sli_rev == LPFC_SLI_REV4 && !test_bit(FC_UNLOADING, &vport->load_flag) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= @@ -5261,13 +5224,6 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, (kref_read(&ndlp->kref) > 0)) { mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; } else { - if (test_bit(FC_UNLOADING, &vport->load_flag)) { - if (phba->sli_rev == LPFC_SLI_REV4) { - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - } mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } @@ -5289,13 +5245,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc, acc_plogi = 1; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || + test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) { + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " - "unregistered nlp_flag x%x " + "unregistered nlp_flag x%lx " "did x%x\n", ndlp->nlp_rpi, ndlp->nlp_flag, ndlp->nlp_DID); @@ -5303,11 +5259,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. 
*/ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -5330,27 +5286,24 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 1; } + /* Accept PLOGIs after unreg_rpi_cmpl. */ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) - /* - * accept PLOGIs after unreg_rpi_cmpl - */ acc_plogi = 0; - if (((ndlp->nlp_DID & Fabric_DID_MASK) != - Fabric_DID_MASK) && - (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + + if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " - "NPort x%x deferred flg x%x " + "NPort x%x deferred flg x%lx " "Data:x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; lpfc_nlp_put(ndlp); @@ -5360,7 +5313,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LOG_NODE | LOG_DISCOVERY, "1444 Failed to allocate mempool " "unreg_rpi UNREG x%x, " - "DID x%x, flag x%x, " + "DID x%x, flag x%lx, " "ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); @@ -5370,7 +5323,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * not unloading. */ if (!test_bit(FC_UNLOADING, &vport->load_flag)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, @@ -5383,13 +5336,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) out: if (phba->sli_rev != LPFC_SLI_REV4) ndlp->nlp_rpi = 0; - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (acc_plogi) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 0; } @@ -5417,7 +5370,7 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { /* The mempool_alloc might sleep */ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); @@ -5505,7 +5458,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* Cleanup node for NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0900 Cleanup node for NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_dequeue_node(vport, ndlp); @@ -5550,9 +5503,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_els_abort(phba, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd 
= 0; del_timer_sync(&ndlp->nlp_delayfunc); @@ -5561,10 +5512,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->dev_loss_evt.evt_listp); list_del_init(&ndlp->recovery_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); - - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - return 0; } @@ -5639,7 +5586,7 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "0929 FIND node DID " - "Data: x%px x%x x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); @@ -5692,7 +5639,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport) iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "2025 FIND node DID MAPPED " - "Data: x%px x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->active_rrqs_xri_bitmap); @@ -5726,13 +5673,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6453 Setup New Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp; } @@ -5751,7 +5696,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6455 Setup RSCN Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5769,13 +5714,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) NLP_EVT_DEVICE_RECOVERY); } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6456 Skip Setup RSCN Node x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); ndlp = NULL; @@ -5783,7 +5726,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6457 Setup Active Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5794,7 +5737,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || (!vport->phba->nvmet_support && - ndlp->nlp_flag & NLP_RCV_PLOGI)) + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))) return NULL; if (vport->phba->nvmet_support) @@ -5804,10 +5747,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) * allows for rediscovery */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } return ndlp; } @@ -6178,7 +6118,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device * is marked for PLOGI. 
*/ @@ -6391,11 +6331,11 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0004 rpi:%x DID:%x flg:%x %d x%px\n", + "0004 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -6445,7 +6385,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) if (filter(ndlp, param)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "3185 FIND node filter %ps DID " - "ndlp x%px did x%x flg x%x st x%x " + "ndlp x%px did x%x flg x%lx st x%x " "xri x%x type x%x rpi x%x\n", filter, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6580,9 +6520,10 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0007 Init New ndlp x%px, rpi:x%x DID:%x " - "flg:x%x refcnt:%d\n", + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:x%x " + "flg:x%lx refcnt:%d\n", ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6614,7 +6555,7 @@ lpfc_nlp_release(struct kref *kref) struct lpfc_vport *vport = ndlp->vport; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node release: did:x%x flg:x%x type:x%x", + "node release: did:x%x flg:x%lx type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -6626,19 +6567,12 @@ lpfc_nlp_release(struct kref *kref) lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_cleanup_node(vport, ndlp); - /* Not all ELS transactions have registered the RPI with the port. - * In these cases the rpi usage is temporary and the node is - * released when the WQE is completed. Catch this case to free the - * RPI to the pool. Because this node is in the release path, a lock - * is unnecessary. All references are gone and the node has been - * dequeued. + /* All nodes are initialized with an RPI that needs to be released + * now. All references are gone and the node has been dequeued. 
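The lpfc_nlp_release() hunk here replaces the conditional NLP_RELEASE_RPI bookkeeping with a simpler ownership rule: on SLI4, every node allocates its RPI at init and frees it exactly once, in the kref release callback, after all references are gone. A toy model of that lifetime rule — the names are invented and the refcount is a plain integer, where the kernel uses kref_put() and lpfc_sli4_free_rpi():

#include <stdio.h>

struct demo_node {
	int rpi;	/* resource owned for the node's whole lifetime */
	int refs;
};

static int demo_alloc_rpi(void) { static int next = 100; return next++; }
static void demo_free_rpi(int rpi) { printf("free rpi %d\n", rpi); }

static struct demo_node node_init(void)
{
	/* Resource acquired once, at creation. */
	return (struct demo_node){ .rpi = demo_alloc_rpi(), .refs = 1 };
}

static void node_put(struct demo_node *n)
{
	/* Released exactly once, when the last reference drops; no
	 * "release pending" flag to keep in sync with the refcount. */
	if (--n->refs == 0)
		demo_free_rpi(n->rpi);
}

int main(void)
{
	struct demo_node n = node_init();
	n.refs++;	/* a second holder */
	node_put(&n);
	node_put(&n);	/* last put frees the RPI */
	return 0;
}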
*/ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && - !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - } + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } /* The node is not freed back to memory, it is released to a pool so @@ -6667,7 +6601,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp) if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node get: did:x%x flg:x%x refcnt:x%x", + "node get: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6699,7 +6633,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) { if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node put: did:x%x flg:x%x refcnt:x%x", + "node put: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); } else { @@ -6752,11 +6686,12 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); goto out; - } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + } else if (test_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag)) { ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "2624 RPI %x DID %x flag %x " + "2624 RPI %x DID %x flag %lx " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 50c761991191f..3ddcaa864f075 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3092,7 +3092,8 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_DISCOVERY, "0282 did:x%x ndlp:x%px " - "refcnt:%d xflags x%x nflag x%x\n", + "refcnt:%d xflags x%x " + "nflag x%lx\n", ndlp->nlp_DID, (void *)ndlp, kref_read(&ndlp->kref), ndlp->fc4_xpt_flags, @@ -3379,7 +3380,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) } /** - * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes. * @phba: pointer to lpfc hba data structure. * * Allocate RPIs for all active remote nodes. This is needed whenever @@ -3387,7 +3388,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) * is to fixup the temporary rpi assignments. **/ void -lpfc_sli4_node_prep(struct lpfc_hba *phba) +lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; @@ -3397,10 +3398,10 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) return; vports = lpfc_create_vport_work_array(phba); - if (vports == NULL) + if (!vports) return; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i]; i++) { if (test_bit(FC_UNLOADING, &vports[i]->load_flag)) continue; @@ -3409,14 +3410,20 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) nlp_listp) { rpi = lpfc_sli4_alloc_rpi(phba); if (rpi == LPFC_RPI_ALLOC_ERROR) { - /* TODO print log? 
*/ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0099 RPI alloc error for " + "ndlp x%px DID:x%06x " + "flg:x%lx\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag); continue; } ndlp->nlp_rpi = rpi; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0009 Assign RPI x%x to ndlp x%px " - "DID:x%06x flg:x%x\n", + "DID:x%06x flg:x%lx\n", ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, ndlp->nlp_flag); } @@ -3820,35 +3827,12 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) &vports[i]->fc_nodes, nlp_listp) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); - + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (offline || hba_pci_err) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_UNREG_INP | - NLP_RPI_REGISTERED); - spin_unlock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - lpfc_sli_rpi_release(vports[i], - ndlp); - } else { - lpfc_unreg_rpi(vports[i], ndlp); - } - /* - * Whenever an SLI4 port goes offline, free the - * RPI. Get a new RPI when the adapter port - * comes back online. - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - lpfc_printf_vlog(vports[i], KERN_INFO, - LOG_NODE | LOG_DISCOVERY, - "0011 Free RPI x%x on " - "ndlp: x%px did x%x\n", - ndlp->nlp_rpi, ndlp, - ndlp->nlp_DID); - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag); } if (ndlp->nlp_type & NLP_FABRIC) { @@ -6925,9 +6909,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4574716c8764f..4d88cfe71caed 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -65,7 +65,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { /* First, we MUST have a RPI registered */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) return 0; /* Compare the ADISC rsp WWNN / WWPN matches our internal node @@ -239,7 +239,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) /* Abort outstanding I/O on NPort */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); /* Clean up all fabric IOs first.*/ @@ -340,7 +340,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox) /* Now process the REG_RPI cmpl */ lpfc_mbx_cmpl_reg_login(phba, login_mbox); - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); kfree(save_iocb); } @@ -404,7 +404,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "0114 PLOGI chkparm OK Data: x%x x%x x%lx " "x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi, vport->port_state, @@ -429,7 +429,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* if already logged in, do 
implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) break; fallthrough; case NLP_STE_REG_LOGIN_ISSUE: @@ -449,7 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); @@ -480,7 +480,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); login_mbox = NULL; link_mbox = NULL; @@ -552,13 +552,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_can_disctmo(vport); } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -627,10 +627,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * this ELS request. The only way to do this is * to register, then unregister the RPI. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | - NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); } stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; @@ -665,9 +664,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Start the ball rolling by issuing REG_LOGIN here */ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); @@ -797,7 +795,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } @@ -814,9 +812,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -835,9 +831,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. 
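Several hunks in this file carry the driver's delayed-retry idiom: arm nlp_delayfunc for one second, set NLP_DELAY_TMO to record that the timer is pending, and stash the ELS command to replay in nlp_last_elscmd; other paths test the flag first so an already-armed timer is not re-armed. A compressed sketch of the shape, re-expressed with a single test_and_set_bit for brevity where the driver uses separate test and set steps; the timer itself is not simulated:

#include <stdio.h>

#define DEMO_DELAY_TMO 0	/* illustrative bit number */

static unsigned long flag;
static int last_cmd;		/* stands in for nlp_last_elscmd */

static int demo_test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long old = __atomic_fetch_or(addr, 1UL << nr,
					      __ATOMIC_SEQ_CST);
	return (old >> nr) & 1;
}

static void arm_delayed_retry(int cmd)
{
	/* Skip re-arming if the delay timer is already pending. */
	if (demo_test_and_set_bit(DEMO_DELAY_TMO, &flag))
		return;
	last_cmd = cmd;		/* command to replay when the timer fires */
	printf("timer armed, will replay cmd %d\n", last_cmd);
}

int main(void)
{
	arm_delayed_retry(1);	/* arms */
	arm_delayed_retry(2);	/* skipped; already pending */
	return 0;
}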
*/ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); if (els_cmd == ELS_CMD_PRLO) lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else @@ -890,9 +884,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -915,14 +907,12 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3204 Start nlpdelay on DID x%06x " - "nflag x%x lastels x%x ref cnt %u", + "nflag x%lx lastels x%x ref cnt %u", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -935,9 +925,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an @@ -978,7 +966,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport, out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " - "state x%x flags x%x port_type: x%x " + "state x%x flags x%lx port_type: x%x " "npr->initfcn: x%x npr->tgtfcn: x%x\n", cmd, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag, vport->port_type, @@ -1020,7 +1008,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); } if (npr->Retry && ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) @@ -1057,7 +1045,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, roles |= FC_RPORT_ROLE_FCP_TARGET; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport rolechg: role:x%x did:x%x flg:x%x", + "rport rolechg: role:x%x did:x%x flg:x%lx", roles, ndlp->nlp_DID, ndlp->nlp_flag); if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) @@ -1068,10 +1056,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, static uint32_t lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 0; } @@ -1081,16 +1067,12 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (test_bit(FC_RSCN_MODE, &vport->fc_flag) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && (ndlp->nlp_type & NLP_FCP_TARGET)))) { - spin_lock_irq(&ndlp->lock); - 
ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 1; } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); return 0; } @@ -1115,10 +1097,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. */ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -1143,11 +1125,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1437 release_rpi UNREG x%x " - "on NPort x%x flg x%x\n", + "on NPort x%x flg x%lx\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); @@ -1175,7 +1157,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0271 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); return ndlp->nlp_state; @@ -1190,13 +1172,12 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * working on the same NPortID, do nothing for this thread * to stop it. 
*/ - if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + if (!test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0272 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); - } return ndlp->nlp_state; } @@ -1230,9 +1211,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; @@ -1290,11 +1269,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NULL); } else { if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (vport->num_disc_nodes)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + vport->num_disc_nodes) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* Check if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { @@ -1356,9 +1333,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -1389,7 +1364,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + if (test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { /* Recovery from PLOGI collision logic */ return ndlp->nlp_state; } @@ -1418,7 +1393,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, goto out; /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0121 PLOGI chkparm OK Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) @@ -1446,14 +1421,14 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ed_tov = (phba->fc_edtov + 999999) / 1000000; } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } /* @@ -1476,7 +1451,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, LOG_TRACE_EVENT, "0133 PLOGI: no memory " "for config_link " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1500,7 +1475,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0018 PLOGI: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, 
ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1520,7 +1495,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; break; default: - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; } @@ -1535,8 +1510,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* decrement node reference count to the failed mbox * command */ @@ -1544,7 +1518,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0134 PLOGI: cannot issue reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } else { @@ -1552,7 +1526,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0135 PLOGI: cannot format reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } @@ -1605,18 +1579,15 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1636,9 +1607,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp->nlp_state; } @@ -1656,10 +1626,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { if (vport->num_disc_nodes) lpfc_more_adisc(vport); } @@ -1748,9 +1715,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; @@ -1789,18 +1754,15 @@ static uint32_t lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if 
(test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding ADISC */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding ADISC */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1820,9 +1782,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -1856,7 +1817,7 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, * transition to UNMAPPED provided the RPI has completed * registration. */ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); } else { @@ -1895,7 +1856,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -1906,7 +1867,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); list_del(&mb->list); phba->sli.mboxq_cnt--; @@ -1976,9 +1937,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_issue_els_logo(vport, ndlp, 0); @@ -1989,7 +1948,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* Only if we are not a fabric nport do we issue PRLI */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -2061,15 +2020,12 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -2084,17 +2040,16 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); /* If we are a target we won't immediately transition into PRLI, * so if REG_LOGIN already completed 
we don't need to ignore it. */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || !vport->phba->nvmet_support) - ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + set_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2228,7 +2183,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, + &ndlp->nlp_flag); } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; @@ -2272,7 +2228,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Both sides support FB. The target's first * burst size is a 512 byte encoded value. */ - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, nvpr); @@ -2287,7 +2243,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6029 NVME PRLI Cmpl w1 x%08x " - "w4 x%08x w5 x%08x flag x%x, " + "w4 x%08x w5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", be32_to_cpu(nvpr->word1), be32_to_cpu(nvpr->word4), @@ -2299,9 +2255,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { out: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; @@ -2353,18 +2307,15 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } @@ -2401,9 +2352,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2442,9 +2392,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } @@ -2483,9 +2431,8 @@ lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { ndlp->nlp_prev_state = 
NLP_STE_LOGO_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2591,8 +2538,9 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport, { ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); @@ -2653,9 +2601,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Send PRLO_ACC */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR. @@ -2665,7 +2611,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, - "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n", + "3422 DID x%06x nflag x%lx lastels x%x ref cnt %u\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -2685,8 +2631,9 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -2699,16 +2646,16 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ - if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) return ndlp->nlp_state; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); - } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + } else if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* send PLOGI immediately, move to PLOGI issue state */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2729,14 +2676,14 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in 
regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2767,15 +2714,15 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * or discovery in progress for this node. Starting discovery * here will affect the counting of discovery threads. */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && - !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2790,24 +2737,18 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); } return ndlp->nlp_state; } @@ -2844,7 +2785,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2877,7 +2818,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2896,12 +2837,11 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, /* SLI4 ports have preallocated logical rpis. 
*/ if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - if (ndlp->nlp_flag & NLP_LOGO_ACC) { + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); - } } else { - if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + if (test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2913,10 +2853,8 @@ static uint32_t lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; } lpfc_drop_node(vport, ndlp); @@ -2932,8 +2870,9 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -3146,7 +3085,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* DSM in event on NPort in state */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " - "state %d rpi x%x Data: x%x x%x\n", + "state %d rpi x%x Data: x%lx x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, ndlp->nlp_flag, data1); @@ -3163,12 +3102,12 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ((uint32_t)ndlp->nlp_type)); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x " - "rpi x%x Data: x%x x%x\n", + "rpi x%x Data: x%lx x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, - "DSM out: ste:%d did:x%x flg:x%x", + "DSM out: ste:%d did:x%x flg:x%lx", rc, ndlp->nlp_DID, ndlp->nlp_flag); /* Decrement the ndlp reference count held for this function */ lpfc_nlp_put(ndlp); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index fec23c7237304..e9d9884830f30 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1232,7 +1232,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ if ((phba->cfg_nvme_enable_fb) && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { req_len = lpfc_ncmd->nvmeCmd->payload_length; if (req_len < pnode->nvme_fb_size) wqe->fcp_iwrite.initial_xfer_len = @@ -2644,14 +2644,11 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference. Check if another thread has set * NLP_DROPPED. 
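Two details in the recovery-path hunks around here are worth calling out. First, a masked clear such as nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC) cannot be expressed as one atomic bitop, so it splits into two clear_bit() calls; another CPU may observe the state between them, which is acceptable because each flag carries independent meaning. Second, the spin_lock_irq() pair does not always disappear — where the critical section also touches a non-bitmap field such as nlp_fc4_type, the lock stays and now guards only that field. A small sketch of the split, with a stand-in helper and invented bit numbers:

#include <stdio.h>

#define DEMO_NODEV_REMOVE 1
#define DEMO_NPR_2B_DISC  4

static void demo_clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

int main(void)
{
	unsigned long flag = (1UL << DEMO_NODEV_REMOVE) |
			     (1UL << DEMO_NPR_2B_DISC);

	/* Old shape (under spin_lock_irq):
	 *	flag &= ~(NODEV_REMOVE | NPR_2B_DISC);
	 * New shape: one atomic clear per flag. */
	demo_clear_bit(DEMO_NODEV_REMOVE, &flag);
	demo_clear_bit(DEMO_NPR_2B_DISC, &flag);

	printf("flag=x%lx\n", flag);	/* x0 */
	return 0;
}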
*/ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, + &ndlp->nlp_flag)) { lpfc_nlp_put(ndlp); return; } - spin_unlock_irq(&ndlp->lock); } } } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 55c3e2c2bf8f7..e6c9112a88627 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2854,7 +2854,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { - if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) + if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag)) bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); } else { diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 11c974bffa720..905026a4782cf 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4629,7 +4629,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; if (vport->cfg_first_burst_size && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { u32 xrdy_len; fcpdl = scsi_bufflen(scsi_cmnd); @@ -5829,7 +5829,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %llu " - "rpi x%x nlp_flag x%x Data: x%x x%x\n", + "rpi x%x nlp_flag x%lx Data: x%x x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, iocbq->cmd_flag); @@ -6094,8 +6094,8 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { + clear_bit(NLP_NPR_ADISC, &pnode->nlp_flag); spin_lock_irqsave(&pnode->lock, flags); - pnode->nlp_flag &= ~NLP_NPR_ADISC; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; spin_unlock_irqrestore(&pnode->lock, flags); } @@ -6124,7 +6124,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) !pnode->logo_waitq) { pnode->logo_waitq = &waitq; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - pnode->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag); pnode->save_flags |= NLP_WAIT_FOR_LOGO; spin_unlock_irqrestore(&pnode->lock, flags); lpfc_unreg_rpi(vport, pnode); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6748fba48a07e..c4acf594286e5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2842,27 +2842,6 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) return; } -static void -__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - unsigned long iflags; - - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - ndlp->nlp_flag &= ~NLP_UNREG_INP; -} - -void -lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - __lpfc_sli_rpi_release(vport, ndlp); -} - /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler * @phba: Pointer to HBA context object. 
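The lpfc_nvme_unregister_port() change above collapses a lock/test/set/unlock sequence into test_and_set_bit(), which atomically returns the previous bit value, so exactly one racing caller wins and drops the reference. Sketch, with an assumed kref and an illustrative bit number:

#include <linux/bitops.h>
#include <linux/kref.h>

#define NODE_DROPPED	5	/* illustrative bit number */

static void node_release(struct kref *kref)
{
	/* final teardown of the containing object */
}

static void node_drop(unsigned long *flags, struct kref *kref)
{
	/*
	 * test_and_set_bit() reports the old value: only the first
	 * caller sees 0 and performs the one allowed kref_put().
	 */
	if (!test_and_set_bit(NODE_DROPPED, flags))
		kref_put(kref, node_release);
}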
@@ -2932,18 +2911,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_DISCOVERY, "1438 UNREG cmpl deferred mbox x%x " - "on NPort x%x Data: x%x x%x x%px x%lx x%x\n", + "on NPort x%x Data: x%lx x%x x%px x%lx x%x\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp, vport->load_flag, kref_read(&ndlp->kref)); - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - __lpfc_sli_rpi_release(vport, ndlp); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The unreg_login mailbox is complete and had a @@ -2991,6 +2970,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_nodelist *ndlp; + bool unreg_inp; ndlp = pmb->ctx_ndlp; if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { @@ -3003,20 +2983,26 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, "0010 UNREG_LOGIN vpi:x%x " - "rpi:%x DID:%x defer x%x flg x%x " + "rpi:%x DID:%x defer x%x flg x%lx " "x%px\n", vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + + /* Cleanup the nlp_flag now that the UNREG RPI + * has completed. + */ + unreg_inp = test_and_clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); /* Check to see if there are any deferred * events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != - NLP_EVT_NOTHING_PENDING)) { + if (unreg_inp && + ndlp->nlp_defer_did != + NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog( vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, @@ -3025,14 +3011,12 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) "NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi( vport, ndlp->nlp_DID, 0); - } else { - __lpfc_sli_rpi_release(vport, ndlp); } + lpfc_nlp_put(ndlp); } } @@ -6020,9 +6004,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); - memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); - strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, + memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); + phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0'; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " @@ -8750,6 +8734,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_sli_config_mbox_opcode_get( phba, mboxq), rc, dd); + /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. 
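The BIOSVersion hunk above swaps memset()+strlcat() for a memcpy() of the full destination size followed by explicit termination, the safe pattern when the source is fixed-size firmware data that may not be NUL-terminated. Sketch (VER_LEN is an assumed buffer size):

#include <linux/string.h>
#include <linux/types.h>

#define VER_LEN 32	/* illustrative buffer size */

static void copy_fw_string(char dst[VER_LEN], const u8 *src)
{
	/*
	 * Firmware strings are not guaranteed to be NUL-terminated:
	 * copy the whole field, then terminate explicitly.
	 */
	memcpy(dst, src, VER_LEN);
	dst[VER_LEN - 1] = '\0';
}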
@@ -8762,6 +8747,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + lpfc_sli4_node_rpi_restore(phba); + lpfc_set_host_data(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); @@ -8949,7 +8936,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_iocblist; } - lpfc_sli4_node_prep(phba); if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { @@ -14354,9 +14340,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) * an unsolicited PLOGI from the same NPortId from * starting another mailbox transaction. */ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_UNREG_INP; - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; @@ -19105,9 +19089,9 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * to free ndlp when transmit completes */ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE && - !(ndlp->nlp_flag & NLP_DROPPED) && + !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) { - ndlp->nlp_flag |= NLP_DROPPED; + set_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21125,11 +21109,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Unregister the RPI when mailbox complete */ mb->mbox_flag |= LPFC_MBX_IMED_UNREG; restart_loop = 1; - spin_unlock_irq(&phba->hbalock); - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); - spin_lock_irq(&phba->hbalock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); break; } } @@ -21144,9 +21124,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) ndlp = mb->ctx_ndlp; mb->ctx_ndlp = NULL; if (ndlp) { - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21155,9 +21133,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Release the ndlp with the cleaned-up active mailbox command */ if (act_mbx_ndlp) { - spin_lock(&act_mbx_ndlp->lock); - act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&act_mbx_ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag); lpfc_nlp_put(act_mbx_ndlp); } } diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 7a4d4d8e2ad55..9e0e357633779 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -496,7 +496,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) !ndlp->logo_waitq) { ndlp->logo_waitq = &waitq; ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); ndlp->save_flags |= NLP_WAIT_FOR_LOGO; } spin_unlock_irq(&ndlp->lock); @@ -515,8 +515,8 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } /* Error - clean up node flags. */ + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); @@ -708,7 +708,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, "1829 DA_ID issue status %d. 
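The lpfc_cleanup_pending_mbox() hunks above show why the bitops conversion pays off: the old code had to drop hbalock, take ndlp->lock, clear the flag, and reacquire hbalock; clear_bit() is safe under any (or no) lock, so the dance disappears. Shape of the simplification, with placeholder locks:

#include <linux/bitops.h>
#include <linux/spinlock.h>

static void scan_under_lock(spinlock_t *hbalock, unsigned long *flags)
{
	spin_lock_irq(hbalock);
	/* ... walk a list protected by hbalock ... */
	clear_bit(3, flags);	/* atomic: no need to drop hbalock */
	spin_unlock_irq(hbalock);
}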
" - "SFlag x%x NState x%x, NFlag x%x " + "SFlag x%x NState x%x, NFlag x%lx " "Rpi x%x\n", rc, ndlp->save_flags, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 21f22e913cd08..8a44e01ebf9b6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5910,7 +5910,11 @@ megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) const struct cpumask *mask; if (instance->perf_mode == MR_BALANCED_PERF_MODE) { - mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); + int nid = dev_to_node(&instance->pdev->dev); + + if (nid == NUMA_NO_NODE) + nid = 0; + mask = cpumask_of_node(nid); for (i = 0; i < instance->low_latency_index_start; i++) { irq = pci_irq_vector(instance->pdev, i); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index e979ec1478c18..e895bd25098fd 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -699,7 +699,7 @@ static u32 qedf_get_login_failures(void *cookie) } static struct qed_fcoe_cb_ops qedf_cb_ops = { - { + .common = { .link_update = qedf_link_update, .bw_update = qedf_bw_update, .schedule_recovery_handler = qedf_schedule_recovery_handler, diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 0cd6f3e148824..13b6cb1b93acd 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2147,7 +2147,7 @@ qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, sizeof(*pdb), DMA_FROM_DEVICE); - if (!pdb_dma) { + if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) { ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); return QLA_MEMORY_ALLOC_FAILED; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index d91f54a6e752f..97e9ca5a2a02c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -3420,6 +3420,8 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, task->data_count, DMA_TO_DEVICE); + if (dma_mapping_error(&ha->pdev->dev, task_data->data_dma)) + return -ENOMEM; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 8274fe0ec7146..7a5bebf5b096c 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3526,7 +3526,7 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.new_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_new_fnode; } index = transport->new_flashnode(shost, data, len); @@ -3536,7 +3536,6 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport, else err = -EIO; -put_host: scsi_host_put(shost); exit_new_fnode: @@ -3561,7 +3560,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.del_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_del_fnode; } idx = ev->u.del_flashnode.flashnode_idx; @@ -3603,7 +3602,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.login_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_login_fnode; } idx = 
ev->u.login_flashnode.flashnode_idx; @@ -3655,7 +3654,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_logout_fnode; } idx = ev->u.logout_flashnode.flashnode_idx; @@ -3705,7 +3704,7 @@ static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_logout_sid; } session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 8947dab132d78..86dde3e7debba 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3388,7 +3388,7 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp) rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb7); - if (vpd && vpd->len >= 2) + if (vpd && vpd->len >= 6) sdkp->rscs = vpd->data[5] & 1; rcu_read_unlock(); } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index d919a74746a05..c5a21e369e167 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5990,7 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; pqi_stream_data->last_accessed = jiffies; - per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++; + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++; return true; } @@ -6069,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { raid_bypassed = true; - per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++; + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++; } } if (!raid_bypassed) @@ -9708,6 +9708,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0089) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x00a3) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1ff9, 0x00a1) @@ -10044,6 +10048,30 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14f0) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4044) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4054) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4084) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4094) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4140) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4240) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) @@ -10260,6 +10288,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1cc4, 0x0201) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1018, 0x8238) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f3f, 0x0610) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0220) @@ -10268,10 +10304,30 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 
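The smartpqi hunks above switch to raw_smp_processor_id() because the statistics are updated from preemptible context, where smp_processor_id() triggers a debug warning; for a counter that only needs to be approximately per-CPU, the raw variant is the accepted idiom. Sketch, with an assumed per-CPU stats struct:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

struct io_stats {
	u64 bypass_cnt;
};

static void count_bypass(struct io_stats __percpu *stats)
{
	/*
	 * Preemption may migrate us right after this read; for a
	 * statistic that is merely spread per CPU, that is fine.
	 */
	per_cpu_ptr(stats, raw_smp_processor_id())->bypass_cnt++;
}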
PCI_VENDOR_ID_LENOVO, 0x0221) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0222) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0223) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0224) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0225) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0520) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0521) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0522) @@ -10292,6 +10348,26 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0623) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0624) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0625) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0626) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0627) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0628) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1014, 0x0718) @@ -10320,6 +10396,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1137, 0x0300) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ded, 0x3301) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1ff9, 0x0045) @@ -10468,6 +10548,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1f51, 0x100a) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x100b) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1f51, 0x100e) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 48b0ca92b44fb..954a1cc50ba74 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -362,7 +362,7 @@ MODULE_PARM_DESC(ring_avail_percent_lowater, /* * Timeout in seconds for all devices managed by this driver. 
*/ -static int storvsc_timeout = 180; +static const int storvsc_timeout = 180; #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) static struct scsi_transport_template *fc_transport_template; @@ -768,7 +768,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) return; } - t = wait_for_completion_timeout(&request->wait_event, 10*HZ); + t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ); if (t == 0) { dev_err(dev, "Failed to create sub-channel: timed out\n"); return; @@ -833,7 +833,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device, if (ret != 0) return ret; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); + t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ); if (t == 0) return -ETIMEDOUT; @@ -1351,6 +1351,8 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size, return ret; ret = storvsc_channel_init(device, is_fc); + if (ret) + vmbus_close(device->channel); return ret; } @@ -1668,7 +1670,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) if (ret != 0) return FAILED; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); + t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ); if (t == 0) return TIMEOUT_ERROR; diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c index 888b5840c0150..d2e63277f0aa9 100644 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c @@ -166,7 +166,7 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop, int rc; lpc_snoop->irq = platform_get_irq(pdev, 0); - if (!lpc_snoop->irq) + if (lpc_snoop->irq < 0) return -ENODEV; rc = devm_request_irq(dev, lpc_snoop->irq, @@ -200,11 +200,15 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR; lpc_snoop->chan[channel].miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel); + if (!lpc_snoop->chan[channel].miscdev.name) { + rc = -ENOMEM; + goto err_free_fifo; + } lpc_snoop->chan[channel].miscdev.fops = &snoop_fops; lpc_snoop->chan[channel].miscdev.parent = dev; rc = misc_register(&lpc_snoop->chan[channel].miscdev); if (rc) - return rc; + goto err_free_fifo; /* Enable LPC snoop channel at requested port */ switch (channel) { @@ -221,7 +225,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, hicrb_en = HICRB_ENSNP1D; break; default: - return -EINVAL; + rc = -EINVAL; + goto err_misc_deregister; } regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en); @@ -231,6 +236,12 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en); + return 0; + +err_misc_deregister: + misc_deregister(&lpc_snoop->chan[channel].miscdev); +err_free_fifo: + kfifo_free(&lpc_snoop->chan[channel].fifo); return rc; } diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c index 463b1c5288318..db25f406878b0 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -219,21 +219,29 @@ static void pmic_glink_altmode_worker(struct work_struct *work) { struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work); struct pmic_glink_altmode *altmode = alt_port->altmode; + enum drm_connector_status conn_status; typec_switch_set(alt_port->typec_switch, alt_port->orientation); - if (alt_port->svid == USB_TYPEC_DP_SID && 
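The aspeed-lpc-snoop changes above convert early returns into a classic unwind ladder so every failure after misc_register() also frees the kfifo and deregisters the device. The general shape, with hypothetical helper names standing in for the driver's calls:

struct chan;
int alloc_fifo(struct chan *c);
int register_misc(struct chan *c);
int program_hw(struct chan *c);
void deregister_misc(struct chan *c);
void free_fifo(struct chan *c);

static int enable_chan(struct chan *c)
{
	int rc;

	rc = alloc_fifo(c);
	if (rc)
		return rc;		/* nothing to unwind yet */

	rc = register_misc(c);
	if (rc)
		goto err_free_fifo;

	rc = program_hw(c);
	if (rc)
		goto err_deregister;	/* unwind in reverse order */

	return 0;

err_deregister:
	deregister_misc(c);
err_free_fifo:
	free_fifo(c);
	return rc;
}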
alt_port->mode == 0xff) - pmic_glink_altmode_safe(altmode, alt_port); - else if (alt_port->svid == USB_TYPEC_DP_SID) - pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode, - alt_port->hpd_state, alt_port->hpd_irq); - else - pmic_glink_altmode_enable_usb(altmode, alt_port); + if (alt_port->svid == USB_TYPEC_DP_SID) { + if (alt_port->mode == 0xff) { + pmic_glink_altmode_safe(altmode, alt_port); + } else { + pmic_glink_altmode_enable_dp(altmode, alt_port, + alt_port->mode, + alt_port->hpd_state, + alt_port->hpd_irq); + } - drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, - alt_port->hpd_state ? - connector_status_connected : - connector_status_disconnected); + if (alt_port->hpd_state) + conn_status = connector_status_connected; + else + conn_status = connector_status_disconnected; + + drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, conn_status); + } else { + pmic_glink_altmode_enable_usb(altmode, alt_port); + } pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index); } diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index cefcbd61c6281..95d8a8f728db5 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -578,7 +578,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev) smp2p->mbox_client.knows_txdone = true; smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0); if (IS_ERR(smp2p->mbox_chan)) { - if (PTR_ERR(smp2p->mbox_chan) != -ENODEV) + if (PTR_ERR(smp2p->mbox_chan) != -ENOENT) return PTR_ERR(smp2p->mbox_chan); smp2p->mbox_chan = NULL; diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index 1ca857c2a4aa3..8df12efeea21c 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -745,7 +745,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - reset = devm_reset_control_get_optional_exclusive(dev, NULL); + reset = devm_reset_control_get_optional_shared(dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index ef3a7226db125..a95badb7b7114 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -523,7 +523,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) return PTR_ERR(clk); } - reset = devm_reset_control_get_optional_exclusive(dev, NULL); + reset = devm_reset_control_get_optional_shared(dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index f9463f263fba1..12f8073cb5968 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1929,10 +1929,10 @@ static int cqspi_probe(struct platform_device *pdev) goto probe_setup_failed; } - ret = devm_pm_runtime_enable(dev); - if (ret) { - if (cqspi->rx_chan) - dma_release_channel(cqspi->rx_chan); + pm_runtime_enable(dev); + + if (cqspi->rx_chan) { + dma_release_channel(cqspi->rx_chan); goto probe_setup_failed; } @@ -1952,6 +1952,7 @@ static int cqspi_probe(struct platform_device *pdev) return 0; probe_setup_failed: cqspi_controller_enable(cqspi, 0); + pm_runtime_disable(dev); probe_reset_failed: if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); @@ -1970,7 +1971,8 @@ static void cqspi_remove(struct platform_device *pdev) if (cqspi->rx_chan) dma_release_channel(cqspi->rx_chan); - clk_disable_unprepare(cqspi->clk); + if (pm_runtime_get_sync(&pdev->dev) >= 0) + clk_disable(cqspi->clk); if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); 
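The smp2p fix above corrects the "optional resource" pattern: mbox_request_channel() reports a missing "mboxes" property as -ENOENT, so that is the error to tolerate, while everything else (including -EPROBE_DEFER) must still be propagated. Sketch:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mailbox_client.h>

static int get_optional_mbox(struct mbox_client *cl,
			     struct mbox_chan **out)
{
	struct mbox_chan *chan = mbox_request_channel(cl, 0);

	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) != -ENOENT)
			return PTR_ERR(chan);	/* real error / defer */
		chan = NULL;			/* absent: carry on */
	}
	*out = chan;
	return 0;
}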
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 7c43df252328d..e26363ae74890 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -983,11 +983,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { status = dspi_dma_xfer(dspi); } else { + /* + * Reinitialize the completion before transferring data + * to avoid the case where it might remain in the done + * state due to a spurious interrupt from a previous + * transfer. This could falsely signal that the current + * transfer has completed. + */ + if (dspi->irq) + reinit_completion(&dspi->xfer_done); + dspi_fifo_write(dspi); if (dspi->irq) { wait_for_completion(&dspi->xfer_done); - reinit_completion(&dspi->xfer_done); } else { do { status = dspi_poll(dspi); diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 79bac30e79af6..21e357966d2a2 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -839,6 +839,19 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = { .get_name = fsl_qspi_get_name, }; +static void fsl_qspi_cleanup(void *data) +{ + struct fsl_qspi *q = data; + + /* disable the hardware */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); + qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); + + fsl_qspi_clk_disable_unprep(q); + + mutex_destroy(&q->lock); +} + static int fsl_qspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -928,15 +941,16 @@ static int fsl_qspi_probe(struct platform_device *pdev) ctlr->dev.of_node = np; + ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); + if (ret) + goto err_put_ctrl; + ret = devm_spi_register_controller(dev, ctlr); if (ret) - goto err_destroy_mutex; + goto err_put_ctrl; return 0; -err_destroy_mutex: - mutex_destroy(&q->lock); - err_disable_clk: fsl_qspi_clk_disable_unprep(q); @@ -947,19 +961,6 @@ static int fsl_qspi_probe(struct platform_device *pdev) return ret; } -static void fsl_qspi_remove(struct platform_device *pdev) -{ - struct fsl_qspi *q = platform_get_drvdata(pdev); - - /* disable the hardware */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); - qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); - - fsl_qspi_clk_disable_unprep(q); - - mutex_destroy(&q->lock); -} - static int fsl_qspi_suspend(struct device *dev) { return 0; @@ -997,7 +998,6 @@ static struct platform_driver fsl_qspi_driver = { .pm = &fsl_qspi_pm_ops, }, .probe = fsl_qspi_probe, - .remove_new = fsl_qspi_remove, }; module_platform_driver(fsl_qspi_driver); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 532b2e9c31d0d..4c5f12b76de6a 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -134,6 +134,7 @@ struct omap2_mcspi { size_t max_xfer_len; u32 ref_clk_hz; bool use_multi_mode; + bool last_msg_kept_cs; }; struct omap2_mcspi_cs { @@ -1269,6 +1270,10 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, * multi-mode is applicable. 
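The fsl-dspi comment above describes a subtle race: re-arming the completion after waiting means a spurious interrupt from the previous transfer can leave it in the done state, so reinit_completion() must run before the hardware is kicked. Sketch (the transfer start is elided):

#include <linux/completion.h>

static void do_xfer(struct completion *done)
{
	/*
	 * Re-arm before starting: a stale complete() from a previous
	 * (possibly spurious) interrupt must not satisfy this wait.
	 */
	reinit_completion(done);

	/* start_hw_transfer(); */

	wait_for_completion(done);
}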
*/ mcspi->use_multi_mode = true; + + if (mcspi->last_msg_kept_cs) + mcspi->use_multi_mode = false; + list_for_each_entry(tr, &msg->transfers, transfer_list) { if (!tr->bits_per_word) bits_per_word = msg->spi->bits_per_word; @@ -1287,18 +1292,19 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, mcspi->use_multi_mode = false; } - /* Check if transfer asks to change the CS status after the transfer */ - if (!tr->cs_change) - mcspi->use_multi_mode = false; - - /* - * If at least one message is not compatible, switch back to single mode - * - * The bits_per_word of certain transfer can be different, but it will have no - * impact on the signal itself. - */ - if (!mcspi->use_multi_mode) - break; + if (list_is_last(&tr->transfer_list, &msg->transfers)) { + /* Check if transfer asks to keep the CS status after the whole message */ + if (tr->cs_change) { + mcspi->use_multi_mode = false; + mcspi->last_msg_kept_cs = true; + } else { + mcspi->last_msg_kept_cs = false; + } + } else { + /* Check if transfer asks to change the CS status after the transfer */ + if (!tr->cs_change) + mcspi->use_multi_mode = false; + } } omap2_mcspi_set_mode(ctlr); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 3519656515ea1..1870f8c852131 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -918,6 +918,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, void *rx_buf = t->rx_buf; unsigned int len = t->len; unsigned int bits = t->bits_per_word; + unsigned int max_wdlen = 256; unsigned int bytes_per_word; unsigned int words; int n; @@ -931,17 +932,17 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, if (!spi_controller_is_target(p->ctlr)) sh_msiof_spi_set_clk_regs(p, t); + if (tx_buf) + max_wdlen = min(max_wdlen, p->tx_fifo_size); + if (rx_buf) + max_wdlen = min(max_wdlen, p->rx_fifo_size); + while (ctlr->dma_tx && len > 15) { /* * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. word swapping. 
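The omap2-mcspi rework above distinguishes the final transfer of a message, where cs_change means "keep CS asserted afterwards", from intermediate transfers, where it means the opposite; list_is_last() detects the final entry. Shape of that walk, with simplified types:

#include <linux/list.h>
#include <linux/types.h>

struct xfer {
	struct list_head list;
	bool cs_change;
};

static bool message_keeps_cs(struct list_head *transfers)
{
	struct xfer *t;

	list_for_each_entry(t, transfers, list) {
		if (list_is_last(&t->list, transfers))
			return t->cs_change;	/* last: keep CS if set */
		/* intermediate cs_change handling would go here */
	}
	return false;
}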
*/ - unsigned int l = 0; - - if (tx_buf) - l = min(round_down(len, 4), p->tx_fifo_size * 4); - if (rx_buf) - l = min(round_down(len, 4), p->rx_fifo_size * 4); + unsigned int l = min(round_down(len, 4), max_wdlen * 4); if (bits <= 8) { copy32 = copy_bswap32; diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index 2ee6755b43f54..3019f57e65841 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host, else reg |= SUN4I_CTL_DHB; + /* Now that the settings are correct, enable the interface */ + reg |= SUN4I_CTL_ENABLE; + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); /* Ensure that we have a parent clock fast enough */ @@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev) } sun4i_spi_write(sspi, SUN4I_CTL_REG, - SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); + SUN4I_CTL_MASTER | SUN4I_CTL_TP); return 0; diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 2d48ad844fb80..92348ebc60c78 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -134,7 +134,7 @@ #define QSPI_COMMAND_VALUE_SET(X) (((x) & 0xFF) << 0) #define QSPI_CMB_SEQ_CMD_CFG 0x1a0 -#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_COMMAND_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13) #define QSPI_COMMAND_SDR_DDR BIT(12) #define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -147,7 +147,7 @@ #define QSPI_ADDRESS_VALUE_SET(X) (((x) & 0xFFFF) << 0) #define QSPI_CMB_SEQ_ADDR_CFG 0x1ac -#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_ADDRESS_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13) #define QSPI_ADDRESS_SDR_DDR BIT(12) #define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -1036,10 +1036,6 @@ static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len) { u32 addr_config = 0; - /* Extract Address configuration and value */ - is_ddr = 0; //Only SDR mode supported - bus_width = 0; //X1 mode - if (is_ddr) addr_config |= QSPI_ADDRESS_SDR_DDR; else @@ -1079,13 +1075,13 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, switch (transfer_phase) { case CMD_TRANSFER: /* X1 SDR mode */ - cmd_config = tegra_qspi_cmd_config(false, 0, + cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits, xfer->len); cmd_value = *((const u8 *)(xfer->tx_buf)); break; case ADDR_TRANSFER: /* X1 SDR mode */ - addr_config = tegra_qspi_addr_config(false, 0, + addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits, xfer->len); address_value = *((const u32 *)(xfer->tx_buf)); break; @@ -1163,26 +1159,22 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, ret = -EIO; goto exit; } - if (!xfer->cs_change) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } break; default: ret = -EINVAL; goto exit; } msg->actual_length += xfer->len; + if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) { + tegra_qspi_transfer_end(spi); + spi_transfer_delay_exec(xfer); + } transfer_phase++; } ret = 0; exit: msg->status = ret; - if (ret < 0) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } return ret; } diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 4ae1a7039418b..1f806ee966c37 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -411,7 +411,7 @@ static ssize_t 
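The tegra210-quad macro fix above changes the field encoding so SPI bus widths 1, 2 and 4 map to register values 0, 1 and 2; `(x >> 1) & 0x3` produces exactly that for those three inputs. A quick check (macro name mirrors the patch, shift position assumed from the hunk):

/*
 * width 1: (1 >> 1) & 3 == 0  -> X1
 * width 2: (2 >> 1) & 3 == 1  -> X2
 * width 4: (4 >> 1) & 3 == 2  -> X4
 */
#define QSPI_X1_X2_X4(x)	((((x) >> 1) & 0x3) << 13)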
ad5933_store(struct device *dev, ret = ad5933_cmd(st, 0); break; case AD5933_OUT_SETTLING_CYCLES: - val = clamp(val, (u16)0, (u16)0x7FF); + val = clamp(val, (u16)0, (u16)0x7FC); st->settling_cycles = val; /* 2x, 4x handling, see datasheet */ diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c index ac398b5a97360..a1d941b0be00b 100644 --- a/drivers/staging/media/rkvdec/rkvdec.c +++ b/drivers/staging/media/rkvdec/rkvdec.c @@ -213,8 +213,14 @@ static int rkvdec_enum_framesizes(struct file *file, void *priv, if (!fmt) return -EINVAL; - fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; - fsize->stepwise = fmt->frmsize; + fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; + fsize->stepwise.min_width = 1; + fsize->stepwise.max_width = fmt->frmsize.max_width; + fsize->stepwise.step_width = 1; + fsize->stepwise.min_height = 1; + fsize->stepwise.max_height = fmt->frmsize.max_height; + fsize->stepwise.step_height = 1; + return 0; } diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c index 1e9eff01b1aa5..e9f382c280d9b 100644 --- a/drivers/staging/rtl8723bs/core/rtw_security.c +++ b/drivers/staging/rtl8723bs/core/rtw_security.c @@ -868,29 +868,21 @@ static signed int aes_cipher(u8 *key, uint hdrlen, num_blocks, payload_index; u8 pn_vector[6]; - u8 mic_iv[16]; - u8 mic_header1[16]; - u8 mic_header2[16]; - u8 ctr_preload[16]; + u8 mic_iv[16] = {}; + u8 mic_header1[16] = {}; + u8 mic_header2[16] = {}; + u8 ctr_preload[16] = {}; /* Intermediate Buffers */ - u8 chain_buffer[16]; - u8 aes_out[16]; - u8 padded_buffer[16]; + u8 chain_buffer[16] = {}; + u8 aes_out[16] = {}; + u8 padded_buffer[16] = {}; u8 mic[8]; uint frtype = GetFrameType(pframe); uint frsubtype = GetFrameSubType(pframe); frsubtype = frsubtype>>4; - memset((void *)mic_iv, 0, 16); - memset((void *)mic_header1, 0, 16); - memset((void *)mic_header2, 0, 16); - memset((void *)ctr_preload, 0, 16); - memset((void *)chain_buffer, 0, 16); - memset((void *)aes_out, 0, 16); - memset((void *)padded_buffer, 0, 16); - if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN)) a4_exists = 0; else @@ -1080,15 +1072,15 @@ static signed int aes_decipher(u8 *key, uint hdrlen, num_blocks, payload_index; signed int res = _SUCCESS; u8 pn_vector[6]; - u8 mic_iv[16]; - u8 mic_header1[16]; - u8 mic_header2[16]; - u8 ctr_preload[16]; + u8 mic_iv[16] = {}; + u8 mic_header1[16] = {}; + u8 mic_header2[16] = {}; + u8 ctr_preload[16] = {}; /* Intermediate Buffers */ - u8 chain_buffer[16]; - u8 aes_out[16]; - u8 padded_buffer[16]; + u8 chain_buffer[16] = {}; + u8 aes_out[16] = {}; + u8 padded_buffer[16] = {}; u8 mic[8]; uint frtype = GetFrameType(pframe); @@ -1096,14 +1088,6 @@ static signed int aes_decipher(u8 *key, uint hdrlen, frsubtype = frsubtype>>4; - memset((void *)mic_iv, 0, 16); - memset((void *)mic_header1, 0, 16); - memset((void *)mic_header2, 0, 16); - memset((void *)ctr_preload, 0, 16); - memset((void *)chain_buffer, 0, 16); - memset((void *)aes_out, 0, 16); - memset((void *)padded_buffer, 0, 16); - /* start to decrypt the payload */ num_blocks = (plen-8) / 16; /* plen including LLC, payload_length and mic) */ diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 4f4ad6af416c8..47fe50b80c229 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1842,7 +1842,9 @@ core_scsi3_decode_spec_i_port( } kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); - core_scsi3_lunacl_undepend_item(dest_se_deve); + + if (dest_se_deve) + 
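The rtw_security hunks above replace seven memset() calls with `= {}` initializers, which zero the arrays at declaration; same semantics, less room for a forgotten or mis-sized memset. Sketch:

#include <linux/types.h>

static void build_blocks(void)
{
	u8 mic_iv[16] = {};	/* zeroed at declaration ... */
	u8 aes_out[16] = {};	/* ... no memset(buf, 0, 16) needed */

	(void)mic_iv;		/* placeholders for real use */
	(void)aes_out;
}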
core_scsi3_lunacl_undepend_item(dest_se_deve); if (is_local) continue; diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index f3af5666bb118..f9ef7d94cebd7 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -728,12 +728,21 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, return true; } +static void notif_work_fn(struct work_struct *work) +{ + struct optee_ffa *optee_ffa = container_of(work, struct optee_ffa, + notif_work); + struct optee *optee = container_of(optee_ffa, struct optee, ffa); + + optee_do_bottom_half(optee->ctx); +} + static void notif_callback(int notify_id, void *cb_data) { struct optee *optee = cb_data; if (notify_id == optee->ffa.bottom_half_value) - optee_do_bottom_half(optee->ctx); + queue_work(optee->ffa.notif_wq, &optee->ffa.notif_work); else optee_notif_send(optee, notify_id); } @@ -817,9 +826,11 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev) struct optee *optee = ffa_dev_get_drvdata(ffa_dev); u32 bottom_half_id = optee->ffa.bottom_half_value; - if (bottom_half_id != U32_MAX) + if (bottom_half_id != U32_MAX) { ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, bottom_half_id); + destroy_workqueue(optee->ffa.notif_wq); + } optee_remove_common(optee); mutex_destroy(&optee->ffa.mutex); @@ -835,6 +846,13 @@ static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev, u32 notif_id = 0; int rc; + INIT_WORK(&optee->ffa.notif_work, notif_work_fn); + optee->ffa.notif_wq = create_workqueue("optee_notification"); + if (!optee->ffa.notif_wq) { + rc = -EINVAL; + goto err; + } + while (true) { rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev, is_per_vcpu, @@ -851,19 +869,24 @@ static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev, * notifications in that case. 
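The OP-TEE FFA change above stops calling the bottom half directly from the notifier callback, which may run in atomic context, and queues a work item instead; the callback only does queue_work(), and the sleeping work runs in process context. Minimal sketch of that split (names are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *notif_wq;
static struct work_struct notif_work;

static void notif_work_fn(struct work_struct *work)
{
	/* process context: sleeping calls are allowed here */
}

static void notif_callback(int id, void *data)
{
	queue_work(notif_wq, &notif_work);	/* safe in atomic context */
}

static int notif_init(void)
{
	INIT_WORK(&notif_work, notif_work_fn);
	notif_wq = create_workqueue("example_notif");
	return notif_wq ? 0 : -ENOMEM;
}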
*/ if (rc != -EACCES) - return rc; + goto err_wq; notif_id++; if (notif_id >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) - return rc; + goto err_wq; } optee->ffa.bottom_half_value = notif_id; rc = enable_async_notif(optee); - if (rc < 0) { - ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, - notif_id); - optee->ffa.bottom_half_value = U32_MAX; - } + if (rc < 0) + goto err_rel; + + return 0; +err_rel: + ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, notif_id); +err_wq: + destroy_workqueue(optee->ffa.notif_wq); +err: + optee->ffa.bottom_half_value = U32_MAX; return rc; } diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index dc0f355ef72aa..9526087f0e680 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -165,6 +165,8 @@ struct optee_ffa { /* Serializes access to @global_ids */ struct mutex mutex; struct rhashtable global_ids; + struct workqueue_struct *notif_wq; + struct work_struct notif_work; }; struct optee; diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index d113679b1e2d7..acc7998758ad8 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -19,7 +20,7 @@ #define TEE_NUM_DEVICES 32 -#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x)) +#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x))) #define TEE_UUID_NS_NAME_SIZE 128 @@ -487,7 +488,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx, if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; - if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len) return -EINVAL; if (arg.num_params) { @@ -565,7 +566,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx, if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; - if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len) return -EINVAL; if (arg.num_params) { @@ -699,7 +700,7 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx, if (get_user(num_params, &uarg->num_params)) return -EFAULT; - if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len) + if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len) return -EINVAL; params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); @@ -798,7 +799,7 @@ static int tee_ioctl_supp_send(struct tee_context *ctx, get_user(num_params, &uarg->num_params)) return -EFAULT; - if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len) + if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len) return -EINVAL; params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index 3295b27ab70d2..ae063d1bc95f8 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -209,6 +209,13 @@ static const struct debugfs_reg32 lvts_regs[] = { LVTS_DEBUG_FS_REGS(LVTS_CLKEN), }; +static void lvts_debugfs_exit(void *data) +{ + struct lvts_domain *lvts_td = data; + + debugfs_remove_recursive(lvts_td->dom_dentry); +} + static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td) { struct debugfs_regset32 *regset; @@ -241,12 +248,7 @@ static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td) 
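The tee_core hunks above route the user-controlled num_params through size_mul()/size_add() from <linux/overflow.h>; on overflow these helpers saturate at SIZE_MAX, so the subsequent size check fails safely instead of wrapping. Sketch (the param layout is illustrative):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct param { u64 a, b, c, d; };

static int check_len(size_t hdr_len, u32 num_params, size_t buf_len)
{
	/*
	 * Saturates instead of wrapping: an attacker-chosen
	 * num_params cannot alias a small buf_len.
	 */
	if (size_add(hdr_len, size_mul(sizeof(struct param),
				       num_params)) != buf_len)
		return -EINVAL;
	return 0;
}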
debugfs_create_regset32("registers", 0400, dentry, regset); } - return 0; -} - -static void lvts_debugfs_exit(struct lvts_domain *lvts_td) -{ - debugfs_remove_recursive(lvts_td->dom_dentry); + return devm_add_action_or_reset(dev, lvts_debugfs_exit, lvts_td); } #else @@ -257,8 +259,6 @@ static inline int lvts_debugfs_init(struct device *dev, return 0; } -static void lvts_debugfs_exit(struct lvts_domain *lvts_td) { } - #endif static int lvts_raw_to_temp(u32 raw_temp, int temp_factor) @@ -1352,8 +1352,6 @@ static void lvts_remove(struct platform_device *pdev) for (i = 0; i < lvts_td->num_lvts_ctrl; i++) lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false); - - lvts_debugfs_exit(lvts_td); } static const struct lvts_ctrl_data mt7988_lvts_ap_data_ctrl[] = { diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 4bdb2d45e0bff..58ab3d86bc25e 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -148,6 +148,11 @@ static void tb_cfg_request_dequeue(struct tb_cfg_request *req) struct tb_ctl *ctl = req->ctl; mutex_lock(&ctl->request_queue_lock); + if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) { + mutex_unlock(&ctl->request_queue_lock); + return; + } + list_del(&req->list); clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags)) diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 402fdf8b1cdec..57821b6f4e468 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -440,10 +440,10 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) bool configured = val & PORT_CS_19_PC; usb4 = port->usb4; - if (((flags & TB_WAKE_ON_CONNECT) | + if (((flags & TB_WAKE_ON_CONNECT) && device_may_wakeup(&usb4->dev)) && !configured) val |= PORT_CS_19_WOC; - if (((flags & TB_WAKE_ON_DISCONNECT) | + if (((flags & TB_WAKE_ON_DISCONNECT) && device_may_wakeup(&usb4->dev)) && configured) val |= PORT_CS_19_WOD; if ((flags & TB_WAKE_ON_USB4) && configured) diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 0dd68bdbfbcf7..4f57991944dc4 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1168,16 +1168,6 @@ static int omap_8250_tx_dma(struct uart_8250_port *p) return 0; } - sg_init_table(&sg, 1); - ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, - UART_XMIT_SIZE, dma->tx_addr); - if (ret != 1) { - serial8250_clear_THRI(p); - return 0; - } - - dma->tx_size = sg_dma_len(&sg); - if (priv->habit & OMAP_DMA_TX_KICK) { unsigned char c; u8 tx_lvl; @@ -1202,18 +1192,22 @@ static int omap_8250_tx_dma(struct uart_8250_port *p) ret = -EBUSY; goto err; } - if (dma->tx_size < 4) { + if (kfifo_len(&tport->xmit_fifo) < 4) { ret = -EINVAL; goto err; } - if (!kfifo_get(&tport->xmit_fifo, &c)) { + if (!uart_fifo_out(&p->port, &c, 1)) { ret = -EINVAL; goto err; } skip_byte = c; - /* now we need to recompute due to kfifo_get */ - kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, - UART_XMIT_SIZE, dma->tx_addr); + } + + sg_init_table(&sg, 1); + ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, UART_XMIT_SIZE, dma->tx_addr); + if (ret != 1) { + ret = -EINVAL; + goto err; } desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1, DMA_MEM_TO_DEV, @@ -1223,6 +1217,7 @@ static int omap_8250_tx_dma(struct uart_8250_port *p) goto err; } + dma->tx_size = sg_dma_len(&sg); dma->tx_running = 1; desc->callback = omap_8250_dma_tx_complete; diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c 
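The lvts_thermal change above (like the fsl-qspi one earlier) replaces a manual cleanup call in .remove() with devm_add_action_or_reset(), which runs the teardown automatically on both probe failure and device removal, and invokes it immediately if registration itself fails. Sketch:

#include <linux/device.h>

static void example_teardown(void *data)
{
	/* undo the setup, e.g. debugfs_remove_recursive() */
}

static int example_setup(struct device *dev, void *state)
{
	/* ... create resources ... */

	/*
	 * Teardown is now owned by devres: no .remove() code needed.
	 * If this call fails, example_teardown(state) runs at once.
	 */
	return devm_add_action_or_reset(dev, example_teardown, state);
}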
b/drivers/tty/serial/8250/8250_pci1xxxx.c index f462b3d1c104c..d6b01e015a96b 100644 --- a/drivers/tty/serial/8250/8250_pci1xxxx.c +++ b/drivers/tty/serial/8250/8250_pci1xxxx.c @@ -115,6 +115,7 @@ #define UART_RESET_REG 0x94 #define UART_RESET_D3_RESET_DISABLE BIT(16) +#define UART_RESET_HOT_RESET_DISABLE BIT(17) #define UART_BURST_STATUS_REG 0x9C #define UART_TX_BURST_FIFO 0xA0 @@ -620,6 +621,10 @@ static int pci1xxxx_suspend(struct device *dev) } data = readl(p + UART_RESET_REG); + + if (priv->dev_rev >= 0xC0) + data |= UART_RESET_HOT_RESET_DISABLE; + writel(data | UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG); if (wakeup) @@ -647,7 +652,12 @@ static int pci1xxxx_resume(struct device *dev) } data = readl(p + UART_RESET_REG); + + if (priv->dev_rev >= 0xC0) + data &= ~UART_RESET_HOT_RESET_DISABLE; + writel(data & ~UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG); + iounmap(p); for (i = 0; i < priv->nr; i++) { diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 8e3b15534bc72..deb9635cb48dc 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -233,6 +233,7 @@ struct imx_port { enum imx_tx_state tx_state; struct hrtimer trigger_start_tx; struct hrtimer trigger_stop_tx; + unsigned int rxtl; }; struct imx_port_ucrs { @@ -1328,6 +1329,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport) #define TXTL_DEFAULT 8 #define RXTL_DEFAULT 8 /* 8 characters or aging timer */ +#define RXTL_CONSOLE_DEFAULT 1 #define TXTL_DMA 8 /* DMA burst setting */ #define RXTL_DMA 9 /* DMA burst setting */ @@ -1445,7 +1447,7 @@ static void imx_uart_disable_dma(struct imx_port *sport) ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN); imx_uart_writel(sport, ucr1, UCR1); - imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); sport->dma_is_enabled = 0; } @@ -1470,7 +1472,12 @@ static int imx_uart_startup(struct uart_port *port) return retval; } - imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + if (uart_console(&sport->port)) + sport->rxtl = RXTL_CONSOLE_DEFAULT; + else + sport->rxtl = RXTL_DEFAULT; + + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); /* disable the DREN bit (Data Ready interrupt enable) before * requesting IRQs @@ -1936,7 +1943,7 @@ static int imx_uart_poll_init(struct uart_port *port) if (retval) clk_disable_unprepare(sport->clk_ipg); - imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); uart_port_lock_irqsave(&sport->port, &flags); @@ -2028,7 +2035,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio /* If the receiver trigger is 0, set it to a default value */ ufcr = imx_uart_readl(sport, UFCR); if ((ufcr & UFCR_RXTL_MASK) == 0) - imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); imx_uart_start_rx(port); } @@ -2213,7 +2220,7 @@ imx_uart_console_setup(struct console *co, char *options) else imx_uart_console_get_options(sport, &baud, &parity, &bits); - imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c index ce0fef7e2c665..be2f130696b3a 100644 --- a/drivers/tty/serial/jsm/jsm_tty.c +++ b/drivers/tty/serial/jsm/jsm_tty.c @@ -451,6 +451,7 @@ int jsm_uart_port_init(struct jsm_board *brd) if (!brd->channels[i]) 
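The pci1xxxx suspend/resume hunks above gate the new hot-reset-disable bit on priv->dev_rev >= 0xC0, the usual way to apply a workaround only to silicon revisions that implement the bit. Shape (register names mirror the hunk, the helper is hypothetical):

#include <linux/bits.h>
#include <linux/types.h>

#define RST_D3_DISABLE	BIT(16)
#define RST_HOT_DISABLE	BIT(17)	/* only meaningful on rev >= 0xC0 */

static u32 suspend_reset_bits(u8 dev_rev, u32 data)
{
	if (dev_rev >= 0xC0)
		data |= RST_HOT_DISABLE;
	return data | RST_D3_DISABLE;
}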
continue; + brd->channels[i]->uart_port.dev = &brd->pci_dev->dev; brd->channels[i]->uart_port.irq = brd->irq; brd->channels[i]->uart_port.uartclk = 14745600; brd->channels[i]->uart_port.type = PORT_JSM; diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c index fb082ee73d5b2..9b54f017f2e8a 100644 --- a/drivers/tty/serial/milbeaut_usio.c +++ b/drivers/tty/serial/milbeaut_usio.c @@ -523,7 +523,10 @@ static int mlb_usio_probe(struct platform_device *pdev) } port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - + if (!port->membase) { + ret = -ENOMEM; + goto failed; + } ret = platform_get_irq_byname(pdev, "rx"); mlb_usio_irq[index][RX] = ret; diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c index 5d1677f1b651c..cb3b127b06b61 100644 --- a/drivers/tty/serial/serial_base_bus.c +++ b/drivers/tty/serial/serial_base_bus.c @@ -72,6 +72,7 @@ static int serial_base_device_init(struct uart_port *port, dev->parent = parent_dev; dev->bus = &serial_base_bus_type; dev->release = release; + device_set_of_node_from_dev(dev, parent_dev); if (!serial_base_initialized) { dev_dbg(port->dev, "uart_add_one_port() called before arch_initcall()?\n"); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 76cf177b040eb..53236e3e4fa47 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -183,6 +183,7 @@ static struct sci_port sci_ports[SCI_NPORTS]; static unsigned long sci_ports_in_use; static struct uart_driver sci_uart_driver; static bool sci_uart_earlycon; +static bool sci_uart_earlycon_dev_probing; static inline struct sci_port * to_sci_port(struct uart_port *uart) @@ -3074,10 +3075,6 @@ static int sci_init_single(struct platform_device *dev, ret = sci_init_clocks(sci_port, &dev->dev); if (ret < 0) return ret; - - port->dev = &dev->dev; - - pm_runtime_enable(&dev->dev); } port->type = p->type; @@ -3104,11 +3101,6 @@ static int sci_init_single(struct platform_device *dev, return 0; } -static void sci_cleanup_single(struct sci_port *port) -{ - pm_runtime_disable(port->port.dev); -} - #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \ defined(CONFIG_SERIAL_SH_SCI_EARLYCON) static void serial_console_putchar(struct uart_port *port, unsigned char ch) @@ -3278,8 +3270,6 @@ static void sci_remove(struct platform_device *dev) sci_ports_in_use &= ~BIT(port->port.line); uart_remove_one_port(&sci_uart_driver, &port->port); - sci_cleanup_single(port); - if (port->port.fifosize > 1) device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger); if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) @@ -3415,7 +3405,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, static int sci_probe_single(struct platform_device *dev, unsigned int index, struct plat_sci_port *p, - struct sci_port *sciport) + struct sci_port *sciport, + struct resource *sci_res) { int ret; @@ -3444,6 +3435,11 @@ static int sci_probe_single(struct platform_device *dev, if (ret) return ret; + sciport->port.dev = &dev->dev; + ret = devm_pm_runtime_enable(&dev->dev); + if (ret) + return ret; + sciport->gpios = mctrl_gpio_init(&sciport->port, 0); if (IS_ERR(sciport->gpios)) return PTR_ERR(sciport->gpios); @@ -3457,13 +3453,31 @@ static int sci_probe_single(struct platform_device *dev, sciport->port.flags |= UPF_HARD_FLOW; } - ret = uart_add_one_port(&sci_uart_driver, &sciport->port); - if (ret) { - sci_cleanup_single(sciport); - return ret; + if (sci_uart_earlycon && sci_ports[0].port.mapbase == 
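The sh-sci refactor above moves runtime-PM setup into probe via devm_pm_runtime_enable(), which schedules the matching pm_runtime_disable() through devres and lets the hand-rolled cleanup helper be deleted. Sketch:

#include <linux/pm_runtime.h>

static int probe_pm(struct device *dev)
{
	int ret;

	/*
	 * Disable is scheduled via devres: no matching call in the
	 * remove path, and probe error paths unwind automatically.
	 */
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	return 0;
}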
sci_res->start) { + /* + * In case: + * - this is the earlycon port (mapped on index 0 in sci_ports[]) and + * - it now maps to an alias other than zero and + * - the earlycon is still alive (e.g., "earlycon keep_bootcon" is + * available in bootargs) + * + * we need to avoid disabling clocks and PM domains through the runtime + * PM APIs called in __device_attach(). For this, increment the runtime + * PM reference counter (the clocks and PM domains were already enabled + * by the bootloader). Otherwise the earlycon may access the HW when it + * has no clocks enabled leading to failures (infinite loop in + * sci_poll_put_char()). + */ + pm_runtime_get_noresume(&dev->dev); + + /* + * Skip cleanup the sci_port[0] in early_console_exit(), this + * port is the same as the earlycon one. + */ + sci_uart_earlycon_dev_probing = true; } - return 0; + return uart_add_one_port(&sci_uart_driver, &sciport->port); } static int sci_probe(struct platform_device *dev) @@ -3521,7 +3535,7 @@ static int sci_probe(struct platform_device *dev) platform_set_drvdata(dev, sp); - ret = sci_probe_single(dev, dev_id, p, sp); + ret = sci_probe_single(dev, dev_id, p, sp, res); if (ret) return ret; @@ -3678,6 +3692,22 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver, #ifdef CONFIG_SERIAL_SH_SCI_EARLYCON static struct plat_sci_port port_cfg; +static int early_console_exit(struct console *co) +{ + struct sci_port *sci_port = &sci_ports[0]; + + /* + * Clean the slot used by earlycon. A new SCI device might + * map to this slot. + */ + if (!sci_uart_earlycon_dev_probing) { + memset(sci_port, 0, sizeof(*sci_port)); + sci_uart_earlycon = false; + } + + return 0; +} + static int __init early_console_setup(struct earlycon_device *device, int type) { @@ -3695,6 +3725,8 @@ static int __init early_console_setup(struct earlycon_device *device, SCSCR_RE | SCSCR_TE | port_cfg.scscr); device->con->write = serial_console_write; + device->con->exit = early_console_exit; + return 0; } static int __init sci_early_console_setup(struct earlycon_device *device, diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 68357ac8ffe3c..71890f3244a0f 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -880,16 +880,6 @@ static int ulite_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - if (!ulite_uart_driver.state) { - dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n"); - ret = uart_register_driver(&ulite_uart_driver); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to register driver\n"); - clk_disable_unprepare(pdata->clk); - return ret; - } - } - ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata); pm_runtime_mark_last_busy(&pdev->dev); @@ -929,16 +919,25 @@ static struct platform_driver ulite_platform_driver = { static int __init ulite_init(void) { + int ret; + + pr_debug("uartlite: calling uart_register_driver()\n"); + ret = uart_register_driver(&ulite_uart_driver); + if (ret) + return ret; pr_debug("uartlite: calling platform_driver_register()\n"); - return platform_driver_register(&ulite_platform_driver); + ret = platform_driver_register(&ulite_platform_driver); + if (ret) + uart_unregister_driver(&ulite_uart_driver); + + return ret; } static void __exit ulite_exit(void) { platform_driver_unregister(&ulite_platform_driver); - if (ulite_uart_driver.state) - uart_unregister_driver(&ulite_uart_driver); + uart_unregister_driver(&ulite_uart_driver); } module_init(ulite_init); diff --git a/drivers/tty/vt/vt.c 
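The uartlite change above hoists uart_register_driver() out of probe and into module_init(), where it pairs cleanly with uart_unregister_driver() and where a platform_driver_register() failure can unwind it. Sketch of that pairing (driver members are omitted for brevity; a real driver fills them in):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>

static struct uart_driver example_uart_driver;		/* fields omitted */
static struct platform_driver example_platform_driver;	/* fields omitted */

static int __init example_init(void)
{
	int ret = uart_register_driver(&example_uart_driver);

	if (ret)
		return ret;

	ret = platform_driver_register(&example_platform_driver);
	if (ret)
		uart_unregister_driver(&example_uart_driver);	/* unwind */
	return ret;
}

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_platform_driver);
	uart_unregister_driver(&example_uart_driver);
}

module_init(example_init);
module_exit(example_exit);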
b/drivers/tty/vt/vt.c index be5564ed8c018..5b09ce71345b6 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -4566,6 +4566,7 @@ void do_unblank_screen(int leaving_gfx) set_palette(vc); set_cursor(vc); vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num); + notify_update(vc); } EXPORT_SYMBOL(do_unblank_screen); diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 4b91072f3a4e9..1f2bdd2e1cc59 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -1103,8 +1103,6 @@ long vt_compat_ioctl(struct tty_struct *tty, case VT_WAITACTIVE: case VT_RELDISP: case VT_DISALLOCATE: - case VT_RESIZE: - case VT_RESIZEX: return vt_ioctl(tty, cmd, arg); /* diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 45b04f3c37764..420e943bb73a7 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -670,7 +670,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) int tag = scsi_cmd_to_rq(cmd)->tag; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; - unsigned long flags; int err; /* Skip task abort in case previous aborts failed and report failure */ @@ -709,10 +708,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) return FAILED; } - spin_lock_irqsave(&hwq->cq_lock, flags); - if (ufshcd_cmd_inflight(lrbp->cmd)) - ufshcd_release_scsi_cmd(hba, lrbp); - spin_unlock_irqrestore(&hwq->cq_lock, flags); - return SUCCESS; } diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 796e37a1d859f..f8397ef3cf8df 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -1608,7 +1608,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); -UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); +UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); @@ -1625,7 +1625,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_logical_block_count.attr, &dev_attr_erase_block_size.attr, &dev_attr_provisioning_type.attr, - &dev_attr_physical_memory_resourse_count.attr, + &dev_attr_physical_memory_resource_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, &dev_attr_wb_buf_alloc_units.attr, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 247e425428c88..a6299cb19237c 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1392,6 +1392,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) * make sure that there are no outstanding requests when * clock scaling is in progress */ + mutex_lock(&hba->host->scan_mutex); blk_mq_quiesce_tagset(&hba->host->tag_set); mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); @@ -1402,6 +1403,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); goto out; } @@ -1423,6 +1425,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc mutex_unlock(&hba->wb_mutex); 
blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); ufshcd_release(hba); } @@ -6577,9 +6580,14 @@ static void ufshcd_err_handler(struct work_struct *work) up(&hba->host_sem); return; } - ufshcd_set_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_err_handling_prepare(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Complete requests that have door-bell cleared by h/w */ ufshcd_complete_requests(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); @@ -7735,7 +7743,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ - ufshcd_scale_clks(hba, ULONG_MAX, true); + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_scale_clks(hba, ULONG_MAX, true); err = ufshcd_hba_enable(hba); diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 4557b1bcd6356..a715f377d0a80 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -366,10 +366,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) if (ret) return ret; - if (phy->power_count) { + if (phy->power_count) phy_power_off(phy); - phy_exit(phy); - } + /* phy initialization - calibrate the phy */ ret = phy_init(phy); diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index c2759bbeed849..a6551a795f744 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -243,6 +243,9 @@ hv_uio_probe(struct hv_device *dev, if (!ring_size) ring_size = SZ_2M; + /* Adjust ring size if necessary to have it page aligned */ + ring_size = VMBUS_RING_SIZE(ring_size); + pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; @@ -274,13 +277,13 @@ hv_uio_probe(struct hv_device *dev, pdata->info.mem[INT_PAGE_MAP].name = "int_page"; pdata->info.mem[INT_PAGE_MAP].addr = (uintptr_t)vmbus_connection.int_page; - pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE; + pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE; pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL; pdata->info.mem[MON_PAGE_MAP].name = "monitor_page"; pdata->info.mem[MON_PAGE_MAP].addr = (uintptr_t)vmbus_connection.monitor_pages[1]; - pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE; + pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE; pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL; pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
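The uio_hv_generic change above exists because VMBus ring mappings must be page aligned: VMBUS_RING_SIZE() accounts for the ring buffer header and rounds the total up to whole pages. A minimal sketch of the sizing logic, assuming only the hyperv.h macro; example_ring_bytes is hypothetical:

#include <linux/hyperv.h>
#include <linux/sizes.h>

/*
 * Round a requested ring payload up so header + payload occupy a
 * whole number of pages; a zero request falls back to 2 MiB, as the
 * probe path above does.
 */
static unsigned long example_ring_bytes(unsigned long requested)
{
	if (!requested)
		requested = SZ_2M;

	return VMBUS_RING_SIZE(requested);
}

diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h index cd138acdcce16..86860686d8363 100644 --- a/drivers/usb/cdns3/cdnsp-debug.h +++ b/drivers/usb/cdns3/cdnsp-debug.h @@ -327,12 +327,13 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0, case TRB_RESET_EP: case TRB_HALT_ENDPOINT: ret = scnprintf(str, size, - "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c", + "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c %c", cdnsp_trb_type_string(type), ep_num, ep_id % 2 ? "out" : "in", TRB_TO_EP_INDEX(field3), field1, field0, TRB_TO_SLOT_ID(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + field3 & TRB_CYCLE ? 'C' : 'c', + field3 & TRB_ESP ?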
'P' : 'p'); break; case TRB_STOP_RING: ret = scnprintf(str, size, diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c index f317d3c847810..5cd9b898ce971 100644 --- a/drivers/usb/cdns3/cdnsp-ep0.c +++ b/drivers/usb/cdns3/cdnsp-ep0.c @@ -414,6 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev, void cdnsp_setup_analyze(struct cdnsp_device *pdev) { struct usb_ctrlrequest *ctrl = &pdev->setup; + struct cdnsp_ep *pep; int ret = -EINVAL; u16 len; @@ -427,10 +428,21 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev) goto out; } + pep = &pdev->eps[0]; + /* Restore the ep0 to Stopped/Running state. */ - if (pdev->eps[0].ep_state & EP_HALTED) { - trace_cdnsp_ep0_halted("Restore to normal state"); - cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0); + if (pep->ep_state & EP_HALTED) { + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_HALTED) + cdnsp_halt_endpoint(pdev, pep, 0); + + /* + * The Halt Endpoint command for ep0 on SSP2 preserves the + * current endpoint state, so the driver has to synchronize + * the software endpoint state with the endpoint output + * context state. + */ + pep->ep_state &= ~EP_HALTED; + pep->ep_state |= EP_STOPPED; } /* diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index 79d06958d6193..38e693cd3efc0 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -28,7 +28,8 @@ unsigned int cdnsp_port_speed(unsigned int port_status) { /*Detect gadget speed based on PORTSC register*/ - if (DEV_SUPERSPEEDPLUS(port_status)) + if (DEV_SUPERSPEEDPLUS(port_status) || + DEV_SSP_GEN1x2(port_status) || DEV_SSP_GEN2x2(port_status)) return USB_SPEED_SUPER_PLUS; else if (DEV_SUPERSPEED(port_status)) return USB_SPEED_SUPER; @@ -546,6 +547,7 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev) dma_addr_t cmd_deq_dma; union cdnsp_trb *event; u32 cycle_state; + u32 retry = 10; int ret, val; u64 cmd_dma; u32 flags; @@ -577,8 +579,23 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev) flags = le32_to_cpu(event->event_cmd.flags); /* Check the owner of the TRB. */ - if ((flags & TRB_CYCLE) != cycle_state) + if ((flags & TRB_CYCLE) != cycle_state) { + /* + * Give the controller some extra time to finish the + * command before returning an error code. Checking + * CMD_RING_BUSY is not sufficient because this bit is + * cleared to '0' when the Command Descriptor has been + * executed by the controller, and not when the command + * completion event has been added to the event ring.
+ */ + if (retry--) { + udelay(20); + continue; + } + return -EINVAL; + } cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb); diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h index 12534be52f39d..a91cca509db08 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.h +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -285,11 +285,15 @@ struct cdnsp_port_regs { #define XDEV_HS (0x3 << 10) #define XDEV_SS (0x4 << 10) #define XDEV_SSP (0x5 << 10) +#define XDEV_SSP1x2 (0x6 << 10) +#define XDEV_SSP2x2 (0x7 << 10) #define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0 << 10)) #define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS) #define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS) #define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS) #define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP) +#define DEV_SSP_GEN1x2(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP1x2) +#define DEV_SSP_GEN2x2(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP2x2) #define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS) #define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f) /* Port Link State Write Strobe - set this when changing link state */ @@ -983,6 +987,12 @@ enum cdnsp_setup_dev { #define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16)) #define SCT_FOR_TRB(p) (((p) << 1) & 0x7) +/* + * Halt Endpoint Command TRB field. + * The ESP bit only exists in the SSP2 controller. + */ +#define TRB_ESP BIT(9) + /* Link TRB specific fields. */ #define TRB_TC BIT(1) diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index fd06cb85c4ea8..0758f171f73ec 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -772,7 +772,9 @@ static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id) } if (port_id != old_port) { - cdnsp_disable_slot(pdev); + if (pdev->slot_id) + cdnsp_disable_slot(pdev); + pdev->active_port = port; cdnsp_enable_slot(pdev); } @@ -2483,7 +2485,8 @@ void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index) { cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) | SLOT_ID_FOR_TRB(pdev->slot_id) | - EP_ID_FOR_TRB(ep_index)); + EP_ID_FOR_TRB(ep_index) | + (!ep_index ? 
TRB_ESP : 0)); } void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num) diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index fd6032874bf33..8f73bd5057a64 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -2362,6 +2362,10 @@ static void udc_suspend(struct ci_hdrc *ci) */ if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0) hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0); + + if (ci->gadget.connected && + (!ci->suspended || !device_may_wakeup(ci->dev))) + usb_gadget_disconnect(&ci->gadget); } static void udc_resume(struct ci_hdrc *ci, bool power_lost) @@ -2372,6 +2376,9 @@ static void udc_resume(struct ci_hdrc *ci, bool power_lost) OTGSC_BSVIS | OTGSC_BSVIE); if (ci->vbus_active) usb_gadget_vbus_disconnect(&ci->gadget); + } else if (ci->vbus_active && ci->driver && + !ci->gadget.connected) { + usb_gadget_connect(&ci->gadget); } /* Restore value 0 if it was set for power lost check */ diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 16e7fa4d488d3..ecd6d1f39e498 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -92,7 +92,6 @@ struct wdm_device { u16 wMaxCommand; u16 wMaxPacketSize; __le16 inum; - int reslength; int length; int read; int count; @@ -214,6 +213,11 @@ static void wdm_in_callback(struct urb *urb) if (desc->rerr == 0 && status != -EPIPE) desc->rerr = status; + if (length == 0) { + dev_dbg(&desc->intf->dev, "received ZLP\n"); + goto skip_zlp; + } + if (length + desc->length > desc->wMaxCommand) { /* The buffer would overflow */ set_bit(WDM_OVERFLOW, &desc->flags); @@ -222,18 +226,18 @@ static void wdm_in_callback(struct urb *urb) if (!test_bit(WDM_OVERFLOW, &desc->flags)) { memmove(desc->ubuf + desc->length, desc->inbuf, length); desc->length += length; - desc->reslength = length; } } skip_error: if (desc->rerr) { /* - * Since there was an error, userspace may decide to not read - * any data after poll'ing. + * If there was a ZLP or an error, userspace may decide to not + * read any data after poll'ing. * We should respond to further attempts from the device to send * data, so that we can get unstuck. 
*/ +skip_zlp: schedule_work(&desc->service_outs_intr); } else { set_bit(WDM_READ, &desc->flags); @@ -585,15 +589,6 @@ static ssize_t wdm_read goto retry; } - if (!desc->reslength) { /* zero length read */ - dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n"); - clear_bit(WDM_READ, &desc->flags); - rv = service_outstanding_interrupt(desc); - spin_unlock_irq(&desc->iuspin); - if (rv < 0) - goto err; - goto retry; - } cntr = desc->length; spin_unlock_irq(&desc->iuspin); } @@ -1016,7 +1011,7 @@ static void service_interrupt_work(struct work_struct *work) spin_lock_irq(&desc->iuspin); service_outstanding_interrupt(desc); - if (!desc->resp_count) { + if (!desc->resp_count && (desc->length || desc->rerr)) { set_bit(WDM_READ, &desc->flags); wake_up(&desc->wait); } diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 740d2d2b19fbe..75de29725a450 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -483,6 +483,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) u8 tag; int rv; long wait_rv; + unsigned long expire; dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n", data->iin_ep_present); @@ -512,10 +513,11 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) } if (data->iin_ep_present) { + expire = msecs_to_jiffies(file_data->timeout); wait_rv = wait_event_interruptible_timeout( data->waitq, atomic_read(&data->iin_data_valid) != 0, - file_data->timeout); + expire); if (wait_rv < 0) { dev_dbg(dev, "wait interrupted %ld\n", wait_rv); rv = wait_rv; @@ -563,14 +565,15 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data, rv = usbtmc_get_stb(file_data, &stb); - if (rv > 0) { - srq_asserted = atomic_xchg(&file_data->srq_asserted, - srq_asserted); - if (srq_asserted) - stb |= 0x40; /* Set RQS bit */ + if (rv < 0) + return rv; + + srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted); + if (srq_asserted) + stb |= 0x40; /* Set RQS bit */ + + rv = put_user(stb, (__u8 __user *)arg); - rv = put_user(stb, (__u8 __user *)arg); - } return rv; } @@ -2199,7 +2202,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case USBTMC_IOCTL_GET_STB: retval = usbtmc_get_stb(file_data, &tmp_byte); - if (retval > 0) + if (!retval) retval = put_user(tmp_byte, (__u8 __user *)arg); break;
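The usbtmc hunk above fixes a classic units bug: wait_event_interruptible_timeout() expects a timeout in jiffies, while file_data->timeout is kept in milliseconds, so passing the raw value made the effective timeout scale with HZ. A minimal sketch of the corrected pattern; example_wait_for_flag is hypothetical:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

/*
 * Convert a millisecond timeout to jiffies before sleeping; returns
 * >0 on wakeup, 0 on timeout, -ERESTARTSYS if interrupted.
 */
static long example_wait_for_flag(wait_queue_head_t *waitq,
				  atomic_t *flag, unsigned int timeout_ms)
{
	unsigned long expire = msecs_to_jiffies(timeout_ms);

	return wait_event_interruptible_timeout(*waitq,
						atomic_read(flag) != 0,
						expire);
}

diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 501e8bc9738eb..1096a884c8d70 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -20,6 +20,9 @@ #include #include #include +#include + +static DEFINE_IDA(usb_conn_ida); #define USB_GPIO_DEB_MS 20 /* ms */ #define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */ @@ -29,6 +32,7 @@ struct usb_conn_info { struct device *dev; + int conn_id; /* store the IDA-allocated ID */ struct usb_role_switch *role_sw; enum usb_role last_role; struct regulator *vbus; @@ -160,7 +164,17 @@ static int usb_conn_psy_register(struct usb_conn_info *info) .of_node = dev->of_node, }; - desc->name = "usb-charger"; + info->conn_id = ida_alloc(&usb_conn_ida, GFP_KERNEL); + if (info->conn_id < 0) + return info->conn_id; + + desc->name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d", + info->conn_id); + if (!desc->name) { + ida_free(&usb_conn_ida, info->conn_id); + return -ENOMEM; + } + desc->properties = usb_charger_properties; desc->num_properties = ARRAY_SIZE(usb_charger_properties); desc->get_property = usb_charger_get_property; @@ -168,8 +182,10 @@ static int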
usb_conn_psy_register(struct usb_conn_info *info) cfg.drv_data = info; info->charger = devm_power_supply_register(dev, desc, &cfg); - if (IS_ERR(info->charger)) - dev_err(dev, "Unable to register charger\n"); + if (IS_ERR(info->charger)) { + dev_err(dev, "Unable to register charger %d\n", info->conn_id); + ida_free(&usb_conn_ida, info->conn_id); + } return PTR_ERR_OR_ZERO(info->charger); } @@ -277,6 +293,9 @@ static void usb_conn_remove(struct platform_device *pdev) cancel_delayed_work_sync(&info->dw_det); + if (info->charger) + ida_free(&usb_conn_ida, info->conn_id); + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 145787c424e0c..da6da5ec42372 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2336,6 +2336,9 @@ void usb_disconnect(struct usb_device **pdev) usb_remove_ep_devs(&udev->ep0); usb_unlock_device(udev); + if (udev->usb4_link) + device_link_del(udev->usb4_link); + /* Unregister the device. The device driver is responsible * for de-configuring the device and invoking the remove-device * notifier chain (used by usbfs and possibly others). @@ -6135,6 +6138,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) struct usb_hub *parent_hub; struct usb_hcd *hcd = bus_to_hcd(udev->bus); struct usb_device_descriptor descriptor; + struct usb_interface *intf; struct usb_host_bos *bos; int i, j, ret = 0; int port1 = udev->portnum; @@ -6192,6 +6196,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev) if (!udev->actconfig) goto done; + /* + * Some devices can't handle setting default altsetting 0 with a + * Set-Interface request. Disable host-side endpoints of those + * interfaces here. Enable and reset them back after the host has set + * its internal endpoint structures during usb_hcd_alloc_bandwidth() + */ + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { + intf = udev->actconfig->interface[i]; + if (intf->cur_altsetting->desc.bAlternateSetting == 0) + usb_disable_interface(udev, intf, true); + } + mutex_lock(hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL); if (ret < 0) { @@ -6223,12 +6239,11 @@ static int usb_reset_and_verify_device(struct usb_device *udev) */ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { struct usb_host_config *config = udev->actconfig; - struct usb_interface *intf = config->interface[i]; struct usb_interface_descriptor *desc; + intf = config->interface[i]; desc = &intf->cur_altsetting->desc; if (desc->bAlternateSetting == 0) { - usb_disable_interface(udev, intf, true); usb_enable_interface(udev, intf, true); ret = 0; } else {
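The hub.c change above works around devices that choke on a Set-Interface request for altsetting 0: host-side endpoints are disabled before the HCD rebuilds its endpoint state, then re-enabled (which also resets toggles and sequence numbers) afterwards. A condensed sketch of the idea, assuming the USB core's internal usb_disable_interface()/usb_enable_interface() helpers; locking and error handling are elided, and example_requiesce_alt0 is hypothetical:

#include <linux/usb.h>

static void example_requiesce_alt0(struct usb_device *udev)
{
	int i;

	/* Quiesce host-side endpoints of every altsetting-0 interface. */
	for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
		struct usb_interface *intf = udev->actconfig->interface[i];

		if (intf->cur_altsetting->desc.bAlternateSetting == 0)
			usb_disable_interface(udev, intf, true);
	}

	/* ... usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL) ... */

	/* Re-enable them, resetting endpoint state on the host side. */
	for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
		struct usb_interface *intf = udev->actconfig->interface[i];

		if (intf->cur_altsetting->desc.bAlternateSetting == 0)
			usb_enable_interface(udev, intf, true);
	}
}

diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 4903c733d37ae..46db600fdd824 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -227,7 +227,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, /* Logitech HD Webcam C270 */ - { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME | + USB_QUIRK_NO_LPM}, /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -372,6 +373,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* SanDisk Corp.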
SanDisk 3.2Gen1 */ { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, + /* SanDisk Extreme 55AE */ + { USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM }, + /* Realforce 87U Keyboard */ { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 03c22114214b5..3bc68534dbcd3 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(usb_acpi_set_power_state); */ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) { - const struct device_link *link; + struct device_link *link; struct usb_port *port_dev; struct usb_hub *hub; @@ -165,6 +165,8 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) return 0; hub = usb_hub_to_struct_hub(udev->parent); + if (!hub) + return 0; port_dev = hub->ports[udev->portnum - 1]; struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = @@ -186,6 +188,8 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) dev_dbg(&port_dev->dev, "Created device link from %s to %s\n", dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev)); + udev->usb4_link = link; + return 0; } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 0b4685aad2d50..118fa4c93a795 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -695,15 +695,16 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, device_set_of_node_from_dev(&dev->dev, bus->sysdev); dev_set_name(&dev->dev, "usb%d", bus->busnum); } else { + int n; + /* match any labeling on the hubs; it's one-based */ if (parent->devpath[0] == '0') { - snprintf(dev->devpath, sizeof dev->devpath, - "%d", port1); + n = snprintf(dev->devpath, sizeof(dev->devpath), "%d", port1); /* Root ports are not counted in route string */ dev->route = 0; } else { - snprintf(dev->devpath, sizeof dev->devpath, - "%s.%d", parent->devpath, port1); + n = snprintf(dev->devpath, sizeof(dev->devpath), "%s.%d", + parent->devpath, port1); /* Route string assumes hubs have less than 16 ports */ if (port1 < 15) dev->route = parent->route + @@ -712,6 +713,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, dev->route = parent->route + (15 << ((parent->level - 1)*4)); } + if (n >= sizeof(dev->devpath)) { + usb_put_hcd(bus_to_hcd(bus)); + usb_put_dev(dev); + return NULL; + } dev->dev.parent = &parent->dev; dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index bd4c788f03bc1..d3d0d75ab1f59 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4604,6 +4604,12 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) if (!hsotg) return -ENODEV; + /* Exit clock gating when driver is stopped. 
*/ + if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && + hsotg->bus_suspended && !hsotg->params.no_clock_gating) { + dwc2_gadget_exit_clock_gating(hsotg, 0); + } + /* all endpoints should be shutdown */ for (ep = 1; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index abcd7f85ab50c..8b3560a099e48 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -2404,6 +2404,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) { u32 reg; int i; + int ret; if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & @@ -2422,7 +2423,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) case DWC3_GCTL_PRTCAP_DEVICE: if (pm_runtime_suspended(dwc->dev)) break; - dwc3_gadget_suspend(dwc); + ret = dwc3_gadget_suspend(dwc); + if (ret) + return ret; synchronize_irq(dwc->irq_gadget); dwc3_core_exit(dwc); break; @@ -2457,7 +2460,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) break; if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { - dwc3_gadget_suspend(dwc); + ret = dwc3_gadget_suspend(dwc); + if (ret) + return ret; synchronize_irq(dwc->irq_gadget); } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index b81c965684b9d..bff3f445cbc1f 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4796,26 +4796,22 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) int ret; ret = dwc3_gadget_soft_disconnect(dwc); - if (ret) - goto err; - - spin_lock_irqsave(&dwc->lock, flags); - if (dwc->gadget_driver) - dwc3_disconnect_gadget(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - - return 0; - -err: /* * Attempt to reset the controller's state. Likely no * communication can be established until the host * performs a port reset. 
*/ - if (dwc->softconnect) + if (ret && dwc->softconnect) { dwc3_gadget_soft_connect(dwc); + return -EAGAIN; + } - return ret; + spin_lock_irqsave(&dwc->lock, flags); + if (dwc->gadget_driver) + dwc3_disconnect_gadget(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + + return 0; } int dwc3_gadget_resume(struct dwc3 *dwc) diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 740311c4fa249..d8bd2d82e9ec6 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -75,6 +75,7 @@ struct f_hidg { /* recv report */ spinlock_t read_spinlock; wait_queue_head_t read_queue; + bool disabled; /* recv report - interrupt out only (use_out_ep == 1) */ struct list_head completed_out_req; unsigned int qlen; @@ -144,8 +145,8 @@ static struct hid_descriptor hidg_desc = { .bcdHID = cpu_to_le16(0x0101), .bCountryCode = 0x00, .bNumDescriptors = 0x1, - /*.desc[0].bDescriptorType = DYNAMIC */ - /*.desc[0].wDescriptorLenght = DYNAMIC */ + /*.rpt_desc.bDescriptorType = DYNAMIC */ + /*.rpt_desc.wDescriptorLength = DYNAMIC */ }; /* Super-Speed Support */ @@ -329,7 +330,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer, spin_lock_irqsave(&hidg->read_spinlock, flags); -#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req)) +#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req) || hidg->disabled) /* wait for at least one buffer to complete */ while (!READ_COND_INTOUT) { @@ -343,6 +344,11 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer, spin_lock_irqsave(&hidg->read_spinlock, flags); } + if (hidg->disabled) { + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + return -ESHUTDOWN; + } + /* pick the first one */ list = list_first_entry(&hidg->completed_out_req, struct f_hidg_req_list, list); @@ -387,7 +393,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer, return count; } -#define READ_COND_SSREPORT (hidg->set_report_buf != NULL) +#define READ_COND_SSREPORT (hidg->set_report_buf != NULL || hidg->disabled) static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer, size_t count, loff_t *ptr) @@ -939,8 +945,8 @@ static int hidg_setup(struct usb_function *f, struct hid_descriptor hidg_desc_copy = hidg_desc; VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); - hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT; - hidg_desc_copy.desc[0].wDescriptorLength = + hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT; + hidg_desc_copy.rpt_desc.wDescriptorLength = cpu_to_le16(hidg->report_desc_length); length = min_t(unsigned short, length, @@ -1012,6 +1018,11 @@ static void hidg_disable(struct usb_function *f) } spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + spin_lock_irqsave(&hidg->read_spinlock, flags); + hidg->disabled = true; + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + wake_up(&hidg->read_queue); + spin_lock_irqsave(&hidg->write_spinlock, flags); if (!hidg->write_pending) { free_ep_req(hidg->in_ep, hidg->req); @@ -1097,6 +1108,10 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } } + spin_lock_irqsave(&hidg->read_spinlock, flags); + hidg->disabled = false; + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + if (hidg->in_ep != NULL) { spin_lock_irqsave(&hidg->write_spinlock, flags); hidg->req = req_in; @@ -1210,8 +1225,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) * We can use hidg_desc struct here but we should not relay * that its content 
won't change after returning from this function. */ - hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT; - hidg_desc.desc[0].wDescriptorLength = + hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT; + hidg_desc.rpt_desc.wDescriptorLength = cpu_to_le16(hidg->report_desc_length); hidg_hs_in_ep_desc.bEndpointAddress = diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 7b23631f47449..6ad205046032c 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -1297,14 +1297,14 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn, struct usbg_tport *tport = container_of(wwn, struct usbg_tport, tport_wwn); struct usbg_tpg *tpg; - unsigned long tpgt; + u16 tpgt; int ret; struct f_tcm_opts *opts; unsigned i; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX) + if (kstrtou16(name + 5, 0, &tpgt)) return ERR_PTR(-EINVAL); ret = -ENODEV; mutex_lock(&tpg_instances_lock);
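The f_tcm hunk above is a range fix as much as a type change: the old kstrtoul() plus "tpgt > UINT_MAX" check still admitted tags that do not fit the 16-bit TPG tag, and on 32-bit builds the comparison could never fail at all, whereas kstrtou16() rejects malformed and out-of-range input in one step. A minimal sketch; example_parse_tpgt is hypothetical:

#include <linux/kstrtox.h>
#include <linux/string.h>
#include <linux/types.h>

/* Parse "tpgt_<n>" where <n> must fit in 16 bits. */
static int example_parse_tpgt(const char *name, u16 *tpgt)
{
	if (strstr(name, "tpgt_") != name)
		return -EINVAL;

	return kstrtou16(name + 5, 0, tpgt);
}

diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 53d9fc41acc52..2412f81f44120 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -294,8 +294,8 @@ __acquires(&port->port_lock) break; } - if (do_tty_wake && port->port.tty) - tty_wakeup(port->port.tty); + if (do_tty_wake) + tty_port_tty_wakeup(&port->port); return status; } @@ -543,20 +543,16 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, static int gs_start_io(struct gs_port *port) { struct list_head *head = &port->read_pool; - struct usb_ep *ep; + struct usb_ep *ep = port->port_usb->out; int status; unsigned started; - if (!port->port_usb || !port->port.tty) - return -EIO; - /* Allocate RX and TX I/O buffers. We can't easily do this much * earlier (with GFP_KERNEL) because the requests are coupled to * endpoints, as are the packet sizes we'll be using. Different * configurations may use different endpoints with a given port; * and high speed vs full speed changes packet sizes too.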
*/ - ep = port->port_usb->out; status = gs_alloc_requests(ep, head, gs_read_complete, &port->read_allocated); if (status) @@ -577,7 +573,7 @@ static int gs_start_io(struct gs_port *port) gs_start_tx(port); /* Unblock any pending writes into our circular buffer, in case * we didn't in gs_start_tx() */ - tty_wakeup(port->port.tty); + tty_port_tty_wakeup(&port->port); } else { /* Free reqs only if we are still connected */ if (port->port_usb) { diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 4b3d5075621aa..d709e24c1fd42 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1570,7 +1570,7 @@ static int gadget_match_driver(struct device *dev, const struct device_driver *d { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_udc *udc = gadget->udc; - struct usb_gadget_driver *driver = container_of(drv, + const struct usb_gadget_driver *driver = container_of(drv, struct usb_gadget_driver, driver); /* If the driver specifies a udc_name, it must match the UDC's name */ diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index d35f3a18dd13b..bdc664ad6a934 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -651,6 +651,10 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) case DS_DISABLED: return; case DS_CONFIGURED: + spin_lock(&dbc->lock); + xhci_dbc_flush_requests(dbc); + spin_unlock(&dbc->lock); + if (dbc->driver->disconnect) dbc->driver->disconnect(dbc); break; diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index d719c16ea30b5..2b8558005cbb0 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -585,6 +585,7 @@ int dbc_tty_init(void) dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL; dbc_tty_driver->init_termios = tty_std_termios; + dbc_tty_driver->init_termios.c_lflag &= ~ECHO; dbc_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; dbc_tty_driver->init_termios.c_ispeed = 9600; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f9c51e0f2e37c..91178b8dbbf08 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1426,6 +1426,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Periodic endpoint bInterval limit quirk */ if (usb_endpoint_xfer_int(&ep->desc) || usb_endpoint_xfer_isoc(&ep->desc)) { + if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) && + interval >= 9) { + interval = 8; + } if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && udev->speed >= USB_SPEED_HIGH && interval >= 7) { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1b033c8ce188e..234efb9731b2c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -71,12 +71,22 @@ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0 +#define PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI 0x13ed +#define PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI 0x13ee +#define PCI_DEVICE_ID_AMD_STARSHIP_XHCI 0x148c +#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI 0x15d4 +#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI 0x15d5 +#define PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI 0x15e0 +#define PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI 0x15e1 +#define PCI_DEVICE_ID_AMD_RAVEN2_XHCI 0x15e5 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define 
PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI 0x7316 + #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 @@ -286,6 +296,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_STARSHIP_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN2_XHCI)) + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; + + if (pdev->vendor == PCI_VENDOR_ID_ATI && + pdev->device == PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI) + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; + if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) xhci->quirks |= XHCI_AMD_0x96_HOST; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 2379a67e34e12..3a9bdf9167556 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -326,7 +326,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s } usb3_hcd = xhci_get_usb3_hcd(xhci); - if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4) + if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 && + !(xhci->quirks & XHCI_BROKEN_STREAMS)) usb3_hcd->can_do_streams = 1; if (xhci->shared_hcd) { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index fbc8419a54730..2ff8787f753c9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -461,9 +461,8 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) * In the future we should distinguish between -ENODEV and -ETIMEDOUT * and try to recover a -ETIMEDOUT with a host controller reset. */ - ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring, - CMD_RING_RUNNING, 0, 5 * 1000 * 1000, - XHCI_STATE_REMOVING); + ret = xhci_handshake(&xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 5 * 1000 * 1000); if (ret < 0) { xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret); xhci_halt(xhci); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 799941b6ad6c6..09a5a66049620 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -82,29 +82,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) return ret; } -/* - * xhci_handshake_check_state - same as xhci_handshake but takes an additional - * exit_state parameter, and bails out with an error immediately when xhc_state - * has exit_state flag set. - */ -int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr, - u32 mask, u32 done, int usec, unsigned int exit_state) -{ - u32 result; - int ret; - - ret = readl_poll_timeout_atomic(ptr, result, - (result & mask) == done || - result == U32_MAX || - xhci->xhc_state & exit_state, - 1, usec); - - if (result == U32_MAX || xhci->xhc_state & exit_state) - return -ENODEV; - - return ret; -} - /* * Disable interrupts and begin the xHCI halting process. 
*/ @@ -225,8 +202,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command, - CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); if (ret) return ret; @@ -1094,7 +1070,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); + if (xhci->xhc_state & XHCI_STATE_REMOVING) + retval = -ENODEV; + else + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); spin_unlock_irq(&xhci->lock); if (retval) return retval; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index c4d5b90ef90a8..11580495e09c1 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1626,6 +1626,7 @@ struct xhci_hcd { #define XHCI_WRITE_64_HI_LO BIT_ULL(47) #define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) #define XHCI_ETRON_HOST BIT_ULL(49) +#define XHCI_LIMIT_ENDPOINT_INTERVAL_9 BIT_ULL(50) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1846,8 +1847,6 @@ void xhci_remove_secondary_interrupter(struct usb_hcd /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us); -int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr, - u32 mask, u32 done, int usec, unsigned int exit_state); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_start(struct xhci_hcd *xhci); diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c index b4d5408a4371b..cf716ae870b80 100644 --- a/drivers/usb/misc/onboard_usb_dev.c +++ b/drivers/usb/misc/onboard_usb_dev.c @@ -36,9 +36,10 @@ #define USB5744_CMD_CREG_ACCESS 0x99 #define USB5744_CMD_CREG_ACCESS_LSB 0x37 #define USB5744_CREG_MEM_ADDR 0x00 +#define USB5744_CREG_MEM_RD_ADDR 0x04 #define USB5744_CREG_WRITE 0x00 -#define USB5744_CREG_RUNTIMEFLAGS2 0x41 -#define USB5744_CREG_RUNTIMEFLAGS2_LSB 0x1D +#define USB5744_CREG_READ 0x01 +#define USB5744_CREG_RUNTIMEFLAGS2 0x411D #define USB5744_CREG_BYPASS_UDC_SUSPEND BIT(3) static void onboard_dev_attach_usb_driver(struct work_struct *work); @@ -309,11 +310,88 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work) pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err)); } +#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744) +static int onboard_dev_5744_i2c_read_byte(struct i2c_client *client, u16 addr, u8 *data) +{ + struct i2c_msg msg[2]; + u8 rd_buf[3]; + int ret; + + u8 wr_buf[7] = {0, USB5744_CREG_MEM_ADDR, 4, + USB5744_CREG_READ, 1, + addr >> 8 & 0xff, + addr & 0xff}; + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].len = sizeof(wr_buf); + msg[0].buf = wr_buf; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + wr_buf[0] = USB5744_CMD_CREG_ACCESS; + wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB; + wr_buf[2] = 0; + msg[0].len = 3; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + wr_buf[0] = 0; + wr_buf[1] = USB5744_CREG_MEM_RD_ADDR; + msg[0].len = 2; + + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = 2; + msg[1].buf = rd_buf; + + ret = i2c_transfer(client->adapter, msg, 2); + if (ret < 0) + return ret; + *data = rd_buf[1]; + + return 0; +} + +static int onboard_dev_5744_i2c_write_byte(struct i2c_client *client, u16 
addr, u8 data) +{ + struct i2c_msg msg[2]; + int ret; + + u8 wr_buf[8] = {0, USB5744_CREG_MEM_ADDR, 5, + USB5744_CREG_WRITE, 1, + addr >> 8 & 0xff, + addr & 0xff, + data}; + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].len = sizeof(wr_buf); + msg[0].buf = wr_buf; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + msg[0].len = 3; + wr_buf[0] = USB5744_CMD_CREG_ACCESS; + wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB; + wr_buf[2] = 0; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + return 0; +} + static int onboard_dev_5744_i2c_init(struct i2c_client *client) { -#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744) struct device *dev = &client->dev; int ret; + u8 reg; /* * Set BYPASS_UDC_SUSPEND bit to ensure MCU is always enabled @@ -321,20 +399,16 @@ static int onboard_dev_5744_i2c_init(struct i2c_client *client) * The command writes 5 bytes to memory and single data byte in * configuration register. */ - char wr_buf[7] = {USB5744_CREG_MEM_ADDR, 5, - USB5744_CREG_WRITE, 1, - USB5744_CREG_RUNTIMEFLAGS2, - USB5744_CREG_RUNTIMEFLAGS2_LSB, - USB5744_CREG_BYPASS_UDC_SUSPEND}; - - ret = i2c_smbus_write_block_data(client, 0, sizeof(wr_buf), wr_buf); + ret = onboard_dev_5744_i2c_read_byte(client, + USB5744_CREG_RUNTIMEFLAGS2, ®); if (ret) - return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n"); + return dev_err_probe(dev, ret, "CREG_RUNTIMEFLAGS2 read failed\n"); - ret = i2c_smbus_write_word_data(client, USB5744_CMD_CREG_ACCESS, - USB5744_CMD_CREG_ACCESS_LSB); + reg |= USB5744_CREG_BYPASS_UDC_SUSPEND; + ret = onboard_dev_5744_i2c_write_byte(client, + USB5744_CREG_RUNTIMEFLAGS2, reg); if (ret) - return dev_err_probe(dev, ret, "Configuration Register Access Command failed\n"); + return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n"); /* Send SMBus command to boot hub. */ ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH, @@ -343,10 +417,13 @@ static int onboard_dev_5744_i2c_init(struct i2c_client *client) return dev_err_probe(dev, ret, "USB Attach with SMBus command failed\n"); return ret; +} #else +static int onboard_dev_5744_i2c_init(struct i2c_client *client) +{ return -ENODEV; -#endif } +#endif static int onboard_dev_probe(struct platform_device *pdev) { diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 7324de52d9505..161786e9b7e47 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -685,10 +685,29 @@ static int usbhs_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug); spin_lock_init(usbhs_priv_to_lock(priv)); + /* + * Acquire clocks and enable power management (PM) early in the + * probe process, as the driver accesses registers during + * initialization. Ensure the device is active before proceeding. 
+ */ + pm_runtime_enable(dev); + + ret = usbhsc_clk_get(dev, priv); + if (ret) + goto probe_pm_disable; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + goto probe_clk_put; + + ret = usbhsc_clk_prepare_enable(priv); + if (ret) + goto probe_pm_put; + /* call pipe and module init */ ret = usbhs_pipe_probe(priv); if (ret < 0) - return ret; + goto probe_clk_dis_unprepare; ret = usbhs_fifo_probe(priv); if (ret < 0) @@ -705,10 +724,6 @@ static int usbhs_probe(struct platform_device *pdev) if (ret) goto probe_fail_rst; - ret = usbhsc_clk_get(dev, priv); - if (ret) - goto probe_fail_clks; - /* * deviece reset here because * USB device might be used in boot loader. */ @@ -721,7 +736,7 @@ static int usbhs_probe(struct platform_device *pdev) if (ret) { dev_warn(dev, "USB function not selected (GPIO)\n"); ret = -ENOTSUPP; - goto probe_end_mod_exit; + goto probe_assert_rest; } } @@ -735,14 +750,19 @@ static int usbhs_probe(struct platform_device *pdev) ret = usbhs_platform_call(priv, hardware_init, pdev); if (ret < 0) { dev_err(dev, "platform init failed.\n"); - goto probe_end_mod_exit; + goto probe_assert_rest; } /* reset phy for connection */ usbhs_platform_call(priv, phy_reset, pdev); - /* power control */ - pm_runtime_enable(dev); + /* + * Disable the clocks that were enabled earlier in the probe path, + * and let the driver handle the clocks beyond this point. + */ + usbhsc_clk_disable_unprepare(priv); + pm_runtime_put(dev); + if (!usbhs_get_dparam(priv, runtime_pwctrl)) { usbhsc_power_ctrl(priv, 1); usbhs_mod_autonomy_mode(priv); @@ -759,9 +779,7 @@ static int usbhs_probe(struct platform_device *pdev) return ret; -probe_end_mod_exit: - usbhsc_clk_put(priv); -probe_fail_clks: +probe_assert_rest: reset_control_assert(priv->rsts); probe_fail_rst: usbhs_mod_remove(priv); @@ -769,6 +787,14 @@ static int usbhs_probe(struct platform_device *pdev) usbhs_fifo_remove(priv); probe_end_pipe_exit: usbhs_pipe_remove(priv); +probe_clk_dis_unprepare: + usbhsc_clk_disable_unprepare(priv); +probe_pm_put: + pm_runtime_put(dev); +probe_clk_put: + usbhsc_clk_put(priv); +probe_pm_disable: + pm_runtime_disable(dev); dev_info(dev, "probe failed (%d)\n", ret); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index ad41363e3cea5..9708c3d40f078 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -457,6 +457,8 @@ static int pl2303_detect_type(struct usb_serial *serial) case 0x605: case 0x700: /* GR */ case 0x705: + case 0x905: /* GT-2AB */ + case 0x1005: /* GC-Q20 */ return TYPE_HXN; } break; diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index d460d71b42578..1477e31d77632 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), +/* Reported-by: Zhihong Zhou */ +UNUSUAL_DEV(0x0781, 0x55e8, 0x0000, 0x9999, + "SanDisk", + "", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Hongling Zeng */ UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999, "Hiksemi",
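The renesas_usbhs probe rework above is a textbook goto ladder: every resource acquired on the way down gets an unwind label, executed in exactly the reverse order on failure. A stripped-down sketch of the shape, under the assumption of a generic platform driver; example_hw_init and the labels are hypothetical:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Stand-in for register setup that needs the device powered. */
static int example_hw_init(struct platform_device *pdev)
{
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		goto err_pm_disable;

	ret = example_hw_init(pdev);
	if (ret)
		goto err_pm_put;

	return 0;

err_pm_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	return ret;
}

diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 92cc1b1361208..6964f403a2d53 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -393,6 +393,9 @@ static int dp_altmode_vdm(struct typec_altmode *alt, break; case CMDT_RSP_NAK: switch (cmd) { + case DP_CMD_STATUS_UPDATE: + dp->state = DP_STATE_EXIT; +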
break; case DP_CMD_CONFIGURE: dp->data.conf = 0; ret = dp_altmode_configured(dp); @@ -673,7 +676,7 @@ static ssize_t pin_assignment_show(struct device *dev, assignments = get_current_pin_assignments(dp); - for (i = 0; assignments; assignments >>= 1, i++) { + for (i = 0; assignments && i < DP_PIN_ASSIGN_MAX; assignments >>= 1, i++) { if (assignments & 1) { if (i == cur) len += sprintf(buf + len, "[%s] ", diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index aa879253d3b81..13044ee5be10d 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -449,7 +449,7 @@ ATTRIBUTE_GROUPS(typec); static int typec_match(struct device *dev, const struct device_driver *driver) { - struct typec_altmode_driver *drv = to_altmode_driver(driver); + const struct typec_altmode_driver *drv = to_altmode_driver(driver); struct typec_altmode *altmode = to_typec_altmode(dev); const struct typec_device_id *id; diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index 49926d6e72c71..182c902c42f61 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c @@ -214,7 +214,7 @@ int typec_switch_set(struct typec_switch *sw, sw_dev = sw->sw_devs[i]; ret = sw_dev->set(sw_dev, orientation); - if (ret) + if (ret && ret != -EOPNOTSUPP) return ret; } @@ -378,7 +378,7 @@ int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state) mux_dev = mux->mux_devs[i]; ret = mux_dev->set(mux_dev, state); - if (ret) + if (ret && ret != -EOPNOTSUPP) return ret; } diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c index fd1b805933676..648311f5e3cf1 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c +++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c @@ -166,7 +166,8 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status) return; } - if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) { + if (count > sizeof(struct pd_message) + 1 || + count + 1 > TCPC_RECEIVE_BUFFER_LEN) { dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count); return; } diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index bbd7f53f7d598..9838a2c8c1b85 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -568,6 +568,15 @@ struct pd_rx_event { enum tcpm_transmit_type rx_sop_type; }; +struct altmode_vdm_event { + struct kthread_work work; + struct tcpm_port *port; + u32 header; + u32 *data; + int cnt; + enum tcpm_transmit_type tx_sop_type; +}; + static const char * const pd_rev[] = { [PD_REV10] = "rev1", [PD_REV20] = "rev2", @@ -1562,18 +1571,68 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header, mod_vdm_delayed_work(port, 0); } -static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header, - const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type) +static void tcpm_queue_vdm_work(struct kthread_work *work) { - if (port->state != SRC_READY && port->state != SNK_READY && - port->state != SRC_VDM_IDENTITY_REQUEST) - return; + struct altmode_vdm_event *event = container_of(work, + struct altmode_vdm_event, + work); + struct tcpm_port *port = event->port; mutex_lock(&port->lock); - tcpm_queue_vdm(port, header, data, cnt, tx_sop_type); + if (port->state != SRC_READY && port->state != SNK_READY && + port->state != SRC_VDM_IDENTITY_REQUEST) { + tcpm_log_force(port, "dropping altmode_vdm_event"); + goto port_unlock; + } + + tcpm_queue_vdm(port, event->header, event->data, event->cnt, event->tx_sop_type); + +port_unlock: + kfree(event->data); + 
kfree(event); mutex_unlock(&port->lock); } +static int tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header, + const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type) +{ + struct altmode_vdm_event *event; + u32 *data_cpy; + int ret = -ENOMEM; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + goto err_event; + + data_cpy = kcalloc(cnt, sizeof(u32), GFP_KERNEL); + if (!data_cpy) + goto err_data; + + kthread_init_work(&event->work, tcpm_queue_vdm_work); + event->port = port; + event->header = header; + memcpy(data_cpy, data, sizeof(u32) * cnt); + event->data = data_cpy; + event->cnt = cnt; + event->tx_sop_type = tx_sop_type; + + ret = kthread_queue_work(port->wq, &event->work); + if (!ret) { + ret = -EBUSY; + goto err_queue; + } + + return 0; + +err_queue: + kfree(data_cpy); +err_data: + kfree(event); +err_event: + tcpm_log_force(port, "failed to queue altmode vdm, err:%d", ret); + return ret; +} + static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt) { u32 vdo = p[VDO_INDEX_IDH]; @@ -2784,8 +2843,7 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo) header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP); - return 0; + return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP); } static int tcpm_altmode_exit(struct typec_altmode *altmode) @@ -2801,8 +2859,7 @@ static int tcpm_altmode_exit(struct typec_altmode *altmode) header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP); - return 0; + return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP); } static int tcpm_altmode_vdm(struct typec_altmode *altmode, @@ -2810,9 +2867,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode, { struct tcpm_port *port = typec_altmode_get_drvdata(altmode); - tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP); - - return 0; + return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP); } static const struct typec_altmode_ops tcpm_altmode_ops = { @@ -2836,8 +2891,7 @@ static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_pl header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME); - return 0; + return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 
1 : 0, TCPC_TX_SOP_PRIME); } static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop) @@ -2853,8 +2907,7 @@ static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plu header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME); - return 0; + return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME); } static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop, @@ -2862,9 +2915,7 @@ static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug { struct tcpm_port *port = typec_altmode_get_drvdata(altmode); - tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME); - - return 0; + return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME); } static const struct typec_cable_ops tcpm_cable_ops = { @@ -5515,8 +5566,7 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, port->pps_data.active, 0); tcpm_set_charge(port, false); - tcpm_set_state(port, hard_reset_state(port), - PD_T_PS_SOURCE_OFF); + tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF); break; case PR_SWAP_SNK_SRC_SOURCE_ON: tcpm_enable_auto_vbus_discharge(port, true); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 5863a20b6c5dd..0568e643e8447 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -367,7 +367,7 @@ struct ucsi_debugfs_entry { u64 low; u64 high; } response; - u32 status; + int status; struct dentry *dentry; }; diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 0d632ba5d2a3c..dda8cb3262e0b 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -350,6 +350,32 @@ static int vf_qm_func_stop(struct hisi_qm *qm) return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); } +static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev) +{ + switch (vf_data->acc_magic) { + case ACC_DEV_MAGIC_V2: + if (vf_data->major_ver != ACC_DRV_MAJOR_VER) { + dev_info(dev, "migration driver version<%u.%u> not match!\n", + vf_data->major_ver, vf_data->minor_ver); + return -EINVAL; + } + break; + case ACC_DEV_MAGIC_V1: + /* Correct dma address */ + vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW]; + vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW]; + break; + default: + return -EINVAL; + } + + return 0; +} + static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, struct hisi_acc_vf_migration_file *migf) { @@ -363,7 +389,8 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done) return 0; - if (vf_data->acc_magic != ACC_DEV_MAGIC) { + ret = vf_qm_version_check(vf_data, dev); + if (ret) { dev_err(dev, "failed to match ACC_DEV_MAGIC\n"); return -EINVAL; } @@ -399,13 +426,6 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, return -EINVAL; } - ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1); - if (ret) { - dev_err(dev, "failed to write 
QM_VF_STATE\n"); - return ret; - } - - hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; hisi_acc_vdev->match_done = true; return 0; } @@ -418,7 +438,9 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, int vf_id = hisi_acc_vdev->vf_id; int ret; - vf_data->acc_magic = ACC_DEV_MAGIC; + vf_data->acc_magic = ACC_DEV_MAGIC_V2; + vf_data->major_ver = ACC_DRV_MAJOR_VER; + vf_data->minor_ver = ACC_DRV_MINOR_VER; /* Save device id */ vf_data->dev_id = hisi_acc_vdev->vf_dev->device; @@ -441,6 +463,19 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, return 0; } +static void vf_qm_xeqc_save(struct hisi_qm *qm, + struct hisi_acc_vf_migration_file *migf) +{ + struct acc_vf_data *vf_data = &migf->vf_data; + u16 eq_head, aeq_head; + + eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0); + + aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0); +} + static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, struct hisi_acc_vf_migration_file *migf) { @@ -456,6 +491,20 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, if (migf->total_length < sizeof(struct acc_vf_data)) return -EINVAL; + if (!vf_data->eqe_dma || !vf_data->aeqe_dma || + !vf_data->sqc_dma || !vf_data->cqc_dma) { + dev_info(dev, "resume dma addr is NULL!\n"); + hisi_acc_vdev->vf_qm_state = QM_NOT_READY; + return 0; + } + + ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1); + if (ret) { + dev_err(dev, "failed to write QM_VF_STATE\n"); + return -EINVAL; + } + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + qm->eqe_dma = vf_data->eqe_dma; qm->aeqe_dma = vf_data->aeqe_dma; qm->sqc_dma = vf_data->sqc_dma; @@ -505,23 +554,17 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, vf_data->vf_qm_state = QM_READY; hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; - ret = vf_qm_cache_wb(vf_qm); - if (ret) { - dev_err(dev, "failed to writeback QM Cache!\n"); - return ret; - } - ret = qm_get_regs(vf_qm, vf_data); if (ret) return -EINVAL; /* Every reg is 32 bit, the dma address is 64 bit. 
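The hunks above and below rebuild each 64-bit DMA address from two 32-bit queue-context words, indexed by QM_XQC_ADDR_HIGH/QM_XQC_ADDR_LOW and shifted by QM_XQC_ADDR_OFFSET. A minimal standalone sketch of that reassembly (userspace C, hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

#define QM_XQC_ADDR_OFFSET 32U
#define QM_XQC_ADDR_LOW    0x1
#define QM_XQC_ADDR_HIGH   0x2

/* Rebuild a 64-bit DMA address from the 32-bit dwords of a saved
 * queue context, mirroring what vf_qm_version_check() and
 * vf_qm_state_save() do for eqe_dma and aeqe_dma. */
static uint64_t xqc_dma_addr(const uint32_t *xqc_dw)
{
	uint64_t dma = xqc_dw[QM_XQC_ADDR_HIGH];

	dma <<= QM_XQC_ADDR_OFFSET;
	dma |= xqc_dw[QM_XQC_ADDR_LOW];
	return dma;
}

int main(void)
{
	uint32_t dw[3] = { 0, 0xdeadbeef, 0x12345678 };

	printf("0x%llx\n", (unsigned long long)xqc_dma_addr(dw));
	return 0;
}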
*/ - vf_data->eqe_dma = vf_data->qm_eqc_dw[1]; + vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH]; vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET; - vf_data->eqe_dma |= vf_data->qm_eqc_dw[0]; - vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1]; + vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW]; + vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH]; vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET; - vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0]; + vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW]; /* Through SQC_BT/CQC_BT to get sqc and cqc address */ ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma); @@ -537,6 +580,9 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, } migf->total_length = sizeof(struct acc_vf_data); + /* Save eqc and aeqc interrupt information */ + vf_qm_xeqc_save(vf_qm, migf); + return 0; } @@ -933,6 +979,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev dev_err(dev, "failed to check QM INT state!\n"); return ret; } + + ret = vf_qm_cache_wb(vf_qm); + if (ret) { + dev_err(dev, "failed to writeback QM cache!\n"); + return ret; + } + return 0; } @@ -1306,6 +1359,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev) struct hisi_acc_vf_core_device, core_device.vdev); struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + hisi_acc_vf_disable_fds(hisi_acc_vdev); iounmap(vf_qm->io_base); vfio_pci_core_close_device(core_vdev); } @@ -1326,6 +1380,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1; hisi_acc_vdev->pf_qm = pf_qm; hisi_acc_vdev->vf_dev = pdev; + hisi_acc_vdev->vf_qm_state = QM_NOT_READY; mutex_init(&hisi_acc_vdev->state_mutex); core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY; diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h index 5bab46602fad2..465284168906b 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h @@ -38,6 +38,9 @@ #define QM_REG_ADDR_OFFSET 0x0004 #define QM_XQC_ADDR_OFFSET 32U +#define QM_XQC_ADDR_LOW 0x1 +#define QM_XQC_ADDR_HIGH 0x2 + #define QM_VF_AEQ_INT_MASK 0x0004 #define QM_VF_EQ_INT_MASK 0x000c #define QM_IFC_INT_SOURCE_V 0x0020 @@ -49,10 +52,15 @@ #define QM_EQC_DW0 0X8000 #define QM_AEQC_DW0 0X8020 +#define ACC_DRV_MAJOR_VER 1 +#define ACC_DRV_MINOR_VER 0 + +#define ACC_DEV_MAGIC_V1 0XCDCDCDCDFEEDAACC +#define ACC_DEV_MAGIC_V2 0xAACCFEEDDECADEDE + struct acc_vf_data { #define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state) /* QM match information */ -#define ACC_DEV_MAGIC 0XCDCDCDCDFEEDAACC u64 acc_magic; u32 qp_num; u32 dev_id; @@ -60,7 +68,9 @@ struct acc_vf_data { u32 qp_base; u32 vf_qm_state; /* QM reserved match information */ - u32 qm_rsv_state[3]; + u16 major_ver; + u16 minor_ver; + u32 qm_rsv_state[2]; /* QM RW regs */ u32 aeq_int_mask; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index bf391b40e576f..8338cfd61fe14 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -294,7 +294,7 @@ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize) struct rb_node *p; for (p = rb_prev(n); p; p = rb_prev(p)) { - struct vfio_dma *dma = rb_entry(n, + struct vfio_dma *dma = rb_entry(p, struct vfio_dma, node); vfio_dma_bitmap_free(dma); diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c index 
10129095a4c17..b19e5f73de8bb 100644 --- a/drivers/video/backlight/qcom-wled.c +++ b/drivers/video/backlight/qcom-wled.c @@ -1406,9 +1406,11 @@ static int wled_configure(struct wled *wled) wled->ctrl_addr = be32_to_cpu(*prop_addr); rc = of_property_read_string(dev->of_node, "label", &wled->name); - if (rc) + if (rc) { wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); - + if (!wled->name) + return -ENOMEM; + } switch (wled->version) { case 3: u32_opts = wled3_opts; diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c index 139049368fdcf..7d02470f19b93 100644 --- a/drivers/video/console/dummycon.c +++ b/drivers/video/console/dummycon.c @@ -85,6 +85,15 @@ static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank, /* Redraw, so that we get putc(s) for output done while blanked */ return true; } + +static bool dummycon_switch(struct vc_data *vc) +{ + /* + * Redraw, so that we get putc(s) for output done while switched + * away. Informs deferred consoles to take over the display. + */ + return true; +} #else static void dummycon_putc(struct vc_data *vc, u16 c, unsigned int y, unsigned int x) { } @@ -95,6 +104,10 @@ static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank, { return false; } +static bool dummycon_switch(struct vc_data *vc) +{ + return false; +} #endif static const char *dummycon_startup(void) @@ -124,11 +137,6 @@ static bool dummycon_scroll(struct vc_data *vc, unsigned int top, return false; } -static bool dummycon_switch(struct vc_data *vc) -{ - return false; -} - /* * The console `switch' structure for the dummy console * diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 37bd18730fe0d..f9cdbf8c53e34 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; - } else + } else if (oldo - delta >= (unsigned long)c->vc_screenbuf) c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 07d127110ca4c..c98786996c647 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -117,9 +117,14 @@ static signed char con2fb_map_boot[MAX_NR_CONSOLES]; static struct fb_info *fbcon_info_from_console(int console) { + signed char fb; WARN_CONSOLE_UNLOCKED(); - return fbcon_registered_fb[con2fb_map[console]]; + fb = con2fb_map[console]; + if (fb < 0 || fb >= ARRAY_SIZE(fbcon_registered_fb)) + return NULL; + + return fbcon_registered_fb[fb]; } static int logo_lines; diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c index 64843464c6613..cd3821bd82e56 100644 --- a/drivers/video/fbdev/core/fbcvt.c +++ b/drivers/video/fbdev/core/fbcvt.c @@ -312,7 +312,7 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb) cvt.f_refresh = cvt.refresh; cvt.interlace = 1; - if (!cvt.xres || !cvt.yres || !cvt.refresh) { + if (!cvt.xres || !cvt.yres || !cvt.refresh || cvt.f_refresh > INT_MAX) { printk(KERN_INFO "fbcvt: Invalid input parameters\n"); return 1; } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 3c568cff2913e..eca2498f24368 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ 
b/drivers/video/fbdev/core/fbmem.c @@ -328,8 +328,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) !list_empty(&info->modelist)) ret = fb_add_videomode(&mode, &info->modelist); - if (ret) + if (ret) { + info->var = old_var; return ret; + } event.info = info; event.data = &mode; @@ -388,7 +390,7 @@ static int fb_check_foreignness(struct fb_info *fi) static int do_register_framebuffer(struct fb_info *fb_info) { - int i; + int i, err = 0; struct fb_videomode mode; if (fb_check_foreignness(fb_info)) @@ -397,10 +399,18 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (num_registered_fb == FB_MAX) return -ENXIO; - num_registered_fb++; for (i = 0 ; i < FB_MAX; i++) if (!registered_fb[i]) break; + + if (!fb_info->modelist.prev || !fb_info->modelist.next) + INIT_LIST_HEAD(&fb_info->modelist); + + fb_var_to_videomode(&mode, &fb_info->var); + err = fb_add_videomode(&mode, &fb_info->modelist); + if (err < 0) + return err; + fb_info->node = i; refcount_set(&fb_info->count, 1); mutex_init(&fb_info->lock); @@ -426,16 +436,12 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT)) bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT); - if (!fb_info->modelist.prev || !fb_info->modelist.next) - INIT_LIST_HEAD(&fb_info->modelist); - if (fb_info->skip_vt_switch) pm_vt_switch_required(fb_info->device, false); else pm_vt_switch_required(fb_info->device, true); - fb_var_to_videomode(&mode, &fb_info->var); - fb_add_videomode(&mode, &fb_info->modelist); + num_registered_fb++; registered_fb[i] = fb_info; #ifdef CONFIG_GUMSTIX_AM200EPD diff --git a/drivers/video/screen_info_pci.c b/drivers/video/screen_info_pci.c index 6c58335171410..66bfc1d0a6dc8 100644 --- a/drivers/video/screen_info_pci.c +++ b/drivers/video/screen_info_pci.c @@ -7,8 +7,8 @@ static struct pci_dev *screen_info_lfb_pdev; static size_t screen_info_lfb_bar; -static resource_size_t screen_info_lfb_offset; -static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0); +static resource_size_t screen_info_lfb_res_start; // original start of resource +static resource_size_t screen_info_lfb_offset; // framebuffer offset within resource static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr) { @@ -31,7 +31,7 @@ void screen_info_apply_fixups(void) if (screen_info_lfb_pdev) { struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar]; - if (pr->start != screen_info_lfb_res.start) { + if (pr->start != screen_info_lfb_res_start) { if (__screen_info_relocation_is_valid(si, pr)) { /* * Only update base if we have an actual @@ -47,46 +47,67 @@ void screen_info_apply_fixups(void) } } +static int __screen_info_lfb_pci_bus_region(const struct screen_info *si, unsigned int type, + struct pci_bus_region *r) +{ + u64 base, size; + + base = __screen_info_lfb_base(si); + if (!base) + return -EINVAL; + + size = __screen_info_lfb_size(si, type); + if (!size) + return -EINVAL; + + r->start = base; + r->end = base + size - 1; + + return 0; +} + static void screen_info_fixup_lfb(struct pci_dev *pdev) { unsigned int type; - struct resource res[SCREEN_INFO_MAX_RESOURCES]; - size_t i, numres; + struct pci_bus_region bus_region; int ret; + struct resource r = { + .flags = IORESOURCE_MEM, + }; + const struct resource *pr; const struct screen_info *si = &screen_info; if (screen_info_lfb_pdev) return; // already found type = screen_info_video_type(si); - if (type != VIDEO_TYPE_EFI) - return; // only applies to EFI 
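The fbmem.c hunk above reorders do_register_framebuffer() so the fallible fb_add_videomode() call runs before num_registered_fb is incremented or the fb_info is published; an error can then simply return with nothing to unwind. A sketch of that commit-last ordering, with simplified, hypothetical types (not the fbdev API):

static int committed;

struct entry { void *info; };

static int register_entry(struct entry *e, void *info,
			  int (*prepare)(void *))
{
	int err = prepare(info);	/* may fail, e.g. -ENOMEM */

	if (err < 0)
		return err;		/* no global state touched yet */

	committed++;			/* commit point: cannot fail below */
	e->info = info;
	return 0;
}

static int prepare_ok(void *p) { (void)p; return 0; }

int main(void)
{
	struct entry e = { 0 };

	return register_entry(&e, &e, prepare_ok);
}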
+ if (!__screen_info_has_lfb(type)) + return; // only applies to EFI; maybe VESA - ret = screen_info_resources(si, res, ARRAY_SIZE(res)); + ret = __screen_info_lfb_pci_bus_region(si, type, &bus_region); if (ret < 0) return; - numres = ret; - for (i = 0; i < numres; ++i) { - struct resource *r = &res[i]; - const struct resource *pr; - - if (!(r->flags & IORESOURCE_MEM)) - continue; - pr = pci_find_resource(pdev, r); - if (!pr) - continue; - - /* - * We've found a PCI device with the framebuffer - * resource. Store away the parameters to track - * relocation of the framebuffer aperture. - */ - screen_info_lfb_pdev = pdev; - screen_info_lfb_bar = pr - pdev->resource; - screen_info_lfb_offset = r->start - pr->start; - memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res)); - } + /* + * Translate the PCI bus address to resource. Account + * for an offset if the framebuffer is behind a PCI host + * bridge. + */ + pcibios_bus_to_resource(pdev->bus, &r, &bus_region); + + pr = pci_find_resource(pdev, &r); + if (!pr) + return; + + /* + * We've found a PCI device with the framebuffer + * resource. Store away the parameters to track + * relocation of the framebuffer aperture. + */ + screen_info_lfb_pdev = pdev; + screen_info_lfb_bar = pr - pdev->resource; + screen_info_lfb_offset = r.start - pr->start; + screen_info_lfb_res_start = bus_region.start; } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16, screen_info_fixup_lfb); diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c index 9432d4e303f16..8a638bc34d4a9 100644 --- a/drivers/virt/coco/tsm.c +++ b/drivers/virt/coco/tsm.c @@ -15,6 +15,7 @@ static struct tsm_provider { const struct tsm_ops *ops; void *data; + atomic_t count; } provider; static DECLARE_RWSEM(tsm_rwsem); @@ -92,6 +93,10 @@ static ssize_t tsm_report_privlevel_store(struct config_item *cfg, if (rc) return rc; + guard(rwsem_write)(&tsm_rwsem); + if (!provider.ops) + return -ENXIO; + /* * The valid privilege levels that a TSM might accept, if it accepts a * privilege level setting at all, are a max of TSM_PRIVLEVEL_MAX (see @@ -101,7 +106,6 @@ static ssize_t tsm_report_privlevel_store(struct config_item *cfg, if (provider.ops->privlevel_floor > val || val > TSM_PRIVLEVEL_MAX) return -EINVAL; - guard(rwsem_write)(&tsm_rwsem); rc = try_advance_write_generation(report); if (rc) return rc; @@ -115,6 +119,10 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg, char *buf) { guard(rwsem_read)(&tsm_rwsem); + + if (!provider.ops) + return -ENXIO; + return sysfs_emit(buf, "%u\n", provider.ops->privlevel_floor); } CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor); @@ -217,6 +225,9 @@ CONFIGFS_ATTR_RO(tsm_report_, generation); static ssize_t tsm_report_provider_show(struct config_item *cfg, char *buf) { guard(rwsem_read)(&tsm_rwsem); + if (!provider.ops) + return -ENXIO; + return sysfs_emit(buf, "%s\n", provider.ops->name); } CONFIGFS_ATTR_RO(tsm_report_, provider); @@ -284,7 +295,7 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf, guard(rwsem_write)(&tsm_rwsem); ops = provider.ops; if (!ops) - return -ENOTTY; + return -ENXIO; if (!report->desc.inblob_len) return -EINVAL; @@ -421,12 +432,20 @@ static struct config_item *tsm_report_make_item(struct config_group *group, if (!state) return ERR_PTR(-ENOMEM); + atomic_inc(&provider.count); config_item_init_type_name(&state->cfg, name, &tsm_report_type); return &state->cfg; } +static void tsm_report_drop_item(struct config_group *group, struct config_item *item) +{ + 
config_item_put(item); + atomic_dec(&provider.count); +} + static struct configfs_group_operations tsm_report_group_ops = { .make_item = tsm_report_make_item, + .drop_item = tsm_report_drop_item, }; static const struct config_item_type tsm_reports_type = { @@ -459,6 +478,11 @@ int tsm_register(const struct tsm_ops *ops, void *priv) return -EBUSY; } + if (atomic_read(&provider.count)) { + pr_err("configfs/tsm/report not empty\n"); + return -EBUSY; + } + provider.ops = ops; provider.data = priv; return 0; @@ -470,6 +494,9 @@ int tsm_unregister(const struct tsm_ops *ops) guard(rwsem_write)(&tsm_rwsem); if (ops != provider.ops) return -EBUSY; + if (atomic_read(&provider.count)) + pr_warn("\"%s\" unregistered with items present in configfs/tsm/report\n", + provider.ops->name); provider.ops = NULL; provider.data = NULL; return 0; diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c index d708c091bf1b1..180526220d8c4 100644 --- a/drivers/watchdog/da9052_wdt.c +++ b/drivers/watchdog/da9052_wdt.c @@ -164,6 +164,7 @@ static int da9052_wdt_probe(struct platform_device *pdev) da9052_wdt = &driver_data->wdt; da9052_wdt->timeout = DA9052_DEF_TIMEOUT; + da9052_wdt->min_hw_heartbeat_ms = DA9052_TWDMIN; da9052_wdt->info = &da9052_wdt_info; da9052_wdt->ops = &da9052_wdt_ops; da9052_wdt->parent = dev; diff --git a/drivers/watchdog/exar_wdt.c b/drivers/watchdog/exar_wdt.c index 7c61ff3432711..c2e3bb08df899 100644 --- a/drivers/watchdog/exar_wdt.c +++ b/drivers/watchdog/exar_wdt.c @@ -221,7 +221,7 @@ static const struct watchdog_info exar_wdt_info = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, - .identity = "Exar/MaxLinear XR28V38x Watchdog", + .identity = "Exar XR28V38x Watchdog", }; static const struct watchdog_ops exar_wdt_ops = { diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 4bd31242bd773..e47bb157aa090 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -700,15 +700,18 @@ static int __init balloon_add_regions(void) /* * Extra regions are accounted for in the physmap, but need - * decreasing from current_pages to balloon down the initial - * allocation, because they are already accounted for in - * total_pages. + * decreasing from current_pages and target_pages to balloon + * down the initial allocation, because they are already + * accounted for in total_pages. 
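The tsm.c changes above pair an atomic_inc() in make_item with an atomic_dec() in the new drop_item, and tsm_register() now refuses to install a provider while configfs report items still exist. A standalone sketch of that gate (C11 atomics, simplified types):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int item_count;
static const void *provider_ops;

static void item_make(void) { atomic_fetch_add(&item_count, 1); }
static void item_drop(void) { atomic_fetch_sub(&item_count, 1); }

/* Registration fails while items created against the previous
 * provider are still alive, as tsm_register() now checks. */
static int provider_register(const void *ops)
{
	if (provider_ops)
		return -EBUSY;		/* already registered */
	if (atomic_load(&item_count))
		return -EBUSY;		/* report directory not empty */
	provider_ops = ops;
	return 0;
}

int main(void)
{
	int dummy;

	item_make();
	printf("%d\n", provider_register(&dummy));	/* -16 (-EBUSY) */
	item_drop();
	printf("%d\n", provider_register(&dummy));	/* 0 */
	return 0;
}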
*/ - if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) { + pages = extra_pfn_end - start_pfn; + if (pages >= balloon_stats.current_pages || + pages >= balloon_stats.target_pages) { WARN(1, "Extra pages underflow current target"); return -ERANGE; } - balloon_stats.current_pages -= extra_pfn_end - start_pfn; + balloon_stats.current_pages -= pages; + balloon_stats.target_pages -= pages; } return 0; } diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 819c752332355..db78c06ba0cc6 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -160,4 +160,5 @@ const struct address_space_operations v9fs_addr_operations = { .invalidate_folio = netfs_invalidate_folio, .direct_IO = noop_direct_IO, .writepages = netfs_writepages, + .migrate_folio = filemap_migrate_folio, }; diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 42bd1cb7c9cdd..35f765610802a 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -55,25 +55,37 @@ static struct file_system_type anon_inode_fs_type = { .kill_sb = kill_anon_super, }; -static struct inode *anon_inode_make_secure_inode( - const char *name, - const struct inode *context_inode) +/** + * anon_inode_make_secure_inode - allocate an anonymous inode with security context + * @sb: [in] Superblock to allocate from + * @name: [in] Name of the class of the new file (e.g., "secretmem") + * @context_inode: + * [in] Optional parent inode for security inheritance + * + * The function ensures proper security initialization through the LSM hook + * security_inode_init_security_anon(). + * + * Return: Pointer to new inode on success, ERR_PTR on failure. + */ +struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name, + const struct inode *context_inode) { struct inode *inode; - const struct qstr qname = QSTR_INIT(name, strlen(name)); int error; - inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); + inode = alloc_anon_inode(sb); if (IS_ERR(inode)) return inode; inode->i_flags &= ~S_PRIVATE; - error = security_inode_init_security_anon(inode, &qname, context_inode); + error = security_inode_init_security_anon(inode, &QSTR(name), + context_inode); if (error) { iput(inode); return ERR_PTR(error); } return inode; } +EXPORT_SYMBOL_GPL_FOR_MODULES(anon_inode_make_secure_inode, "kvm"); static struct file *__anon_inode_getfile(const char *name, const struct file_operations *fops, @@ -88,7 +100,8 @@ static struct file *__anon_inode_getfile(const char *name, return ERR_PTR(-ENOENT); if (make_inode) { - inode = anon_inode_make_secure_inode(name, context_inode); + inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb, + name, context_inode); if (IS_ERR(inode)) { file = ERR_CAST(inode); goto err; diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 75c8a97a6954c..7b3b63ed747cf 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -405,7 +405,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * return ret; struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound); - struct qstr name = (struct qstr) QSTR(name_buf); + struct qstr name = QSTR(name_buf); inode->bi_dir = lostfound.bi_inum; diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index 3c7f941dde39a..ebabba2968821 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -32,8 +32,6 @@ #include #include -#define QSTR(n) { { { .len = strlen(n) } }, .name = n } - void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) { if (btree >= BTREE_ID_NR_MAX) diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index
fb02c1c360044..a27f4b84fe775 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -647,8 +647,6 @@ static inline int cmp_le32(__le32 l, __le32 r) #include -#define QSTR(n) { { { .len = strlen(n) } }, .name = n } - static inline bool qstr_eq(const struct qstr l, const struct qstr r) { return l.len == r.len && !memcmp(l.name, r.name, l.len); diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index e8c22cccb5c13..7dfcc9351bce5 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -427,8 +427,8 @@ struct btrfs_backref_node *btrfs_backref_alloc_node( struct btrfs_backref_edge *btrfs_backref_alloc_edge( struct btrfs_backref_cache *cache); -#define LINK_LOWER (1 << 0) -#define LINK_UPPER (1 << 1) +#define LINK_LOWER (1U << 0) +#define LINK_UPPER (1U << 1) void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, struct btrfs_backref_node *lower, diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c index bd38df5647e35..71984d7db839b 100644 --- a/fs/btrfs/direct-io.c +++ b/fs/btrfs/direct-io.c @@ -151,8 +151,8 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, } ordered = btrfs_alloc_ordered_extent(inode, start, file_extent, - (1 << type) | - (1 << BTRFS_ORDERED_DIRECT)); + (1U << type) | + (1U << BTRFS_ORDERED_DIRECT)); if (IS_ERR(ordered)) { if (em) { free_extent_map(em); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 147c50ef912ac..e655fa3bfd9be 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2168,8 +2168,7 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root, found = true; root = read_tree_root_path(tree_root, path, &key); if (IS_ERR(root)) { - if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) - ret = PTR_ERR(root); + ret = PTR_ERR(root); break; } set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); @@ -2786,6 +2785,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) btrfs_init_scrub(fs_info); btrfs_init_balance(fs_info); btrfs_init_async_reclaim_work(fs_info); + btrfs_init_extent_map_shrinker_work(fs_info); rwlock_init(&fs_info->block_group_cache_lock); fs_info->block_group_cache_tree = RB_ROOT_CACHED; @@ -4335,6 +4335,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) cancel_work_sync(&fs_info->async_reclaim_work); cancel_work_sync(&fs_info->async_data_reclaim_work); cancel_work_sync(&fs_info->preempt_reclaim_work); + cancel_work_sync(&fs_info->extent_map_shrinker_work); /* Cancel or finish ongoing discard work */ btrfs_discard_cleanup(fs_info); diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c index 6d08c100b01de..5f9a43734812e 100644 --- a/fs/btrfs/extent-io-tree.c +++ b/fs/btrfs/extent-io-tree.c @@ -1252,8 +1252,11 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, if (!prealloc) goto search_again; ret = split_state(tree, state, prealloc, end + 1); - if (ret) + if (ret) { extent_io_tree_panic(tree, state, "split", ret); + prealloc = NULL; + goto out; + } set_state_bits(tree, prealloc, bits, changeset); cache_state(prealloc, cached_state); @@ -1456,6 +1459,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, if (IS_ERR(inserted_state)) { ret = PTR_ERR(inserted_state); extent_io_tree_panic(tree, prealloc, "insert", ret); + goto out; } cache_state(inserted_state, cached_state); if (inserted_state == prealloc) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index fcb60837d7dc6..039a73731135a 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -79,7 +79,7 @@ enum { * single word in a bitmap may straddle 
two pages in the extent buffer. */ #define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE) -#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1) +#define BYTE_MASK ((1U << BITS_PER_BYTE) - 1) #define BITMAP_FIRST_BYTE_MASK(start) \ ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK) #define BITMAP_LAST_BYTE_MASK(nbits) \ diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 1d93e1202c339..36af9aa9aab13 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -1128,11 +1128,14 @@ struct btrfs_em_shrink_ctx { static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx) { - const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info); + struct btrfs_fs_info *fs_info = inode->root->fs_info; + const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info); struct extent_map_tree *tree = &inode->extent_tree; long nr_dropped = 0; struct rb_node *node; + lockdep_assert_held_write(&tree->lock); + /* * Take the mmap lock so that we serialize with the inode logging phase * of fsync because we may need to set the full sync flag on the inode, @@ -1144,28 +1147,12 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c * to find new extents, which may not be there yet because ordered * extents haven't completed yet. * - * We also do a try lock because otherwise we could deadlock. This is - * because the shrinker for this filesystem may be invoked while we are - * in a path that is holding the mmap lock in write mode. For example in - * a reflink operation while COWing an extent buffer, when allocating - * pages for a new extent buffer and under memory pressure, the shrinker - * may be invoked, and therefore we would deadlock by attempting to read - * lock the mmap lock while we are holding already a write lock on it. + * We also do a try lock because we don't want to block for too long and + * we are holding the extent map tree's lock in write mode. */ if (!down_read_trylock(&inode->i_mmap_lock)) return 0; - /* - * We want to be fast so if the lock is busy we don't want to spend time - * waiting for it - either some task is about to do IO for the inode or - * we may have another task shrinking extent maps, here in this code, so - * skip this inode. - */ - if (!write_trylock(&tree->lock)) { - up_read(&inode->i_mmap_lock); - return 0; - } - node = rb_first(&tree->root); while (node) { struct rb_node *next = rb_next(node); @@ -1201,36 +1188,89 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c * lock. This is to avoid slowing other tasks trying to take the * lock. */ - if (need_resched() || rwlock_needbreak(&tree->lock)) + if (need_resched() || rwlock_needbreak(&tree->lock) || + btrfs_fs_closing(fs_info)) break; node = next; } - write_unlock(&tree->lock); up_read(&inode->i_mmap_lock); return nr_dropped; } +static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root, + u64 min_ino) +{ + struct btrfs_inode *inode; + unsigned long from = min_ino; + + xa_lock(&root->inodes); + while (true) { + struct extent_map_tree *tree; + + inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT); + if (!inode) + break; + + tree = &inode->extent_tree; + + /* + * We want to be fast so if the lock is busy we don't want to + * spend time waiting for it (some task is about to do IO for + * the inode). + */ + if (!write_trylock(&tree->lock)) + goto next; + + /* + * Skip inode if it doesn't have loaded extent maps, so we avoid + * getting a reference and doing an iput later. 
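btrfs_scan_inode() above now runs with the extent map tree lock already held and only try-locks i_mmap_lock, skipping the inode rather than blocking. An analogous userspace sketch of the trylock-and-skip walk (pthread rwlocks, hypothetical types), matching how find_first_inode_to_shrink() below uses write_trylock() and RB_EMPTY_ROOT():

#include <pthread.h>
#include <stddef.h>

struct tree {
	pthread_rwlock_t lock;
	int nr_items;
};

/* Return the first tree that is both uncontended and non-empty,
 * leaving its write lock held for the caller. */
static struct tree *first_tree_to_shrink(struct tree *trees, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct tree *t = &trees[i];

		if (pthread_rwlock_trywrlock(&t->lock))
			continue;		/* busy: skip, never block */
		if (!t->nr_items) {
			pthread_rwlock_unlock(&t->lock);
			continue;		/* empty: nothing to reclaim */
		}
		return t;			/* caller unlocks when done */
	}
	return NULL;
}

int main(void)
{
	struct tree t = { PTHREAD_RWLOCK_INITIALIZER, 3 };
	struct tree *got = first_tree_to_shrink(&t, 1);

	if (got)
		pthread_rwlock_unlock(&got->lock);
	return 0;
}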
This includes + * cases like files that were opened for things like stat(2), or + * files with all extent maps previously released through the + * release folio callback (btrfs_release_folio()) or released in + * a previous run, or directories which never have extent maps. + */ + if (RB_EMPTY_ROOT(&tree->root)) { + write_unlock(&tree->lock); + goto next; + } + + if (igrab(&inode->vfs_inode)) + break; + + write_unlock(&tree->lock); +next: + from = btrfs_ino(inode) + 1; + cond_resched_lock(&root->inodes.xa_lock); + } + xa_unlock(&root->inodes); + + return inode; +} + static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx) { + struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_inode *inode; long nr_dropped = 0; u64 min_ino = ctx->last_ino + 1; - inode = btrfs_find_first_inode(root, min_ino); + inode = find_first_inode_to_shrink(root, min_ino); while (inode) { nr_dropped += btrfs_scan_inode(inode, ctx); + write_unlock(&inode->extent_tree.lock); min_ino = btrfs_ino(inode) + 1; ctx->last_ino = btrfs_ino(inode); - btrfs_add_delayed_iput(inode); + iput(&inode->vfs_inode); - if (ctx->scanned >= ctx->nr_to_scan) + if (ctx->scanned >= ctx->nr_to_scan || + btrfs_fs_closing(fs_info)) break; cond_resched(); - inode = btrfs_find_first_inode(root, min_ino); + inode = find_first_inode_to_shrink(root, min_ino); } if (inode) { @@ -1254,16 +1294,19 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx return nr_dropped; } -long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) +static void btrfs_extent_map_shrinker_worker(struct work_struct *work) { + struct btrfs_fs_info *fs_info; struct btrfs_em_shrink_ctx ctx; u64 start_root_id; u64 next_root_id; bool cycled = false; long nr_dropped = 0; + fs_info = container_of(work, struct btrfs_fs_info, extent_map_shrinker_work); + ctx.scanned = 0; - ctx.nr_to_scan = nr_to_scan; + ctx.nr_to_scan = atomic64_read(&fs_info->extent_map_shrinker_nr_to_scan); /* * In case we have multiple tasks running this shrinker, make the next @@ -1281,12 +1324,12 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) { s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps); - trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan, + trace_btrfs_extent_map_shrinker_scan_enter(fs_info, ctx.nr_to_scan, nr, ctx.last_root, ctx.last_ino); } - while (ctx.scanned < ctx.nr_to_scan) { + while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) { struct btrfs_root *root; unsigned long count; @@ -1344,5 +1387,34 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) ctx.last_ino); } - return nr_dropped; + atomic64_set(&fs_info->extent_map_shrinker_nr_to_scan, 0); +} + +void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) +{ + /* + * Do nothing if the shrinker is already running. In case of high memory + * pressure we can have a lot of tasks calling us and all passing the + * same nr_to_scan value, but in reality we may need only to free + * nr_to_scan extent maps (or less). In case we need to free more than + * that, we will be called again by the fs shrinker, so no worries about + * not doing enough work to reclaim memory from extent maps. + * We can also be repeatedly called with the same nr_to_scan value + * simply because the shrinker runs asynchronously and multiple calls + * to this function are made before the shrinker does enough progress. 
+ * + * That's why we set the atomic counter to nr_to_scan only if its + * current value is zero, instead of incrementing the counter by + * nr_to_scan. + */ + if (atomic64_cmpxchg(&fs_info->extent_map_shrinker_nr_to_scan, 0, nr_to_scan) != 0) + return; + + queue_work(system_unbound_wq, &fs_info->extent_map_shrinker_work); +} + +void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info) +{ + atomic64_set(&fs_info->extent_map_shrinker_nr_to_scan, 0); + INIT_WORK(&fs_info->extent_map_shrinker_work, btrfs_extent_map_shrinker_worker); } diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 5154a8f1d26c9..cd123b266b641 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -189,6 +189,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, int btrfs_replace_extent_map_range(struct btrfs_inode *inode, struct extent_map *new_em, bool modified); -long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan); +void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan); +void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info); #endif diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index eaa991e698049..0e63603ac5c78 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1912,6 +1912,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) struct extent_changeset *data_reserved = NULL; unsigned long zero_start; loff_t size; + size_t fsize = folio_size(folio); vm_fault_t ret; int ret2; int reserved = 0; @@ -1922,7 +1923,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) ASSERT(folio_order(folio) == 0); - reserved_space = PAGE_SIZE; + reserved_space = fsize; sb_start_pagefault(inode->i_sb); page_start = folio_pos(folio); @@ -1976,7 +1977,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) * We can't set the delalloc bits if there are pending ordered * extents. Drop our locks and wait for them to finish. 
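btrfs_free_extent_maps() above stores nr_to_scan only when the counter is currently zero, so a flood of concurrent shrink requests collapses into a single queued work item; the worker clears the counter when it finishes. A standalone sketch of that coalescing gate (C11 atomics, hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long pending_nr;

/* Only the caller that moves the counter from 0 to nonzero queues
 * work; every other caller returns immediately, mirroring the
 * atomic64_cmpxchg() in btrfs_free_extent_maps(). */
static bool request_shrink(long nr)
{
	long expected = 0;

	if (!atomic_compare_exchange_strong(&pending_nr, &expected, nr))
		return false;		/* a shrink is already pending */
	/* queue_work(...) would go here */
	return true;
}

int main(void)
{
	printf("%d\n", request_shrink(128));	/* 1: queued */
	printf("%d\n", request_shrink(128));	/* 0: coalesced */
	atomic_store(&pending_nr, 0);		/* worker finished */
	printf("%d\n", request_shrink(64));	/* 1: queued again */
	return 0;
}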
*/ - ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE); + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize); if (ordered) { unlock_extent(io_tree, page_start, page_end, &cached_state); folio_unlock(folio); @@ -1988,11 +1989,11 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) if (folio->index == ((size - 1) >> PAGE_SHIFT)) { reserved_space = round_up(size - page_start, fs_info->sectorsize); - if (reserved_space < PAGE_SIZE) { + if (reserved_space < fsize) { end = page_start + reserved_space - 1; btrfs_delalloc_release_space(BTRFS_I(inode), - data_reserved, page_start, - PAGE_SIZE - reserved_space, true); + data_reserved, end + 1, + fsize - reserved_space, true); } } @@ -2019,12 +2020,12 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) if (page_start + folio_size(folio) > size) zero_start = offset_in_folio(folio, size); else - zero_start = PAGE_SIZE; + zero_start = fsize; - if (zero_start != PAGE_SIZE) + if (zero_start != fsize) folio_zero_range(folio, zero_start, folio_size(folio) - zero_start); - btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE); + btrfs_folio_clear_checked(fs_info, folio, page_start, fsize); btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start); btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start); @@ -2033,7 +2034,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) unlock_extent(io_tree, page_start, page_end, &cached_state); up_read(&BTRFS_I(inode)->i_mmap_lock); - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + btrfs_delalloc_release_extents(BTRFS_I(inode), fsize); sb_end_pagefault(inode->i_sb); extent_changeset_free(data_reserved); return VM_FAULT_LOCKED; @@ -2042,7 +2043,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) folio_unlock(folio); up_read(&BTRFS_I(inode)->i_mmap_lock); out: - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + btrfs_delalloc_release_extents(BTRFS_I(inode), fsize); btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, reserved_space, (ret != 0)); out_noreserve: diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 7ba50e133921a..308abbf8855b0 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -1104,11 +1104,21 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0); if (ret < 0) goto out_locked; - ASSERT(ret == 0); + /* + * If ret is 1 (no key found), it means this is an empty block group, + * without any extents allocated from it and there's no block group + * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree + * because we are using the block group tree feature, so block group + * items are stored in the block group tree. It also means there are no + * extents allocated for block groups with a start offset beyond this + * block group's end offset (this is the last, highest, block group). 
+ */ + if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE)) + ASSERT(ret == 0); start = block_group->start; end = block_group->start + block_group->length; - while (1) { + while (ret == 0) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type == BTRFS_EXTENT_ITEM_KEY || @@ -1138,8 +1148,6 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, ret = btrfs_next_item(extent_root, path); if (ret < 0) goto out_locked; - if (ret) - break; } if (start < end) { ret = __add_to_free_space_tree(trans, block_group, path2, diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h index bb822e425d7fa..374843aca60d8 100644 --- a/fs/btrfs/fs.h +++ b/fs/btrfs/fs.h @@ -639,6 +639,8 @@ struct btrfs_fs_info { spinlock_t extent_map_shrinker_lock; u64 extent_map_shrinker_last_root; u64 extent_map_shrinker_last_ino; + atomic64_t extent_map_shrinker_nr_to_scan; + struct work_struct extent_map_shrinker_work; /* Protected by 'trans_lock'. */ struct list_head dirty_cowonly_roots; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9ce1270addb04..f84e3f9fad84a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1249,7 +1249,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk, free_extent_map(em); ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, - 1 << BTRFS_ORDERED_COMPRESSED); + 1U << BTRFS_ORDERED_COMPRESSED); if (IS_ERR(ordered)) { btrfs_drop_extent_map_range(inode, start, end, false); ret = PTR_ERR(ordered); @@ -1408,6 +1408,17 @@ static noinline int cow_file_range(struct btrfs_inode *inode, alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes); + /* + * We're not doing compressed IO, don't unlock the first page (which + * the caller expects to stay locked), don't clear any dirty bits and + * don't set any writeback bits. + * + * Do set the Ordered (Private2) bit so we know this page was properly + * setup for writepage. + */ + page_ops = (keep_locked ? 0 : PAGE_UNLOCK); + page_ops |= PAGE_SET_ORDERED; + /* * Relocation relies on the relocated extents to have exactly the same * size as the original extents. Normally writeback for relocation data @@ -1452,8 +1463,13 @@ static noinline int cow_file_range(struct btrfs_inode *inode, continue; } if (done_offset) { - *done_offset = start - 1; - return 0; + /* + * Move @end to the end of the processed range, + * and exit the loop to unlock the processed extents. + */ + end = start - 1; + ret = 0; + break; } ret = -ENOSPC; } @@ -1470,6 +1486,10 @@ static noinline int cow_file_range(struct btrfs_inode *inode, file_extent.offset = 0; file_extent.compression = BTRFS_COMPRESS_NONE; + /* + * Locked range will be released either during error clean up or + * after the whole range is finished. 
+ */ lock_extent(&inode->io_tree, start, start + ram_size - 1, &cached); @@ -1484,7 +1504,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode, free_extent_map(em); ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, - 1 << BTRFS_ORDERED_REGULAR); + 1U << BTRFS_ORDERED_REGULAR); if (IS_ERR(ordered)) { unlock_extent(&inode->io_tree, start, start + ram_size - 1, &cached); @@ -1515,27 +1535,12 @@ static noinline int cow_file_range(struct btrfs_inode *inode, btrfs_dec_block_group_reservations(fs_info, ins.objectid); - /* - * We're not doing compressed IO, don't unlock the first page - * (which the caller expects to stay locked), don't clear any - * dirty bits and don't set any writeback bits - * - * Do set the Ordered (Private2) bit so we know this page was - * properly setup for writepage. - */ - page_ops = (keep_locked ? 0 : PAGE_UNLOCK); - page_ops |= PAGE_SET_ORDERED; - - extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, - locked_folio, &cached, - EXTENT_LOCKED | EXTENT_DELALLOC, - page_ops); - if (num_bytes < cur_alloc_size) + if (num_bytes < ram_size) num_bytes = 0; else - num_bytes -= cur_alloc_size; + num_bytes -= ram_size; alloc_hint = ins.objectid + ins.offset; - start += cur_alloc_size; + start += ram_size; extent_reserved = false; /* @@ -1546,6 +1551,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode, if (ret) goto out_unlock; } + extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached, + EXTENT_LOCKED | EXTENT_DELALLOC, page_ops); done: if (done_offset) *done_offset = end; @@ -1561,40 +1568,35 @@ static noinline int cow_file_range(struct btrfs_inode *inode, * Now, we have three regions to clean up: * * |-------(1)----|---(2)---|-------------(3)----------| - * `- orig_start `- start `- start + cur_alloc_size `- end + * `- orig_start `- start `- start + ram_size `- end * * We process each region below. */ - clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | - EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV; - page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK; - /* * For the range (1). We have already instantiated the ordered extents * for this region. They are cleaned up by * btrfs_cleanup_ordered_extents() in e.g, - * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are - * already cleared in the above loop. And, EXTENT_DELALLOC_NEW | - * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup - * function. + * btrfs_run_delalloc_range(). + * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV + * are also handled by the cleanup function. * - * However, in case of @keep_locked, we still need to unlock the pages - * (except @locked_folio) to ensure all the pages are unlocked. + * So here we only clear the EXTENT_LOCKED and EXTENT_DELALLOC flags, and + * finish the writeback of the involved folios, which will never be submitted. */ - if (keep_locked && orig_start < start) { + if (orig_start < start) { + clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC; + page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK; + if (!locked_folio) mapping_set_error(inode->vfs_inode.i_mapping, ret); extent_clear_unlock_delalloc(inode, orig_start, start - 1, - locked_folio, NULL, 0, page_ops); + locked_folio, NULL, clear_bits, page_ops); } - /* - * At this point we're unlocked, we want to make sure we're only - * clearing these flags under the extent lock, so lock the rest of the - * range and clear everything up.
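The error-path comment above splits [orig_start, end] into three regions around the point of failure. A small sketch that computes the same boundaries (hypothetical helper, userspace C):

#include <stdint.h>
#include <stdio.h>

/* Regions cleaned up by cow_file_range()'s error path:
 * (1) ordered extents already created, (2) an extent reserved but
 * with no ordered extent yet, (3) the untouched remainder. */
static void print_cleanup_regions(uint64_t orig_start, uint64_t start,
				  uint64_t ram_size, uint64_t end,
				  int extent_reserved)
{
	uint64_t rest = extent_reserved ? start + ram_size : start;

	if (orig_start < start)
		printf("(1) [%llu, %llu]\n", (unsigned long long)orig_start,
		       (unsigned long long)(start - 1));
	if (extent_reserved)
		printf("(2) [%llu, %llu]\n", (unsigned long long)start,
		       (unsigned long long)(start + ram_size - 1));
	if (rest <= end)
		printf("(3) [%llu, %llu]\n", (unsigned long long)rest,
		       (unsigned long long)end);
}

int main(void)
{
	print_cleanup_regions(0, 4096, 4096, 16383, 1);
	return 0;
}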
- */ - lock_extent(&inode->io_tree, start, end, NULL); + clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | + EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV; + page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK; /* * For the range (2). If we reserved an extent for our delalloc range @@ -1608,11 +1610,11 @@ static noinline int cow_file_range(struct btrfs_inode *inode, */ if (extent_reserved) { extent_clear_unlock_delalloc(inode, start, - start + cur_alloc_size - 1, + start + ram_size - 1, locked_folio, &cached, clear_bits, page_ops); - btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL); - start += cur_alloc_size; + btrfs_qgroup_free_data(inode, NULL, start, ram_size, NULL); + start += ram_size; } /* @@ -2055,6 +2057,63 @@ static void cleanup_dirty_folios(struct btrfs_inode *inode, mapping_set_error(mapping, error); } +static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio, + struct extent_state **cached, + struct can_nocow_file_extent_args *nocow_args, + u64 file_pos, bool is_prealloc) +{ + struct btrfs_ordered_extent *ordered; + u64 len = nocow_args->file_extent.num_bytes; + u64 end = file_pos + len - 1; + int ret = 0; + + lock_extent(&inode->io_tree, file_pos, end, cached); + + if (is_prealloc) { + struct extent_map *em; + + em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent, + BTRFS_ORDERED_PREALLOC); + if (IS_ERR(em)) { + unlock_extent(&inode->io_tree, file_pos, end, cached); + return PTR_ERR(em); + } + free_extent_map(em); + } + + ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent, + is_prealloc + ? (1U << BTRFS_ORDERED_PREALLOC) + : (1U << BTRFS_ORDERED_NOCOW)); + if (IS_ERR(ordered)) { + if (is_prealloc) + btrfs_drop_extent_map_range(inode, file_pos, end, false); + unlock_extent(&inode->io_tree, file_pos, end, cached); + return PTR_ERR(ordered); + } + + if (btrfs_is_data_reloc_root(inode->root)) + /* + * Errors are handled later, as we must prevent + * extent_clear_unlock_delalloc() in error handler from freeing + * metadata of the created ordered extent. + */ + ret = btrfs_reloc_clone_csums(ordered); + btrfs_put_ordered_extent(ordered); + + extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached, + EXTENT_LOCKED | EXTENT_DELALLOC | + EXTENT_CLEAR_DATA_RESV, + PAGE_UNLOCK | PAGE_SET_ORDERED); + + /* + * btrfs_reloc_clone_csums() error, now we're OK to call error handler, + * as metadata for created ordered extent will only be freed by + * btrfs_finish_ordered_io(). + */ + return ret; +} + /* * when nocow writeback calls back. This checks for snapshots or COW copies * of the extents that exist in the file, and COWs the file as required.
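nocow_one_range() above builds its ordered extent flags with 1U << BTRFS_ORDERED_*, part of a pattern throughout this series replacing plain 1 << x: shifting 1 into bit 31 of a signed int is undefined behaviour in C, while the unsigned form is well defined for bits 0..31. A minimal illustration:

#include <stdio.h>

int main(void)
{
	unsigned int bit = 31;

	/* 1 << 31 would shift into the sign bit of a signed int,
	 * which is undefined behaviour; 1U keeps the expression
	 * unsigned and well defined. */
	unsigned int mask = 1U << bit;

	printf("0x%x\n", mask);	/* 0x80000000 */
	return 0;
}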
@@ -2099,15 +2158,12 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode, while (cur_offset <= end) { struct btrfs_block_group *nocow_bg = NULL; - struct btrfs_ordered_extent *ordered; struct btrfs_key found_key; struct btrfs_file_extent_item *fi; struct extent_buffer *leaf; struct extent_state *cached_state = NULL; u64 extent_end; - u64 nocow_end; int extent_type; - bool is_prealloc; ret = btrfs_lookup_file_extent(NULL, root, path, ino, cur_offset, 0); @@ -2242,67 +2298,13 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode, } } - nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1; - lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state); - - is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC; - if (is_prealloc) { - struct extent_map *em; - - em = btrfs_create_io_em(inode, cur_offset, - &nocow_args.file_extent, - BTRFS_ORDERED_PREALLOC); - if (IS_ERR(em)) { - unlock_extent(&inode->io_tree, cur_offset, - nocow_end, &cached_state); - btrfs_dec_nocow_writers(nocow_bg); - ret = PTR_ERR(em); - goto error; - } - free_extent_map(em); - } - - ordered = btrfs_alloc_ordered_extent(inode, cur_offset, - &nocow_args.file_extent, - is_prealloc - ? (1 << BTRFS_ORDERED_PREALLOC) - : (1 << BTRFS_ORDERED_NOCOW)); + ret = nocow_one_range(inode, locked_folio, &cached_state, + &nocow_args, cur_offset, + extent_type == BTRFS_FILE_EXTENT_PREALLOC); btrfs_dec_nocow_writers(nocow_bg); - if (IS_ERR(ordered)) { - if (is_prealloc) { - btrfs_drop_extent_map_range(inode, cur_offset, - nocow_end, false); - } - unlock_extent(&inode->io_tree, cur_offset, - nocow_end, &cached_state); - ret = PTR_ERR(ordered); + if (ret < 0) goto error; - } - - if (btrfs_is_data_reloc_root(root)) - /* - * Error handled later, as we must prevent - * extent_clear_unlock_delalloc() in error handler - * from freeing metadata of created ordered extent. - */ - ret = btrfs_reloc_clone_csums(ordered); - btrfs_put_ordered_extent(ordered); - - extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, - locked_folio, &cached_state, - EXTENT_LOCKED | EXTENT_DELALLOC | - EXTENT_CLEAR_DATA_RESV, - PAGE_UNLOCK | PAGE_SET_ORDERED); - cur_offset = extent_end; - - /* - * btrfs_reloc_clone_csums() error, now we're OK to call error - * handler, as metadata for created ordered extent will only - * be freed by btrfs_finish_ordered_io(). - */ - if (ret) - goto error; } btrfs_release_path(path); @@ -4732,7 +4734,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; int ret = 0; struct btrfs_trans_handle *trans; - u64 last_unlink_trans; struct fscrypt_name fname; if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) @@ -4758,6 +4759,23 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) goto out_notrans; } + /* + * Propagate the last_unlink_trans value of the deleted dir to its + * parent directory. This is to prevent an unrecoverable log tree in the + * case we do something like this: + * 1) create dir foo + * 2) create snapshot under dir foo + * 3) delete the snapshot + * 4) rmdir foo + * 5) mkdir foo + * 6) fsync foo or some file inside foo + * + * This is because we can't unlink other roots when replaying the dir + * deletes for directory foo. 
+ */ + if (BTRFS_I(inode)->last_unlink_trans >= trans->transid) + btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); + if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); goto out; } @@ -4767,27 +4785,11 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) if (ret) goto out; - last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; - /* now the directory is empty */ ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), &fname.disk_name); - if (!ret) { + if (!ret) btrfs_i_size_write(BTRFS_I(inode), 0); - /* - * Propagate the last_unlink_trans value of the deleted dir to - * its parent directory. This is to prevent an unrecoverable - * log tree in the case we do something like this: - * 1) create dir foo - * 2) create snapshot under dir foo - * 3) delete the snapshot - * 4) rmdir foo - * 5) mkdir foo - * 6) fsync foo or some file inside foo - */ - if (last_unlink_trans >= trans->transid) - BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; - } out: btrfs_end_transaction(trans); out_notrans: @@ -4857,8 +4859,11 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask); if (IS_ERR(folio)) { - btrfs_delalloc_release_space(inode, data_reserved, block_start, - blocksize, true); + if (only_release_metadata) + btrfs_delalloc_release_metadata(inode, blocksize, true); + else + btrfs_delalloc_release_space(inode, data_reserved, + block_start, blocksize, true); btrfs_delalloc_release_extents(inode, blocksize); ret = -ENOMEM; goto out; @@ -7994,6 +7999,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, int ret; int ret2; bool need_abort = false; + bool logs_pinned = false; struct fscrypt_name old_fname, new_fname; struct fscrypt_str *old_name, *new_name; @@ -8117,6 +8123,31 @@ static int btrfs_rename_exchange(struct inode *old_dir, inode_inc_iversion(new_inode); simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); + if (old_ino != BTRFS_FIRST_FREE_OBJECTID && + new_ino != BTRFS_FIRST_FREE_OBJECTID) { + /* + * If we are renaming in the same directory (and it's not for + * root entries) pin the log early to prevent any concurrent + * task from logging the directory after we removed the old + * entries and before we add the new entries, otherwise that + * task can sync a log without any entry for the inodes we are + * renaming and therefore replaying that log, if a power failure + * happens after syncing the log, would result in deleting the + * inodes. + * + * If the rename affects two different directories, we want to + * make sure that there's no log commit that contains + * updates for only one of the directories but not for the + * other. + * + * If we are renaming an entry for a root, we don't care about + * log updates since we called btrfs_set_log_full_commit(). + */ + btrfs_pin_log_trans(root); + btrfs_pin_log_trans(dest); + logs_pinned = true; + } + if (old_dentry->d_parent != new_dentry->d_parent) { btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), BTRFS_I(old_inode), true); @@ -8174,30 +8205,23 @@ static int btrfs_rename_exchange(struct inode *old_dir, BTRFS_I(new_inode)->dir_index = new_idx; /* - * Now pin the logs of the roots.
We do it to ensure that no other task - * can sync the logs while we are in progress with the rename, because - * that could result in an inconsistency in case any of the inodes that - * are part of this rename operation were logged before. + * Do the log updates for all inodes. + * + * If either entry is for a root we don't need to update the logs since + * we've called btrfs_set_log_full_commit() before. */ - if (old_ino != BTRFS_FIRST_FREE_OBJECTID) - btrfs_pin_log_trans(root); - if (new_ino != BTRFS_FIRST_FREE_OBJECTID) - btrfs_pin_log_trans(dest); - - /* Do the log updates for all inodes. */ - if (old_ino != BTRFS_FIRST_FREE_OBJECTID) + if (logs_pinned) { btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), old_rename_ctx.index, new_dentry->d_parent); - if (new_ino != BTRFS_FIRST_FREE_OBJECTID) btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), new_rename_ctx.index, old_dentry->d_parent); + } - /* Now unpin the logs. */ - if (old_ino != BTRFS_FIRST_FREE_OBJECTID) +out_fail: + if (logs_pinned) { btrfs_end_log_trans(root); - if (new_ino != BTRFS_FIRST_FREE_OBJECTID) btrfs_end_log_trans(dest); -out_fail: + } ret2 = btrfs_end_transaction(trans); ret = ret ? ret : ret2; out_notrans: @@ -8247,6 +8271,7 @@ static int btrfs_rename(struct mnt_idmap *idmap, int ret2; u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); struct fscrypt_name old_fname, new_fname; + bool logs_pinned = false; if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return -EPERM; @@ -8381,6 +8406,29 @@ static int btrfs_rename(struct mnt_idmap *idmap, inode_inc_iversion(old_inode); simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); + if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { + /* + * If we are renaming in the same directory (and it's not a + * root entry) pin the log to prevent any concurrent task from + * logging the directory after we removed the old entry and + * before we add the new entry, otherwise that task can sync + * a log without any entry for the inode we are renaming and + * therefore replaying that log, if a power failure happens + * after syncing the log, would result in deleting the inode. + * + * If the rename affects two different directories, we want to + * make sure that there's no log commit that contains + * updates for only one of the directories but not for the + * other. + * + * If we are renaming an entry for a root, we don't care about + * log updates since we called btrfs_set_log_full_commit(). + */ + btrfs_pin_log_trans(root); + btrfs_pin_log_trans(dest); + logs_pinned = true; + } + if (old_dentry->d_parent != new_dentry->d_parent) btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), BTRFS_I(old_inode), true); @@ -8429,7 +8477,7 @@ static int btrfs_rename(struct mnt_idmap *idmap, if (old_inode->i_nlink == 1) BTRFS_I(old_inode)->dir_index = index; - if (old_ino != BTRFS_FIRST_FREE_OBJECTID) + if (logs_pinned) btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), rename_ctx.index, new_dentry->d_parent); @@ -8445,6 +8493,10 @@ static int btrfs_rename(struct mnt_idmap *idmap, } } out_fail: + if (logs_pinned) { + btrfs_end_log_trans(root); + btrfs_end_log_trans(dest); + } ret2 = btrfs_end_transaction(trans); ret = ret ?
ret : ret2; out_notrans: @@ -9680,8 +9732,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, free_extent_map(em); ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, - (1 << BTRFS_ORDERED_ENCODED) | - (1 << BTRFS_ORDERED_COMPRESSED)); + (1U << BTRFS_ORDERED_ENCODED) | + (1U << BTRFS_ORDERED_COMPRESSED)); if (IS_ERR(ordered)) { btrfs_drop_extent_map_range(inode, start, end, false); ret = PTR_ERR(ordered); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 3e3722a732393..1706f6d9b12e6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -758,14 +758,14 @@ static noinline int create_subvol(struct mnt_idmap *idmap, goto out; } + btrfs_record_new_subvolume(trans, BTRFS_I(dir)); + ret = btrfs_create_new_inode(trans, &new_inode_args); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } - btrfs_record_new_subvolume(trans, BTRFS_I(dir)); - d_instantiate_new(dentry, new_inode_args.inode); new_inode_args.inode = NULL; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 4ed11b089ea95..880f9553d79d3 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -153,9 +153,10 @@ static struct btrfs_ordered_extent *alloc_ordered_extent( struct btrfs_ordered_extent *entry; int ret; u64 qgroup_rsv = 0; + const bool is_nocow = (flags & + ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC))); - if (flags & - ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) { + if (is_nocow) { /* For nocow write, we can release the qgroup rsv right now */ ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv); if (ret < 0) @@ -170,8 +171,13 @@ static struct btrfs_ordered_extent *alloc_ordered_extent( return ERR_PTR(ret); } entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); - if (!entry) + if (!entry) { + if (!is_nocow) + btrfs_qgroup_free_refroot(inode->root->fs_info, + btrfs_root_id(inode->root), + qgroup_rsv, BTRFS_QGROUP_RSV_DATA); return ERR_PTR(-ENOMEM); + } entry->file_offset = file_offset; entry->num_bytes = num_bytes; @@ -253,7 +259,7 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry) * @disk_bytenr: Offset of extent on disk. * @disk_num_bytes: Size of extent on disk. * @offset: Offset into unencoded data where file data starts. - * @flags: Flags specifying type of extent (1 << BTRFS_ORDERED_*). + * @flags: Flags specifying type of extent (1U << BTRFS_ORDERED_*). * @compress_type: Compression algorithm used for data. * * Most of these parameters correspond to &struct btrfs_file_extent_item. 
The diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 39bec672df0cc..8afadf994b8c8 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -200,8 +200,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) struct btrfs_stripe_hash_table *x; struct btrfs_stripe_hash *cur; struct btrfs_stripe_hash *h; - int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS; - int i; + unsigned int num_entries = 1U << BTRFS_STRIPE_HASH_TABLE_BITS; if (info->stripe_hash_table) return 0; @@ -222,7 +221,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) h = table->table; - for (i = 0; i < num_entries; i++) { + for (unsigned int i = 0; i < num_entries; i++) { cur = h + i; INIT_LIST_HEAD(&cur->hash_list); spin_lock_init(&cur->lock); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index d8fcc3eb85c88..3fcc7c092c5ec 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -153,12 +153,14 @@ struct scrub_stripe { unsigned int init_nr_io_errors; unsigned int init_nr_csum_errors; unsigned int init_nr_meta_errors; + unsigned int init_nr_meta_gen_errors; /* * The following error bitmaps are all for the current status. * Every time we submit a new read, these bitmaps may be updated. * - * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap; + * error_bitmap = io_error_bitmap | csum_error_bitmap | + * meta_error_bitmap | meta_gen_error_bitmap; * * IO and csum errors can happen for both metadata and data. */ @@ -166,6 +168,7 @@ struct scrub_stripe { unsigned long io_error_bitmap; unsigned long csum_error_bitmap; unsigned long meta_error_bitmap; + unsigned long meta_gen_error_bitmap; /* For writeback (repair or replace) error reporting. */ unsigned long write_error_bitmap; @@ -616,7 +619,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr memcpy(on_disk_csum, header->csum, fs_info->csum_size); if (logical != btrfs_stack_header_bytenr(header)) { - bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); + bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad bytenr, has %llu want %llu", @@ -672,7 +675,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr } if (stripe->sectors[sector_nr].generation != btrfs_stack_header_generation(header)) { - bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); + bitmap_set(&stripe->meta_gen_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad generation, has %llu want %llu", @@ -684,6 +687,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree); bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); + bitmap_clear(&stripe->meta_gen_error_bitmap, sector_nr, sectors_per_tree); } static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) @@ -972,8 +976,22 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx, if (__ratelimit(&rs) && dev) scrub_print_common_warning("header error", dev, false, stripe->logical, physical); + if (test_bit(sector_nr, &stripe->meta_gen_error_bitmap)) + if (__ratelimit(&rs) && dev) + scrub_print_common_warning("generation error", dev, false, + stripe->logical,
physical); } + /* Update the device stats. */ + for (int i = 0; i < stripe->init_nr_io_errors; i++) + btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_READ_ERRS); + for (int i = 0; i < stripe->init_nr_csum_errors; i++) + btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); + /* Generation mismatch errors are counted per metadata tree block, not per sector. */ + for (int i = 0; i < stripe->init_nr_meta_gen_errors; + i += (fs_info->nodesize >> fs_info->sectorsize_bits)) + btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_GENERATION_ERRS); + spin_lock(&sctx->stat_lock); sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; @@ -982,7 +1000,8 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx, sctx->stat.no_csum += nr_nodatacsum_sectors; sctx->stat.read_errors += stripe->init_nr_io_errors; sctx->stat.csum_errors += stripe->init_nr_csum_errors; - sctx->stat.verify_errors += stripe->init_nr_meta_errors; + sctx->stat.verify_errors += stripe->init_nr_meta_errors + + stripe->init_nr_meta_gen_errors; sctx->stat.uncorrectable_errors += bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors); sctx->stat.corrected_errors += nr_repaired_sectors; @@ -1028,6 +1047,8 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work) stripe->nr_sectors); stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap, stripe->nr_sectors); + stripe->init_nr_meta_gen_errors = bitmap_weight(&stripe->meta_gen_error_bitmap, + stripe->nr_sectors); if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) goto out; @@ -1142,6 +1163,9 @@ static void scrub_write_endio(struct btrfs_bio *bbio) bitmap_set(&stripe->write_error_bitmap, sector_nr, bio_size >> fs_info->sectorsize_bits); spin_unlock_irqrestore(&stripe->write_error_lock, flags); + for (int i = 0; i < (bio_size >> fs_info->sectorsize_bits); i++) + btrfs_dev_stat_inc_and_print(stripe->dev, + BTRFS_DEV_STAT_WRITE_ERRS); } bio_put(&bbio->bio); @@ -1508,10 +1532,12 @@ static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe) stripe->init_nr_io_errors = 0; stripe->init_nr_csum_errors = 0; stripe->init_nr_meta_errors = 0; + stripe->init_nr_meta_gen_errors = 0; stripe->error_bitmap = 0; stripe->io_error_bitmap = 0; stripe->csum_error_bitmap = 0; stripe->meta_error_bitmap = 0; + stripe->meta_gen_error_bitmap = 0; } /* diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index bcb8def4ade20..6119a06b05693 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -28,7 +28,6 @@ #include #include #include -#include #include "messages.h" #include "delayed-inode.h" #include "ctree.h" @@ -2399,16 +2398,10 @@ static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_cont const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan); struct btrfs_fs_info *fs_info = btrfs_sb(sb); - /* - * We may be called from any task trying to allocate memory and we don't - * want to slow it down with scanning and dropping extent maps. It would - * also cause heavy lock contention if many tasks concurrently enter - * here. Therefore only allow kswapd tasks to scan and drop extent maps. - */ - if (!current_is_kswapd()) - return 0; + btrfs_free_extent_maps(fs_info, nr_to_scan); - return btrfs_free_extent_maps(fs_info, nr_to_scan); + /* The extent map shrinker runs asynchronously, so always return 0.
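For the device-stat accounting added above, generation mismatches are recorded per sector in meta_gen_error_bitmap, but a tree block spans several sectors, hence the loop stepping by nodesize >> sectorsize_bits. A standalone sketch of the arithmetic, with assumed 16K nodes and 4K sectors (illustrative numbers only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int nodesize = 16384;             /* assumed tree block size */
            unsigned int sectorsize_bits = 12;         /* assumed 4K sectors */
            unsigned int sectors_per_tree = nodesize >> sectorsize_bits;
            unsigned int init_nr_meta_gen_errors = 8;  /* bad sectors found */
            unsigned int stat_incs = 0;

            /* Mirrors the i += (nodesize >> sectorsize_bits) loop: one
             * BTRFS_DEV_STAT_GENERATION_ERRS bump per affected tree block. */
            for (unsigned int i = 0; i < init_nr_meta_gen_errors; i += sectors_per_tree)
                    stat_incs++;

            printf("%u bad sectors -> %u generation-error increments\n",
                   init_nr_meta_gen_errors, stat_incs);  /* 8 -> 2 */
            return 0;
    }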
*/ + return 0; } static const struct super_operations btrfs_super_ops = { diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 0a2dbfaaf49e2..de226209220fe 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -14,9 +14,9 @@ #include "../disk-io.h" #include "../btrfs_inode.h" -#define PROCESS_UNLOCK (1 << 0) -#define PROCESS_RELEASE (1 << 1) -#define PROCESS_TEST_LOCKED (1 << 2) +#define PROCESS_UNLOCK (1U << 0) +#define PROCESS_RELEASE (1U << 1) +#define PROCESS_TEST_LOCKED (1U << 2) static noinline int process_page_range(struct inode *inode, u64 start, u64 end, unsigned long flags) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9637c7cdc0cf9..16b4474ded4bc 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -138,11 +138,14 @@ static void wait_log_commit(struct btrfs_root *root, int transid); * and once to do all the other items. */ -static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root) +static struct btrfs_inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root) { unsigned int nofs_flag; struct inode *inode; + /* Only meant to be called for subvolume roots and not for log roots. */ + ASSERT(is_fstree(btrfs_root_id(root))); + /* * We're holding a transaction handle whether we are logging or * replaying a log tree, so we must make sure NOFS semantics apply @@ -154,7 +157,10 @@ static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root) inode = btrfs_iget(objectid, root); memalloc_nofs_restore(nofs_flag); - return inode; + if (IS_ERR(inode)) + return ERR_CAST(inode); + + return BTRFS_I(inode); } /* @@ -610,21 +616,6 @@ static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len, return 0; } -/* - * simple helper to read an inode off the disk from a given root - * This can only be called for subvolume roots and not for the log - */ -static noinline struct inode *read_one_inode(struct btrfs_root *root, - u64 objectid) -{ - struct inode *inode; - - inode = btrfs_iget_logging(objectid, root); - if (IS_ERR(inode)) - inode = NULL; - return inode; -} - /* replays a single extent in 'eb' at 'slot' with 'key' into the * subvolume 'root'. path is released on entry and should be released * on exit. @@ -650,7 +641,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, u64 start = key->offset; u64 nbytes = 0; struct btrfs_file_extent_item *item; - struct inode *inode = NULL; + struct btrfs_inode *inode = NULL; unsigned long size; int ret = 0; @@ -674,23 +665,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, extent_end = ALIGN(start + size, fs_info->sectorsize); } else { - ret = 0; - goto out; + return 0; } - inode = read_one_inode(root, key->objectid); - if (!inode) { - ret = -EIO; - goto out; - } + inode = btrfs_iget_logging(key->objectid, root); + if (IS_ERR(inode)) + return PTR_ERR(inode); /* * first check to see if we already have this extent in the * file. This must be done before the btrfs_drop_extents run * so we don't try to drop this extent. 
*/ - ret = btrfs_lookup_file_extent(trans, root, path, - btrfs_ino(BTRFS_I(inode)), start, 0); + ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), start, 0); if (ret == 0 && (found_type == BTRFS_FILE_EXTENT_REG || @@ -724,7 +711,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, drop_args.start = start; drop_args.end = extent_end; drop_args.drop_cache = true; - ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args); + ret = btrfs_drop_extents(trans, root, inode, &drop_args); if (ret) goto out; @@ -902,16 +889,15 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, goto out; } - ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start, - extent_end - start); + ret = btrfs_inode_set_file_extent_range(inode, start, extent_end - start); if (ret) goto out; update_inode: - btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found); - ret = btrfs_update_inode(trans, BTRFS_I(inode)); + btrfs_update_inode_bytes(inode, nbytes, drop_args.bytes_found); + ret = btrfs_update_inode(trans, inode); out: - iput(inode); + iput(&inode->vfs_inode); return ret; } @@ -948,7 +934,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_dir_item *di) { struct btrfs_root *root = dir->root; - struct inode *inode; + struct btrfs_inode *inode; struct fscrypt_str name; struct extent_buffer *leaf; struct btrfs_key location; @@ -963,9 +949,10 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, btrfs_release_path(path); - inode = read_one_inode(root, location.objectid); - if (!inode) { - ret = -EIO; + inode = btrfs_iget_logging(location.objectid, root); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); + inode = NULL; goto out; } @@ -973,10 +960,11 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, if (ret) goto out; - ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name); + ret = unlink_inode_for_log_replay(trans, dir, inode, &name); out: kfree(name.name); - iput(inode); + if (inode) + iput(&inode->vfs_inode); return ret; } @@ -1087,7 +1075,9 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, search_key.type = BTRFS_INODE_REF_KEY; search_key.offset = parent_objectid; ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); - if (ret == 0) { + if (ret < 0) { + return ret; + } else if (ret == 0) { struct btrfs_inode_ref *victim_ref; unsigned long ptr; unsigned long ptr_end; @@ -1149,7 +1139,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, u32 item_size; u32 cur_offset = 0; unsigned long base; - struct inode *victim_parent; + struct btrfs_inode *victim_parent; leaf = path->nodes[0]; @@ -1160,13 +1150,13 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, struct fscrypt_str victim_name; extref = (struct btrfs_inode_extref *)(base + cur_offset); + victim_name.len = btrfs_inode_extref_name_len(leaf, extref); if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) goto next; ret = read_alloc_one_name(leaf, &extref->name, - btrfs_inode_extref_name_len(leaf, extref), - &victim_name); + victim_name.len, &victim_name); if (ret) return ret; @@ -1181,18 +1171,18 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, kfree(victim_name.name); return ret; } else if (!ret) { - ret = -ENOENT; - victim_parent = read_one_inode(root, - parent_objectid); - if (victim_parent) { + victim_parent = btrfs_iget_logging(parent_objectid, root); + if (IS_ERR(victim_parent)) { + 
ret = PTR_ERR(victim_parent); + } else { inc_nlink(&inode->vfs_inode); btrfs_release_path(path); ret = unlink_inode_for_log_replay(trans, - BTRFS_I(victim_parent), + victim_parent, inode, &victim_name); + iput(&victim_parent->vfs_inode); } - iput(victim_parent); kfree(victim_name.name); if (ret) return ret; @@ -1326,19 +1316,18 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name); if (!ret) { - struct inode *dir; + struct btrfs_inode *dir; btrfs_release_path(path); - dir = read_one_inode(root, parent_id); - if (!dir) { - ret = -ENOENT; + dir = btrfs_iget_logging(parent_id, root); + if (IS_ERR(dir)) { + ret = PTR_ERR(dir); kfree(name.name); goto out; } - ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), - inode, &name); + ret = unlink_inode_for_log_replay(trans, dir, inode, &name); kfree(name.name); - iput(dir); + iput(&dir->vfs_inode); if (ret) goto out; goto again; @@ -1370,8 +1359,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, struct extent_buffer *eb, int slot, struct btrfs_key *key) { - struct inode *dir = NULL; - struct inode *inode = NULL; + struct btrfs_inode *dir = NULL; + struct btrfs_inode *inode = NULL; unsigned long ref_ptr; unsigned long ref_end; struct fscrypt_str name = { 0 }; @@ -1404,15 +1393,17 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, * copy the back ref in. The link count fixup code will take * care of the rest */ - dir = read_one_inode(root, parent_objectid); - if (!dir) { - ret = -ENOENT; + dir = btrfs_iget_logging(parent_objectid, root); + if (IS_ERR(dir)) { + ret = PTR_ERR(dir); + dir = NULL; goto out; } - inode = read_one_inode(root, inode_objectid); - if (!inode) { - ret = -EIO; + inode = btrfs_iget_logging(inode_objectid, root); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); + inode = NULL; goto out; } @@ -1424,11 +1415,13 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, * parent object can change from one array * item to another. */ - if (!dir) - dir = read_one_inode(root, parent_objectid); if (!dir) { - ret = -ENOENT; - goto out; + dir = btrfs_iget_logging(parent_objectid, root); + if (IS_ERR(dir)) { + ret = PTR_ERR(dir); + dir = NULL; + goto out; + } } } else { ret = ref_get_fields(eb, ref_ptr, &name, &ref_index); @@ -1436,8 +1429,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, if (ret) goto out; - ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), - btrfs_ino(BTRFS_I(inode)), ref_index, &name); + ret = inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), + ref_index, &name); if (ret < 0) { goto out; } else if (ret == 0) { @@ -1448,8 +1441,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, * overwrite any existing back reference, and we don't * want to create dangling pointers in the directory. 
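Several hunks here follow the same shape: on failure, reset the pointer to NULL before jumping to the shared exit label, so a single guarded iput() covers every branch. A compact sketch of the pattern with stand-in allocation calls (purely illustrative, not btrfs code):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int v; };

    static struct obj *try_get(int fail)
    {
            return fail ? NULL : calloc(1, sizeof(struct obj));
    }

    static int demo(int fail_second)
    {
            struct obj *dir = NULL, *inode = NULL;
            int ret = 0;

            dir = try_get(0);
            if (!dir) {
                    ret = -1;
                    goto out;
            }
            inode = try_get(fail_second);
            if (!inode) {
                    ret = -1;
                    goto out;       /* inode stays NULL, as in the hunks above */
            }
    out:
            if (dir)
                    free(dir);      /* stands in for iput(&dir->vfs_inode) */
            if (inode)
                    free(inode);    /* stands in for iput(&inode->vfs_inode) */
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", demo(0), demo(1));  /* 0 -1 */
            return 0;
    }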
*/ - ret = __add_inode_ref(trans, root, path, log, - BTRFS_I(dir), BTRFS_I(inode), + ret = __add_inode_ref(trans, root, path, log, dir, inode, inode_objectid, parent_objectid, ref_index, &name); if (ret) { @@ -1459,12 +1451,11 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, } /* insert our name */ - ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), - &name, 0, ref_index); + ret = btrfs_add_link(trans, dir, inode, &name, 0, ref_index); if (ret) goto out; - ret = btrfs_update_inode(trans, BTRFS_I(inode)); + ret = btrfs_update_inode(trans, inode); if (ret) goto out; } @@ -1474,7 +1465,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, kfree(name.name); name.name = NULL; if (log_ref_ver) { - iput(dir); + iput(&dir->vfs_inode); dir = NULL; } } @@ -1487,8 +1478,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, * dir index entries exist for a name but there is no inode reference * item with the same name. */ - ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot, - key); + ret = unlink_old_inode_refs(trans, root, path, inode, eb, slot, key); if (ret) goto out; @@ -1497,8 +1487,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, out: btrfs_release_path(path); kfree(name.name); - iput(dir); - iput(inode); + if (dir) + iput(&dir->vfs_inode); + if (inode) + iput(&inode->vfs_inode); return ret; } @@ -1670,12 +1662,13 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, { int ret; struct btrfs_key key; - struct inode *inode; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = (u64)-1; while (1) { + struct btrfs_inode *inode; + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) break; @@ -1697,14 +1690,14 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, break; btrfs_release_path(path); - inode = read_one_inode(root, key.offset); - if (!inode) { - ret = -EIO; + inode = btrfs_iget_logging(key.offset, root); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); break; } - ret = fixup_inode_link_count(trans, inode); - iput(inode); + ret = fixup_inode_link_count(trans, &inode->vfs_inode); + iput(&inode->vfs_inode); if (ret) break; @@ -1732,12 +1725,14 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, { struct btrfs_key key; int ret = 0; - struct inode *inode; + struct btrfs_inode *inode; + struct inode *vfs_inode; - inode = read_one_inode(root, objectid); - if (!inode) - return -EIO; + inode = btrfs_iget_logging(objectid, root); + if (IS_ERR(inode)) + return PTR_ERR(inode); + vfs_inode = &inode->vfs_inode; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = objectid; @@ -1746,15 +1741,15 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, btrfs_release_path(path); if (ret == 0) { - if (!inode->i_nlink) - set_nlink(inode, 1); + if (!vfs_inode->i_nlink) + set_nlink(vfs_inode, 1); else - inc_nlink(inode); - ret = btrfs_update_inode(trans, BTRFS_I(inode)); + inc_nlink(vfs_inode); + ret = btrfs_update_inode(trans, inode); } else if (ret == -EEXIST) { ret = 0; } - iput(inode); + iput(vfs_inode); return ret; } @@ -1770,27 +1765,26 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, const struct fscrypt_str *name, struct btrfs_key *location) { - struct inode *inode; - struct inode *dir; + struct btrfs_inode *inode; + struct btrfs_inode *dir; int ret; - inode = read_one_inode(root, 
location->objectid); - if (!inode) - return -ENOENT; + inode = btrfs_iget_logging(location->objectid, root); + if (IS_ERR(inode)) + return PTR_ERR(inode); - dir = read_one_inode(root, dirid); - if (!dir) { - iput(inode); - return -EIO; + dir = btrfs_iget_logging(dirid, root); + if (IS_ERR(dir)) { + iput(&inode->vfs_inode); + return PTR_ERR(dir); } - ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, - 1, index); + ret = btrfs_add_link(trans, dir, inode, name, 1, index); /* FIXME, put inode into FIXUP list */ - iput(inode); - iput(dir); + iput(&inode->vfs_inode); + iput(&dir->vfs_inode); return ret; } @@ -1852,16 +1846,16 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, bool index_dst_matches = false; struct btrfs_key log_key; struct btrfs_key search_key; - struct inode *dir; + struct btrfs_inode *dir; u8 log_flags; bool exists; int ret; bool update_size = true; bool name_added = false; - dir = read_one_inode(root, key->objectid); - if (!dir) - return -EIO; + dir = btrfs_iget_logging(key->objectid, root); + if (IS_ERR(dir)) + return PTR_ERR(dir); ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name); if (ret) @@ -1882,9 +1876,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, ret = PTR_ERR(dir_dst_di); goto out; } else if (dir_dst_di) { - ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path, - dir_dst_di, &log_key, - log_flags, exists); + ret = delete_conflicting_dir_entry(trans, dir, path, dir_dst_di, + &log_key, log_flags, exists); if (ret < 0) goto out; dir_dst_matches = (ret == 1); @@ -1899,9 +1892,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, ret = PTR_ERR(index_dst_di); goto out; } else if (index_dst_di) { - ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path, - index_dst_di, &log_key, - log_flags, exists); + ret = delete_conflicting_dir_entry(trans, dir, path, index_dst_di, + &log_key, log_flags, exists); if (ret < 0) goto out; index_dst_matches = (ret == 1); @@ -1956,11 +1948,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, out: if (!ret && update_size) { - btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2); - ret = btrfs_update_inode(trans, BTRFS_I(dir)); + btrfs_i_size_write(dir, dir->vfs_inode.i_size + name.len * 2); + ret = btrfs_update_inode(trans, dir); } kfree(name.name); - iput(dir); + iput(&dir->vfs_inode); if (!ret && name_added) ret = 1; return ret; @@ -2117,16 +2109,16 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, struct btrfs_path *log_path, - struct inode *dir, + struct btrfs_inode *dir, struct btrfs_key *dir_key) { - struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_root *root = dir->root; int ret; struct extent_buffer *eb; int slot; struct btrfs_dir_item *di; struct fscrypt_str name = { 0 }; - struct inode *inode = NULL; + struct btrfs_inode *inode = NULL; struct btrfs_key location; /* @@ -2163,9 +2155,10 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, btrfs_dir_item_key_to_cpu(eb, di, &location); btrfs_release_path(path); btrfs_release_path(log_path); - inode = read_one_inode(root, location.objectid); - if (!inode) { - ret = -EIO; + inode = btrfs_iget_logging(location.objectid, root); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); + inode = NULL; goto out; } @@ -2173,9 +2166,8 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, if (ret) goto out; - inc_nlink(inode); - 
ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode), - &name); + inc_nlink(&inode->vfs_inode); + ret = unlink_inode_for_log_replay(trans, dir, inode, &name); /* * Unlike dir item keys, dir index keys can only have one name (entry) in * them, as there are no key collisions since each key has a unique offset @@ -2185,7 +2177,8 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, btrfs_release_path(path); btrfs_release_path(log_path); kfree(name.name); - iput(inode); + if (inode) + iput(&inode->vfs_inode); return ret; } @@ -2309,7 +2302,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, struct btrfs_key dir_key; struct btrfs_key found_key; struct btrfs_path *log_path; - struct inode *dir; + struct btrfs_inode *dir; dir_key.objectid = dirid; dir_key.type = BTRFS_DIR_INDEX_KEY; @@ -2317,14 +2310,17 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, if (!log_path) return -ENOMEM; - dir = read_one_inode(root, dirid); - /* it isn't an error if the inode isn't there, that can happen - * because we replay the deletes before we copy in the inode item - * from the log + dir = btrfs_iget_logging(dirid, root); + /* + * It isn't an error if the inode isn't there, that can happen because + * we replay the deletes before we copy in the inode item from the log. */ - if (!dir) { + if (IS_ERR(dir)) { btrfs_free_path(log_path); - return 0; + ret = PTR_ERR(dir); + if (ret == -ENOENT) + ret = 0; + return ret; } range_start = 0; @@ -2386,7 +2382,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, out: btrfs_release_path(path); btrfs_free_path(log_path); - iput(dir); + iput(&dir->vfs_inode); return ret; } @@ -2480,30 +2476,28 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, */ if (S_ISREG(mode)) { struct btrfs_drop_extents_args drop_args = { 0 }; - struct inode *inode; + struct btrfs_inode *inode; u64 from; - inode = read_one_inode(root, key.objectid); - if (!inode) { - ret = -EIO; + inode = btrfs_iget_logging(key.objectid, root); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); break; } - from = ALIGN(i_size_read(inode), + from = ALIGN(i_size_read(&inode->vfs_inode), root->fs_info->sectorsize); drop_args.start = from; drop_args.end = (u64)-1; drop_args.drop_cache = true; - ret = btrfs_drop_extents(wc->trans, root, - BTRFS_I(inode), + ret = btrfs_drop_extents(wc->trans, root, inode, &drop_args); if (!ret) { - inode_sub_bytes(inode, + inode_sub_bytes(&inode->vfs_inode, drop_args.bytes_found); /* Update the inode's nbytes. 
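The replay path above drops extents from ALIGN(i_size_read(...), sectorsize) onwards, i.e. from the first sector boundary at or beyond the file size. For reference, the kernel's ALIGN() is a power-of-two round-up; a standalone check with assumed sizes:

    #include <stdio.h>

    /* Same shape as the kernel's ALIGN(): round x up to a power-of-two a. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
            unsigned long long i_size = 6000;      /* assumed file size */
            unsigned long long sectorsize = 4096;  /* assumed sector size */

            printf("drop extents from %llu\n",
                   ALIGN(i_size, sectorsize));     /* 8192 */
            return 0;
    }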
*/ - ret = btrfs_update_inode(wc->trans, - BTRFS_I(inode)); + ret = btrfs_update_inode(wc->trans, inode); } - iput(inode); + iput(&inode->vfs_inode); if (ret) break; } @@ -5485,7 +5479,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, ihold(&curr_inode->vfs_inode); while (true) { - struct inode *vfs_inode; struct btrfs_key key; struct btrfs_key found_key; u64 next_index; @@ -5501,7 +5494,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, struct extent_buffer *leaf = path->nodes[0]; struct btrfs_dir_item *di; struct btrfs_key di_key; - struct inode *di_inode; + struct btrfs_inode *di_inode; int log_mode = LOG_INODE_EXISTS; int type; @@ -5528,17 +5521,16 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, goto out; } - if (!need_log_inode(trans, BTRFS_I(di_inode))) { - btrfs_add_delayed_iput(BTRFS_I(di_inode)); + if (!need_log_inode(trans, di_inode)) { + btrfs_add_delayed_iput(di_inode); break; } ctx->log_new_dentries = false; if (type == BTRFS_FT_DIR) log_mode = LOG_INODE_ALL; - ret = btrfs_log_inode(trans, BTRFS_I(di_inode), - log_mode, ctx); - btrfs_add_delayed_iput(BTRFS_I(di_inode)); + ret = btrfs_log_inode(trans, di_inode, log_mode, ctx); + btrfs_add_delayed_iput(di_inode); if (ret) goto out; if (ctx->log_new_dentries) { @@ -5580,14 +5572,13 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, kfree(dir_elem); btrfs_add_delayed_iput(curr_inode); - curr_inode = NULL; - vfs_inode = btrfs_iget_logging(ino, root); - if (IS_ERR(vfs_inode)) { - ret = PTR_ERR(vfs_inode); + curr_inode = btrfs_iget_logging(ino, root); + if (IS_ERR(curr_inode)) { + ret = PTR_ERR(curr_inode); + curr_inode = NULL; break; } - curr_inode = BTRFS_I(vfs_inode); } out: btrfs_free_path(path); @@ -5665,7 +5656,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans, struct btrfs_log_ctx *ctx) { struct btrfs_ino_list *ino_elem; - struct inode *inode; + struct btrfs_inode *inode; /* * It's rare to have a lot of conflicting inodes, in practice it is not @@ -5756,12 +5747,12 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans, * inode in LOG_INODE_EXISTS mode and rename operations update the log, * so that the log ends up with the new name and without the old name. */ - if (!need_log_inode(trans, BTRFS_I(inode))) { - btrfs_add_delayed_iput(BTRFS_I(inode)); + if (!need_log_inode(trans, inode)) { + btrfs_add_delayed_iput(inode); return 0; } - btrfs_add_delayed_iput(BTRFS_I(inode)); + btrfs_add_delayed_iput(inode); ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); if (!ino_elem) @@ -5797,7 +5788,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, */ while (!list_empty(&ctx->conflict_inodes)) { struct btrfs_ino_list *curr; - struct inode *inode; + struct btrfs_inode *inode; u64 ino; u64 parent; @@ -5833,9 +5824,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, * dir index key range logged for the directory. So we * must make sure the deletion is recorded. */ - ret = btrfs_log_inode(trans, BTRFS_I(inode), - LOG_INODE_ALL, ctx); - btrfs_add_delayed_iput(BTRFS_I(inode)); + ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx); + btrfs_add_delayed_iput(inode); if (ret) break; continue; @@ -5851,8 +5841,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, * it again because if some other task logged the inode after * that, we can avoid doing it again. 
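The need_log_inode() re-check above is cheap because it boils down to comparing the inode's last-logged transaction against the current one. A toy sketch of that style of check; the field name is illustrative, not the real struct btrfs_inode layout:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_inode { unsigned long long logged_trans; };

    /* Skip relogging when the inode was already logged in this transaction. */
    static bool need_log(const struct toy_inode *inode, unsigned long long transid)
    {
            return inode->logged_trans < transid;
    }

    int main(void)
    {
            struct toy_inode ino = { .logged_trans = 41 };

            printf("%d\n", need_log(&ino, 42)); /* 1: not yet logged in trans 42 */
            ino.logged_trans = 42;
            printf("%d\n", need_log(&ino, 42)); /* 0: already logged, skip */
            return 0;
    }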
*/ - if (!need_log_inode(trans, BTRFS_I(inode))) { - btrfs_add_delayed_iput(BTRFS_I(inode)); + if (!need_log_inode(trans, inode)) { + btrfs_add_delayed_iput(inode); continue; } @@ -5863,8 +5853,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, * well because during a rename we pin the log and update the * log with the new name before we unpin it. */ - ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx); - btrfs_add_delayed_iput(BTRFS_I(inode)); + ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx); + btrfs_add_delayed_iput(inode); if (ret) break; } @@ -6356,7 +6346,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, list_for_each_entry(item, delayed_ins_list, log_list) { struct btrfs_dir_item *dir_item; - struct inode *di_inode; + struct btrfs_inode *di_inode; struct btrfs_key key; int log_mode = LOG_INODE_EXISTS; @@ -6372,8 +6362,8 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, break; } - if (!need_log_inode(trans, BTRFS_I(di_inode))) { - btrfs_add_delayed_iput(BTRFS_I(di_inode)); + if (!need_log_inode(trans, di_inode)) { + btrfs_add_delayed_iput(di_inode); continue; } @@ -6381,12 +6371,12 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, log_mode = LOG_INODE_ALL; ctx->log_new_dentries = false; - ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx); + ret = btrfs_log_inode(trans, di_inode, log_mode, ctx); if (!ret && ctx->log_new_dentries) - ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx); + ret = log_new_dir_dentries(trans, di_inode, ctx); - btrfs_add_delayed_iput(BTRFS_I(di_inode)); + btrfs_add_delayed_iput(di_inode); if (ret) break; @@ -6794,7 +6784,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, ptr = btrfs_item_ptr_offset(leaf, slot); while (cur_offset < item_size) { struct btrfs_key inode_key; - struct inode *dir_inode; + struct btrfs_inode *dir_inode; inode_key.type = BTRFS_INODE_ITEM_KEY; inode_key.offset = 0; @@ -6843,18 +6833,16 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, goto out; } - if (!need_log_inode(trans, BTRFS_I(dir_inode))) { - btrfs_add_delayed_iput(BTRFS_I(dir_inode)); + if (!need_log_inode(trans, dir_inode)) { + btrfs_add_delayed_iput(dir_inode); continue; } ctx->log_new_dentries = false; - ret = btrfs_log_inode(trans, BTRFS_I(dir_inode), - LOG_INODE_ALL, ctx); + ret = btrfs_log_inode(trans, dir_inode, LOG_INODE_ALL, ctx); if (!ret && ctx->log_new_dentries) - ret = log_new_dir_dentries(trans, - BTRFS_I(dir_inode), ctx); - btrfs_add_delayed_iput(BTRFS_I(dir_inode)); + ret = log_new_dir_dentries(trans, dir_inode, ctx); + btrfs_add_delayed_iput(dir_inode); if (ret) goto out; } @@ -6879,7 +6867,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; int slot; struct btrfs_key search_key; - struct inode *inode; + struct btrfs_inode *inode; u64 ino; int ret = 0; @@ -6894,11 +6882,10 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, if (IS_ERR(inode)) return PTR_ERR(inode); - if (BTRFS_I(inode)->generation >= trans->transid && - need_log_inode(trans, BTRFS_I(inode))) - ret = btrfs_log_inode(trans, BTRFS_I(inode), - LOG_INODE_EXISTS, ctx); - btrfs_add_delayed_iput(BTRFS_I(inode)); + if (inode->generation >= trans->transid && + need_log_inode(trans, inode)) + ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx); + btrfs_add_delayed_iput(inode); if (ret) return ret; @@ -7476,6 +7463,8 @@ void btrfs_record_snapshot_destroy(struct 
btrfs_trans_handle *trans, * full log sync. * Also we don't need to worry with renames, since btrfs_rename() marks the log * for full commit when renaming a subvolume. + * + * Must be called before creating the subvolume entry in its parent directory. */ void btrfs_record_new_subvolume(const struct btrfs_trans_handle *trans, struct btrfs_inode *dir) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8e65018600010..58e0cac5779dd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3268,6 +3268,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) device->bytes_used - dev_extent_len); atomic64_add(dev_extent_len, &fs_info->free_chunk_space); btrfs_clear_space_info_full(fs_info); + + if (list_empty(&device->post_commit_list)) { + list_add_tail(&device->post_commit_list, + &trans->transaction->dev_update_list); + } + mutex_unlock(&fs_info->chunk_mutex); } } diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 866607fd3e588..c9ea37fabf659 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -24,7 +24,7 @@ #include "super.h" #define ZSTD_BTRFS_MAX_WINDOWLOG 17 -#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG) +#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG) #define ZSTD_BTRFS_DEFAULT_LEVEL 3 #define ZSTD_BTRFS_MAX_LEVEL 15 /* 307s to avoid pathologically clashing with transaction commit */ diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index c2a9e2cc03de9..6828a2cff02c8 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -396,6 +396,15 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq) struct page **pages; size_t page_off; + /* + * FIXME: io_iter.count needs to be corrected to aligned + * length. Otherwise, iov_iter_get_pages_alloc2() operates + * with the initial unaligned length value. As a result, + * ceph_msg_data_cursor_init() triggers BUG_ON() in the case + * where msg->sparse_read_total > msg->data_length.
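The ceph_zero_objects() change just below widens object_size to u64 before the multiply; a standalone demonstration of the 32-bit overflow it avoids (sizes illustrative; the kernel fields are s32, unsigned is used here to keep the demo's wraparound well-defined):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t object_size = 1u << 26;   /* assumed 64 MiB objects */
            uint32_t stripe_count = 64;

            /* The multiply happens in 32 bits and wraps to 0 before the
             * widening assignment takes place. */
            uint64_t bad = object_size * stripe_count;
            /* Widening one operand first keeps the full 2^32 product. */
            uint64_t good = (uint64_t)object_size * stripe_count;

            printf("bad=%llu good=%llu\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }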
+ */ + subreq->io_iter.count = len; + err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off); if (err < 0) { doutc(cl, "%llx.%llx failed to allocate pages, %d\n", diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 851d70200c6b8..a7254cab44cc2 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -2616,7 +2616,7 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) s32 stripe_unit = ci->i_layout.stripe_unit; s32 stripe_count = ci->i_layout.stripe_count; s32 object_size = ci->i_layout.object_size; - u64 object_set_size = object_size * stripe_count; + u64 object_set_size = (u64) object_size * stripe_count; u64 nearly, t; /* round offset up to next period boundary */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index c235f9a60394c..b61074b377ac5 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -1227,6 +1227,7 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc) s->s_time_min = 0; s->s_time_max = U32_MAX; s->s_flags |= SB_NODIRATIME | SB_NOATIME; + s->s_magic = CEPH_SUPER_MAGIC; ceph_fscrypt_set_ops(s); diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 43d6bde1adcc0..e5b6a427f31cd 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -600,7 +600,7 @@ static int populate_attrs(struct config_item *item) break; } } - if (t->ct_bin_attrs) { + if (!error && t->ct_bin_attrs) { for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) { if (ops && ops->is_bin_visible && !ops->is_bin_visible(item, bin_attr, i)) continue; diff --git a/fs/coredump.c b/fs/coredump.c index 2b8c36c9660c5..64894ba6efca4 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include #include @@ -60,6 +62,12 @@ static void free_vma_snapshot(struct coredump_params *cprm); #define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024) /* Define a reasonable max cap */ #define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024) +/* + * File descriptor number for the pidfd for the thread-group leader of + * the coredumping task installed into the usermode helper's file + * descriptor table. + */ +#define COREDUMP_PIDFD_NUMBER 3 static int core_uses_pid; static unsigned int core_pipe_limit; @@ -339,6 +347,27 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, case 'C': err = cn_printf(cn, "%d", cprm->cpu); break; + /* pidfd number */ + case 'F': { + /* + * Installing a pidfd only makes sense if + * we actually spawn a usermode helper. + */ + if (!ispipe) + break; + + /* + * Note that we'll install a pidfd for the + * thread-group leader. We know that task + * linkage hasn't been removed yet and even if + * this @current isn't the actual thread-group + * leader we know that the thread-group leader + * cannot be reaped until @current has exited. + */ + cprm->pid = task_tgid(current); + err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER); + break; + } default: break; } @@ -493,7 +522,7 @@ static void wait_for_dump_helpers(struct file *file) } /* - * umh_pipe_setup + * umh_coredump_setup * helper function to customize the process used * to collect the core in userspace. 
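With %F in core_pattern, the pipe helper can rely on fd 3 being a pidfd for the dumping task's thread-group leader. A hedged sketch of such a helper follows; the discard of the core data and the poll-based exit wait are illustrative assumptions, not part of the patch (a pidfd polls readable once the task it refers to has exited):

    #include <poll.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;

            /* fd 0 is the pipe carrying the core image (see the fd 0 setup). */
            while ((n = read(0, buf, sizeof(buf))) > 0)
                    ;  /* ... write the core image somewhere durable ... */

            /* fd 3 (COREDUMP_PIDFD_NUMBER) is the pidfd; wait until the
             * crashing task has fully exited before post-processing. */
            struct pollfd pfd = { .fd = 3, .events = POLLIN };
            poll(&pfd, 1, -1);
            return 0;
    }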
Specifically * it sets up a pipe and installs it as fd 0 (stdin) @@ -503,11 +532,32 @@ static void wait_for_dump_helpers(struct file *file) * is a special value that we use to trap recursive * core dumps */ -static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) +static int umh_coredump_setup(struct subprocess_info *info, struct cred *new) { struct file *files[2]; struct coredump_params *cp = (struct coredump_params *)info->data; - int err = create_pipe_files(files, 0); + int err; + + if (cp->pid) { + struct file *pidfs_file __free(fput) = NULL; + + pidfs_file = pidfs_alloc_file(cp->pid, O_RDWR); + if (IS_ERR(pidfs_file)) + return PTR_ERR(pidfs_file); + + /* + * Usermode helpers are children of either + * system_unbound_wq or of kthreadd. So we know that + * we're starting off with a clean file descriptor + * table. So we should always be able to use + * COREDUMP_PIDFD_NUMBER as our file descriptor value. + */ + err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0); + if (err < 0) + return err; + } + + err = create_pipe_files(files, 0); if (err) return err; @@ -515,10 +565,13 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) err = replace_fd(0, files[0], 0); fput(files[0]); + if (err < 0) + return err; + /* and disallow core files too */ current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; - return err; + return 0; } void do_coredump(const kernel_siginfo_t *siginfo) @@ -593,7 +646,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) } if (cprm.limit == 1) { - /* See umh_pipe_setup() which sets RLIMIT_CORE = 1. + /* See umh_coredump_setup() which sets RLIMIT_CORE = 1. * * Normally core limits are irrelevant to pipes, since * we're not writing to the file system, but we use @@ -632,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) retval = -ENOMEM; sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL, GFP_KERNEL, - umh_pipe_setup, NULL, &cprm); + umh_coredump_setup, NULL, &cprm); if (sub_info) retval = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 722151d3fee8b..91182d5e3a66c 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -240,9 +240,11 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) /* * bit 30: I/O error occurred on this folio + * bit 29: CPU has dirty data in D-cache (needs aliasing handling); * bit 0 - 29: remaining parts to complete this folio */ -#define EROFS_ONLINEFOLIO_EIO (1 << 30) +#define EROFS_ONLINEFOLIO_EIO 30 +#define EROFS_ONLINEFOLIO_DIRTY 29 void erofs_onlinefolio_init(struct folio *folio) { @@ -259,19 +261,23 @@ void erofs_onlinefolio_split(struct folio *folio) atomic_inc((atomic_t *)&folio->private); } -void erofs_onlinefolio_end(struct folio *folio, int err) +void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty) { int orig, v; do { orig = atomic_read((atomic_t *)&folio->private); - v = (orig - 1) | (err ?
EROFS_ONLINEFOLIO_EIO : 0); + DBG_BUGON(orig <= 0); + v = dirty << EROFS_ONLINEFOLIO_DIRTY; + v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO); } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig); - if (v & ~EROFS_ONLINEFOLIO_EIO) + if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1)) return; folio->private = 0; - folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO)); + if (v & BIT(EROFS_ONLINEFOLIO_DIRTY)) + flush_dcache_folio(folio); + folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO))); } static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, @@ -378,11 +384,16 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, */ static int erofs_read_folio(struct file *file, struct folio *folio) { + trace_erofs_read_folio(folio, true); + return iomap_read_folio(folio, &erofs_iomap_ops); } static void erofs_readahead(struct readahead_control *rac) { + trace_erofs_readahead(rac->mapping->host, readahead_index(rac), + readahead_count(rac), true); + return iomap_readahead(rac, &erofs_iomap_ops); } diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index eb318c7ddd80e..dc61a6a8f6965 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -331,13 +331,11 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, cur = min(cur, rq->outputsize); if (cur && rq->out[0]) { kin = kmap_local_page(rq->in[nrpages_in - 1]); - if (rq->out[0] == rq->in[nrpages_in - 1]) { + if (rq->out[0] == rq->in[nrpages_in - 1]) memmove(kin + rq->pageofs_out, kin + pi, cur); - flush_dcache_page(rq->out[0]); - } else { + else memcpy_to_page(rq->out[0], rq->pageofs_out, kin + pi, cur); - } kunmap_local(kin); } rq->outputsize -= cur; @@ -355,14 +353,12 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK; DBG_BUGON(no >= nrpages_out); cnt = min(insz - pi, PAGE_SIZE - po); - if (rq->out[no] == rq->in[ni]) { + if (rq->out[no] == rq->in[ni]) memmove(kin + po, kin + rq->pageofs_in + pi, cnt); - flush_dcache_page(rq->out[no]); - } else if (rq->out[no]) { + else if (rq->out[no]) memcpy_to_page(rq->out[no], po, kin + rq->pageofs_in + pi, cnt); - } pi += cnt; } while (pi < insz); kunmap_local(kin); diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index 12e709d93445e..c865a7a610306 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -38,7 +38,7 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret) } else { bio_for_each_folio_all(fi, &rq->bio) { DBG_BUGON(folio_test_uptodate(fi.folio)); - erofs_onlinefolio_end(fi.folio, ret); + erofs_onlinefolio_end(fi.folio, ret, false); } } bio_uninit(&rq->bio); @@ -158,7 +158,7 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) } cur += len; } - erofs_onlinefolio_end(folio, err); + erofs_onlinefolio_end(folio, err, false); return err; } @@ -180,7 +180,7 @@ static void erofs_fileio_readahead(struct readahead_control *rac) struct folio *folio; int err; - trace_erofs_readpages(inode, readahead_index(rac), + trace_erofs_readahead(inode, readahead_index(rac), readahead_count(rac), true); while ((folio = readahead_folio(rac))) { err = erofs_fileio_scan_folio(&io, folio); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 2c11e8f3048e9..3d06fda70f318 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -405,7 +405,7 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map); void 
erofs_onlinefolio_init(struct folio *folio); void erofs_onlinefolio_split(struct folio *folio); -void erofs_onlinefolio_end(struct folio *folio, int err); +void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty); struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid); int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 3421448fef0e3..5fcdab6145176 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -188,8 +188,11 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0) : bdev_file_open_by_path(dif->path, BLK_OPEN_READ, sb->s_type, NULL); - if (IS_ERR(file)) + if (IS_ERR(file)) { + if (file == ERR_PTR(-ENOTBLK)) + return -EINVAL; return PTR_ERR(file); + } if (!erofs_is_fileio_mode(sbi)) { dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file), @@ -537,24 +540,52 @@ static int erofs_fc_parse_param(struct fs_context *fc, return 0; } -static struct inode *erofs_nfs_get_inode(struct super_block *sb, - u64 ino, u32 generation) +static int erofs_encode_fh(struct inode *inode, u32 *fh, int *max_len, + struct inode *parent) { - return erofs_iget(sb, ino); + erofs_nid_t nid = EROFS_I(inode)->nid; + int len = parent ? 6 : 3; + + if (*max_len < len) { + *max_len = len; + return FILEID_INVALID; + } + + fh[0] = (u32)(nid >> 32); + fh[1] = (u32)(nid & 0xffffffff); + fh[2] = inode->i_generation; + + if (parent) { + nid = EROFS_I(parent)->nid; + + fh[3] = (u32)(nid >> 32); + fh[4] = (u32)(nid & 0xffffffff); + fh[5] = parent->i_generation; + } + + *max_len = len; + return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN; } static struct dentry *erofs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { - return generic_fh_to_dentry(sb, fid, fh_len, fh_type, - erofs_nfs_get_inode); + if ((fh_type != FILEID_INO64_GEN && + fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3) + return NULL; + + return d_obtain_alias(erofs_iget(sb, + ((u64)fid->raw[0] << 32) | fid->raw[1])); } static struct dentry *erofs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { - return generic_fh_to_parent(sb, fid, fh_len, fh_type, - erofs_nfs_get_inode); + if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6) + return NULL; + + return d_obtain_alias(erofs_iget(sb, + ((u64)fid->raw[3] << 32) | fid->raw[4])); } static struct dentry *erofs_get_parent(struct dentry *child) @@ -570,7 +601,7 @@ static struct dentry *erofs_get_parent(struct dentry *child) } static const struct export_operations erofs_export_ops = { - .encode_fh = generic_encode_ino32_fh, + .encode_fh = erofs_encode_fh, .fh_to_dentry = erofs_fh_to_dentry, .fh_to_parent = erofs_fh_to_parent, .get_parent = erofs_get_parent, diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index a90d7d6497390..60d2cf26e837e 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -407,7 +407,7 @@ int erofs_getxattr(struct inode *inode, int index, const char *name, } it.index = index; - it.name = (struct qstr)QSTR_INIT(name, strlen(name)); + it.name = QSTR(name); if (it.name.len > EROFS_NAME_LEN) return -ERANGE; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 74521d7dbee1d..94c1e2d64df96 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -12,12 +12,6 @@ #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE) #define Z_EROFS_INLINE_BVECS 2 -/* - * let's leave a type here in case of introducing - * another 
tagged pointer later. - */ -typedef void *z_erofs_next_pcluster_t; - struct z_erofs_bvec { struct page *page; int offset; @@ -48,7 +42,7 @@ struct z_erofs_pcluster { struct lockref lockref; /* A: point to next chained pcluster or TAILs */ - z_erofs_next_pcluster_t next; + struct z_erofs_pcluster *next; /* I: start block address of this pcluster */ erofs_off_t index; @@ -91,12 +85,11 @@ struct z_erofs_pcluster { /* the end of a chain of pclusters */ #define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA) -#define Z_EROFS_PCLUSTER_NIL (NULL) struct z_erofs_decompressqueue { struct super_block *sb; + struct z_erofs_pcluster *head; atomic_t pending_bios; - z_erofs_next_pcluster_t head; union { struct completion done; @@ -460,39 +453,32 @@ int __init z_erofs_init_subsystem(void) } enum z_erofs_pclustermode { + /* It has previously been linked into another processing chain */ Z_EROFS_PCLUSTER_INFLIGHT, /* - * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it - * could be dispatched into bypass queue later due to uptodated managed - * pages. All related online pages cannot be reused for inplace I/O (or - * bvpage) since it can be directly decoded without I/O submission. + * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it + * may be dispatched to the bypass queue later due to uptodated managed + * folios. All file-backed folios related to this pcluster cannot be + * reused for in-place I/O (or bvpage) since the pcluster may be decoded + * in a separate queue (and thus out of order). */ Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE, /* - * The pcluster was just linked to a decompression chain by us. It can - * also be linked with the remaining pclusters, which means if the - * processing page is the tail page of a pcluster, this pcluster can - * safely use the whole page (since the previous pcluster is within the - * same chain) for in-place I/O, as illustrated below: - * ___________________________________________________ - * | tail (partial) page | head (partial) page | - * | (of the current pcl) | (of the previous pcl) | - * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____| - * - * [ (*) the page above can be used as inplace I/O. ] + * The pcluster has just been linked to our processing chain. + * File-backed folios (except for the head page) related to it can be + * used for in-place I/O (or bvpage). 
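The erofs_onlinefolio_end() rework earlier in this series packs a completion count and two flags into one folio->private word: the completion mask is BIT(29)-1, so bits 0-28 count outstanding parts, bit 29 records dirty data needing a D-cache flush, and bit 30 records an I/O error. A toy single-threaded model of that packing (the real code updates it with a cmpxchg loop):

    #include <stdio.h>

    #define ONLINEFOLIO_EIO   30
    #define ONLINEFOLIO_DIRTY 29

    int main(void)
    {
            unsigned int v = 2;  /* folio split into two parts */

            /* Each completion decrements the low bits and ORs in flags. */
            v = (v - 1) | (1U << ONLINEFOLIO_DIRTY);  /* part 1: dirty */
            v = (v - 1) | (1U << ONLINEFOLIO_EIO);    /* part 2: I/O error */

            if (!(v & ((1U << ONLINEFOLIO_DIRTY) - 1)))
                    printf("folio done: dirty=%d eio=%d\n",
                           !!(v & (1U << ONLINEFOLIO_DIRTY)),
                           !!(v & (1U << ONLINEFOLIO_EIO)));  /* dirty=1 eio=1 */
            return 0;
    }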
*/ Z_EROFS_PCLUSTER_FOLLOWED, }; -struct z_erofs_decompress_frontend { +struct z_erofs_frontend { struct inode *const inode; struct erofs_map_blocks map; struct z_erofs_bvec_iter biter; struct page *pagepool; struct page *candidate_bvpage; - struct z_erofs_pcluster *pcl; - z_erofs_next_pcluster_t owned_head; + struct z_erofs_pcluster *pcl, *head; enum z_erofs_pclustermode mode; erofs_off_t headoffset; @@ -501,11 +487,11 @@ struct z_erofs_decompress_frontend { unsigned int icur; }; -#define DECOMPRESS_FRONTEND_INIT(__i) { \ - .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \ - .mode = Z_EROFS_PCLUSTER_FOLLOWED } +#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \ + .inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \ + .mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho } -static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe) +static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe) { unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy; @@ -522,7 +508,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe) return false; } -static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) +static void z_erofs_bind_cache(struct z_erofs_frontend *fe) { struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); struct z_erofs_pcluster *pcl = fe->pcl; @@ -679,7 +665,7 @@ int z_erofs_init_super(struct super_block *sb) } /* callers must be with pcluster lock held */ -static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, +static int z_erofs_attach_page(struct z_erofs_frontend *fe, struct z_erofs_bvec *bvec, bool exclusive) { struct z_erofs_pcluster *pcl = fe->pcl; @@ -725,7 +711,7 @@ static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl) return true; } -static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) +static int z_erofs_register_pcluster(struct z_erofs_frontend *fe) { struct erofs_map_blocks *map = &fe->map; struct super_block *sb = fe->inode->i_sb; @@ -750,9 +736,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) pcl->algorithmformat = map->m_algorithmformat; pcl->length = 0; pcl->partial = true; - - /* new pclusters should be claimed as type 1, primary and followed */ - pcl->next = fe->owned_head; + pcl->next = fe->head; pcl->pageofs_out = map->m_la & ~PAGE_MASK; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; @@ -788,8 +772,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) goto err_out; } } - fe->owned_head = &pcl->next; - fe->pcl = pcl; + fe->head = fe->pcl = pcl; return 0; err_out: @@ -798,7 +781,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) return err; } -static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) +static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe) { struct erofs_map_blocks *map = &fe->map; struct super_block *sb = fe->inode->i_sb; @@ -808,7 +791,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) DBG_BUGON(fe->pcl); /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ - DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); + DBG_BUGON(!fe->head); if (!(map->m_flags & EROFS_MAP_META)) { while (1) { @@ -836,10 +819,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) if (ret == -EEXIST) { mutex_lock(&fe->pcl->lock); /* check if this pcluster hasn't been linked into any chain. 
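The cmpxchg(&fe->pcl->next, NULL, fe->head) just below claims a pcluster for the local submission chain only if nobody has linked it yet; losing the race means the pcluster is already inflight on another chain. A toy illustration of that claim using the GCC atomic builtin (structure and names illustrative):

    #include <stdio.h>

    struct pcl { struct pcl *next; };

    static int claim(struct pcl *p, struct pcl *head)
    {
            struct pcl *expected = NULL;

            /* Succeeds only if p->next was still NULL (unclaimed). */
            return __atomic_compare_exchange_n(&p->next, &expected, head, 0,
                                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            struct pcl head = { 0 }, p = { 0 };

            printf("first claim:  %d (followed)\n", claim(&p, &head));  /* 1 */
            printf("second claim: %d (inflight)\n", claim(&p, &head));  /* 0 */
            return 0;
    }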
*/ - if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL, - fe->owned_head) == Z_EROFS_PCLUSTER_NIL) { + if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) { /* .. so it can be attached to our submission chain */ - fe->owned_head = &fe->pcl->next; + fe->head = fe->pcl; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; } else { /* otherwise, it belongs to an inflight chain */ fe->mode = Z_EROFS_PCLUSTER_INFLIGHT; @@ -872,24 +854,16 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) return 0; } -/* - * keep in mind that no referenced pclusters will be freed - * only after a RCU grace period. - */ static void z_erofs_rcu_callback(struct rcu_head *head) { - z_erofs_free_pcluster(container_of(head, - struct z_erofs_pcluster, rcu)); + z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu)); } -static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, +static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, struct z_erofs_pcluster *pcl) { - int free = false; - - spin_lock(&pcl->lockref.lock); if (pcl->lockref.count) - goto out; + return false; /* * Note that all cached folios should be detached before deleted from @@ -897,7 +871,7 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, * orphan old pcluster when the new one is available in the tree. */ if (erofs_try_to_free_all_cached_folios(sbi, pcl)) - goto out; + return false; /* * It's impossible to fail after the pcluster is freezed, but in order @@ -906,8 +880,16 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->index) != pcl); lockref_mark_dead(&pcl->lockref); - free = true; -out: + return true; +} + +static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, + struct z_erofs_pcluster *pcl) +{ + bool free; + + spin_lock(&pcl->lockref.lock); + free = __erofs_try_to_release_pcluster(sbi, pcl); spin_unlock(&pcl->lockref.lock); if (free) { atomic_long_dec(&erofs_global_shrink_cnt); @@ -916,8 +898,7 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, return free; } -unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, - unsigned long nr_shrink) +unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr) { struct z_erofs_pcluster *pcl; unsigned long index, freed = 0; @@ -930,7 +911,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, xa_unlock(&sbi->managed_pslots); ++freed; - if (!--nr_shrink) + if (!--nr) return freed; xa_lock(&sbi->managed_pslots); } @@ -938,19 +919,28 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, return freed; } -static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl) +static void z_erofs_put_pcluster(struct erofs_sb_info *sbi, + struct z_erofs_pcluster *pcl, bool try_free) { + bool free = false; + if (lockref_put_or_lock(&pcl->lockref)) return; DBG_BUGON(__lockref_is_dead(&pcl->lockref)); - if (pcl->lockref.count == 1) - atomic_long_inc(&erofs_global_shrink_cnt); - --pcl->lockref.count; + if (!--pcl->lockref.count) { + if (try_free && xa_trylock(&sbi->managed_pslots)) { + free = __erofs_try_to_release_pcluster(sbi, pcl); + xa_unlock(&sbi->managed_pslots); + } + atomic_long_add(!free, &erofs_global_shrink_cnt); + } spin_unlock(&pcl->lockref.lock); + if (free) + call_rcu(&pcl->rcu, z_erofs_rcu_callback); } -static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe) +static void z_erofs_pcluster_end(struct z_erofs_frontend *fe) { struct z_erofs_pcluster *pcl = fe->pcl; @@ -963,13 +953,9 
@@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe) if (fe->candidate_bvpage) fe->candidate_bvpage = NULL; - /* - * if all pending pages are added, don't hold its reference - * any longer if the pcluster isn't hosted by ourselves. - */ + /* Drop refcount if it doesn't belong to our processing chain */ if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE) - z_erofs_put_pcluster(pcl); - + z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false); fe->pcl = NULL; } @@ -998,7 +984,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio, return 0; } -static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, +static int z_erofs_scan_folio(struct z_erofs_frontend *f, struct folio *folio, bool ra) { struct inode *const inode = f->inode; @@ -1087,7 +1073,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, tight = (bs == PAGE_SIZE); } } while ((end = cur) > 0); - erofs_onlinefolio_end(folio, err); + erofs_onlinefolio_end(folio, err, false); return err; } @@ -1111,7 +1097,7 @@ static bool z_erofs_page_is_invalidated(struct page *page) return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page); } -struct z_erofs_decompress_backend { +struct z_erofs_backend { struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES]; struct super_block *sb; struct z_erofs_pcluster *pcl; @@ -1132,7 +1118,7 @@ struct z_erofs_bvec_item { struct list_head list; }; -static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, +static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be, struct z_erofs_bvec *bvec) { int poff = bvec->offset + be->pcl->pageofs_out; @@ -1157,8 +1143,7 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, list_add(&item->list, &be->decompressed_secondary_bvecs); } -static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, - int err) +static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err) { unsigned int off0 = be->pcl->pageofs_out; struct list_head *p, *n; @@ -1193,13 +1178,13 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, cur += len; } kunmap_local(dst); - erofs_onlinefolio_end(page_folio(bvi->bvec.page), err); + erofs_onlinefolio_end(page_folio(bvi->bvec.page), err, true); list_del(p); kfree(bvi); } } -static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be) +static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be) { struct z_erofs_pcluster *pcl = be->pcl; struct z_erofs_bvec_iter biter; @@ -1224,8 +1209,7 @@ static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be) z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); } -static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, - bool *overlapped) +static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped) { struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); @@ -1260,8 +1244,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, return err; } -static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, - int err) +static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err) { struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct z_erofs_pcluster *pcl = be->pcl; @@ -1271,6 +1254,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, int i, j, jtop, err2; struct page *page; bool overlapped; + bool try_free = true; 
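/*
 * A rough sketch (reviewer illustration, not part of the patch): try_free
 * above stays true only if no managed (page-cache owned) folio is found in
 * compressed_bvecs[] below; it is then handed to the reworked put path from
 * the earlier hunk, which is roughly:
 *
 *	if (lockref_put_or_lock(&pcl->lockref))
 *		return;				// fast path: count was > 1
 *	if (!--pcl->lockref.count) {		// final put, lock now held
 *		if (try_free && xa_trylock(&sbi->managed_pslots)) {
 *			free = __erofs_try_to_release_pcluster(sbi, pcl);
 *			xa_unlock(&sbi->managed_pslots);
 *		}
 *		atomic_long_add(!free, &erofs_global_shrink_cnt);
 *	}
 *	spin_unlock(&pcl->lockref.lock);
 *	if (free)
 *		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 *
 * i.e. a fully unmanaged, unreferenced pcluster may now be freed inline
 * instead of lingering for the shrinker, while xa_trylock() keeps the lock
 * order against the managed-pcluster xarray safe.
 */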
mutex_lock(&pcl->lock); be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT; @@ -1328,9 +1312,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, /* managed folios are still left in compressed_bvecs[] */ for (i = 0; i < pclusterpages; ++i) { page = be->compressed_pages[i]; - if (!page || - erofs_folio_is_managed(sbi, page_folio(page))) + if (!page) continue; + if (erofs_folio_is_managed(sbi, page_folio(page))) { + try_free = false; + continue; + } (void)z_erofs_put_shortlivedpage(be->pagepool, page); WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); } @@ -1348,7 +1335,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, DBG_BUGON(z_erofs_page_is_invalidated(page)); if (!z_erofs_is_shortlived_page(page)) { - erofs_onlinefolio_end(page_folio(page), err); + erofs_onlinefolio_end(page_folio(page), err, true); continue; } if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) { @@ -1373,34 +1360,33 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, pcl->vcnt = 0; /* pcluster lock MUST be taken before the following line */ - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); + WRITE_ONCE(pcl->next, NULL); mutex_unlock(&pcl->lock); + + if (z_erofs_is_inline_pcluster(pcl)) + z_erofs_free_pcluster(pcl); + else + z_erofs_put_pcluster(sbi, pcl, try_free); return err; } static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, struct page **pagepool) { - struct z_erofs_decompress_backend be = { + struct z_erofs_backend be = { .sb = io->sb, .pagepool = pagepool, .decompressed_secondary_bvecs = LIST_HEAD_INIT(be.decompressed_secondary_bvecs), + .pcl = io->head, }; - z_erofs_next_pcluster_t owned = io->head; + struct z_erofs_pcluster *next; int err = io->eio ? 
-EIO : 0; - while (owned != Z_EROFS_PCLUSTER_TAIL) { - DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); - - be.pcl = container_of(owned, struct z_erofs_pcluster, next); - owned = READ_ONCE(be.pcl->next); - + for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) { + DBG_BUGON(!be.pcl); + next = READ_ONCE(be.pcl->next); err = z_erofs_decompress_pcluster(&be, err) ?: err; - if (z_erofs_is_inline_pcluster(be.pcl)) - z_erofs_free_pcluster(be.pcl); - else - z_erofs_put_pcluster(be.pcl); } return err; } @@ -1465,7 +1451,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, } static void z_erofs_fill_bio_vec(struct bio_vec *bvec, - struct z_erofs_decompress_frontend *f, + struct z_erofs_frontend *f, struct z_erofs_pcluster *pcl, unsigned int nr, struct address_space *mc) @@ -1609,18 +1595,13 @@ enum { NR_JOBQUEUES, }; -static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, - z_erofs_next_pcluster_t qtail[], - z_erofs_next_pcluster_t owned_head) +static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl, + struct z_erofs_pcluster *next, + struct z_erofs_pcluster **qtail[]) { - z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; - z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL); - - WRITE_ONCE(*submit_qtail, owned_head); - WRITE_ONCE(*bypass_qtail, &pcl->next); - + WRITE_ONCE(*qtail[JQ_SUBMIT], next); + WRITE_ONCE(*qtail[JQ_BYPASS], pcl); qtail[JQ_BYPASS] = &pcl->next; } @@ -1649,15 +1630,15 @@ static void z_erofs_endio(struct bio *bio) bio_put(bio); } -static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, +static void z_erofs_submit_queue(struct z_erofs_frontend *f, struct z_erofs_decompressqueue *fgq, bool *force_fg, bool readahead) { struct super_block *sb = f->inode->i_sb; struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb)); - z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; + struct z_erofs_pcluster **qtail[NR_JOBQUEUES]; struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; - z_erofs_next_pcluster_t owned_head = f->owned_head; + struct z_erofs_pcluster *pcl, *next; /* bio is NULL initially, so no need to initialize last_{index,bdev} */ erofs_off_t last_pa; unsigned int nr_bios = 0; @@ -1673,22 +1654,19 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; /* by default, all need io submission */ - q[JQ_SUBMIT]->head = owned_head; + q[JQ_SUBMIT]->head = next = f->head; do { struct erofs_map_dev mdev; - struct z_erofs_pcluster *pcl; erofs_off_t cur, end; struct bio_vec bvec; unsigned int i = 0; bool bypass = true; - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); - pcl = container_of(owned_head, struct z_erofs_pcluster, next); - owned_head = READ_ONCE(pcl->next); - + pcl = next; + next = READ_ONCE(pcl->next); if (z_erofs_is_inline_pcluster(pcl)) { - move_to_bypass_jobqueue(pcl, qtail, owned_head); + z_erofs_move_to_bypass_queue(pcl, next, qtail); continue; } @@ -1760,8 +1738,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, if (!bypass) qtail[JQ_SUBMIT] = &pcl->next; else - move_to_bypass_jobqueue(pcl, qtail, owned_head); - } while (owned_head != Z_EROFS_PCLUSTER_TAIL); + z_erofs_move_to_bypass_queue(pcl, next, qtail); + } while (next != Z_EROFS_PCLUSTER_TAIL); if (bio) { if (erofs_is_fileio_mode(EROFS_SB(sb))) @@ -1785,17 +1763,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios); } -static int 
z_erofs_runqueue(struct z_erofs_decompress_frontend *f, - unsigned int ra_folios) +static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages) { struct z_erofs_decompressqueue io[NR_JOBQUEUES]; struct erofs_sb_info *sbi = EROFS_I_SB(f->inode); - bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios); + bool force_fg = z_erofs_is_sync_decompress(sbi, rapages); int err; - if (f->owned_head == Z_EROFS_PCLUSTER_TAIL) + if (f->head == Z_EROFS_PCLUSTER_TAIL) return 0; - z_erofs_submit_queue(f, io, &force_fg, !!ra_folios); + z_erofs_submit_queue(f, io, &force_fg, !!rapages); /* handle bypass queue (no i/o pclusters) immediately */ err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool); @@ -1813,7 +1790,7 @@ static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f, * Since partial uptodate is still unimplemented for now, we have to use * approximate readmore strategies as a start. */ -static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, +static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f, struct readahead_control *rac, bool backmost) { struct inode *inode = f->inode; @@ -1868,12 +1845,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, static int z_erofs_read_folio(struct file *file, struct folio *folio) { struct inode *const inode = folio->mapping->host; - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio)); int err; trace_erofs_read_folio(folio, false); - f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT; - z_erofs_pcluster_readmore(&f, NULL, true); err = z_erofs_scan_folio(&f, folio, false); z_erofs_pcluster_readmore(&f, NULL, false); @@ -1893,17 +1868,13 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) static void z_erofs_readahead(struct readahead_control *rac) { struct inode *const inode = rac->mapping->host; - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac)); + unsigned int nrpages = readahead_count(rac); struct folio *head = NULL, *folio; - unsigned int nr_folios; int err; - f.headoffset = readahead_pos(rac); - + trace_erofs_readahead(inode, readahead_index(rac), nrpages, false); z_erofs_pcluster_readmore(&f, rac, true); - nr_folios = readahead_count(rac); - trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false); - while ((folio = readahead_folio(rac))) { folio->private = head; head = folio; @@ -1922,7 +1893,7 @@ static void z_erofs_readahead(struct readahead_control *rac) z_erofs_pcluster_readmore(&f, rac, false); z_erofs_pcluster_end(&f); - (void)z_erofs_runqueue(&f, nr_folios); + (void)z_erofs_runqueue(&f, nrpages); erofs_put_metabuf(&f.map.buf); erofs_release_pages(&f.pagepool); } diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index 75704f58ecfa9..0dd65cefce33e 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -230,9 +230,10 @@ void erofs_shrinker_unregister(struct super_block *sb) struct erofs_sb_info *const sbi = EROFS_SB(sb); mutex_lock(&sbi->umount_mutex); - /* clean up all remaining pclusters in memory */ - z_erofs_shrink_scan(sbi, ~0UL); - + while (!xa_empty(&sbi->managed_pslots)) { + z_erofs_shrink_scan(sbi, ~0UL); + cond_resched(); + } spin_lock(&erofs_sb_list_lock); list_del(&sbi->list); spin_unlock(&erofs_sb_list_lock); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1a06e462b6efb..99eed91d03ebe 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -854,7 +854,7 @@ 
static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) kfree_rcu(epi, rcu); percpu_counter_dec(&ep->user->epoll_watches); - return ep_refcount_dec_and_test(ep); + return true; } /* @@ -862,14 +862,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) */ static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi) { - WARN_ON_ONCE(__ep_remove(ep, epi, false)); + if (__ep_remove(ep, epi, false)) + WARN_ON_ONCE(ep_refcount_dec_and_test(ep)); } static void ep_clear_and_put(struct eventpoll *ep) { struct rb_node *rbp, *next; struct epitem *epi; - bool dispose; /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) @@ -902,10 +902,8 @@ static void ep_clear_and_put(struct eventpoll *ep) cond_resched(); } - dispose = ep_refcount_dec_and_test(ep); mutex_unlock(&ep->mtx); - - if (dispose) + if (ep_refcount_dec_and_test(ep)) ep_free(ep); } @@ -1108,7 +1106,7 @@ void eventpoll_release_file(struct file *file) dispose = __ep_remove(ep, epi, true); mutex_unlock(&ep->mtx); - if (dispose) + if (dispose && ep_refcount_dec_and_test(ep)) ep_free(ep); goto again; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index c2e6989a568c2..e94df69ee2e0d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -3369,6 +3369,13 @@ static inline unsigned int ext4_flex_bg_size(struct ext4_sb_info *sbi) return 1 << sbi->s_log_groups_per_flex; } +static inline loff_t ext4_get_maxbytes(struct inode *inode) +{ + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) + return inode->i_sb->s_maxbytes; + return EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; +} + #define ext4_std_error(sb, errno) \ do { \ if ((errno)) \ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index ba3419958a832..b16d72275e105 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2396,18 +2396,19 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, int ext4_ext_index_trans_blocks(struct inode *inode, int extents) { int index; - int depth; /* If we are converting the inline data, only one is needed here. */ if (ext4_has_inline_data(inode)) return 1; - depth = ext_depth(inode); - + /* + * Extent tree can change between the time we estimate credits and + * the time we actually modify the tree. Assume the worst case. 
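 *
 * Illustration (reviewer aside, not in the original patch): if the tree
 * deepens between the estimate and the modification, a depth-based
 * reservation under-reserves; at depth 2 the multi-extent case reserves
 * 2 * 3 = 6 index-block credits, while a tree that has grown to depth 3
 * may need 9. Sizing the estimate by the maximum possible depth trades a
 * slightly larger reservation for credits that can never be exceeded.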
+ */ if (extents <= 1) - index = depth * 2; + index = EXT4_MAX_EXTENT_DEPTH * 2; else - index = depth * 3; + index = EXT4_MAX_EXTENT_DEPTH * 3; return index; } @@ -4976,12 +4977,7 @@ static const struct iomap_ops ext4_iomap_xattr_ops = { static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) { - u64 maxbytes; - - if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) - maxbytes = inode->i_sb->s_maxbytes; - else - maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; + u64 maxbytes = ext4_get_maxbytes(inode); if (*len == 0) return -EINVAL; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index f14aed14b9cf3..6c692151b0d6c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -896,12 +896,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp) loff_t ext4_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; - loff_t maxbytes; - - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; - else - maxbytes = inode->i_sb->s_maxbytes; + loff_t maxbytes = ext4_get_maxbytes(inode); switch (whence) { default: diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 3536ca7e4fcca..05b148d6fc711 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -392,7 +392,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, } static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode, - unsigned int len) + loff_t len) { int ret, size, no_expand; struct ext4_inode_info *ei = EXT4_I(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 38fe9a213d09b..f769f5cb6deb7 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1000,7 +1000,12 @@ int ext4_walk_page_buffers(handle_t *handle, struct inode *inode, */ static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh) { - folio_mark_dirty(bh->b_folio); + struct folio *folio = bh->b_folio; + struct inode *inode = folio->mapping->host; + + /* only regular files have a_ops */ + if (S_ISREG(inode->i_mode)) + folio_mark_dirty(folio); return ext4_handle_dirty_metadata(handle, NULL, bh); } @@ -4928,7 +4933,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ei->i_file_acl |= ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; inode->i_size = ext4_isize(sb, raw_inode); - if ((size = i_size_read(inode)) < 0) { + size = i_size_read(inode); + if (size < 0 || size > ext4_get_maxbytes(inode)) { ext4_error_inode(inode, function, line, 0, "iget: bad i_size value: %lld", size); ret = -EFSCORRUPTED; diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 7f26440e8595a..b05bb7bfa14c5 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -178,8 +178,7 @@ void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio) #ifdef CONFIG_F2FS_FS_LZO static int lzo_init_compress_ctx(struct compress_ctx *cc) { - cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), - LZO1X_MEM_COMPRESS, GFP_NOFS); + cc->private = f2fs_vmalloc(LZO1X_MEM_COMPRESS); if (!cc->private) return -ENOMEM; @@ -189,7 +188,7 @@ static int lzo_init_compress_ctx(struct compress_ctx *cc) static void lzo_destroy_compress_ctx(struct compress_ctx *cc) { - kvfree(cc->private); + vfree(cc->private); cc->private = NULL; } @@ -246,7 +245,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc) size = LZ4HC_MEM_COMPRESS; #endif - cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS); + cc->private = f2fs_vmalloc(size); if (!cc->private) return -ENOMEM; @@ -261,7 +260,7 @@ static int 
lz4_init_compress_ctx(struct compress_ctx *cc) static void lz4_destroy_compress_ctx(struct compress_ctx *cc) { - kvfree(cc->private); + vfree(cc->private); cc->private = NULL; } @@ -342,8 +341,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc) params = zstd_get_params(level, cc->rlen); workspace_size = zstd_cstream_workspace_bound(¶ms.cParams); - workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode), - workspace_size, GFP_NOFS); + workspace = f2fs_vmalloc(workspace_size); if (!workspace) return -ENOMEM; @@ -351,7 +349,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc) if (!stream) { f2fs_err_ratelimited(F2FS_I_SB(cc->inode), "%s zstd_init_cstream failed", __func__); - kvfree(workspace); + vfree(workspace); return -EIO; } @@ -364,7 +362,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc) static void zstd_destroy_compress_ctx(struct compress_ctx *cc) { - kvfree(cc->private); + vfree(cc->private); cc->private = NULL; cc->private2 = NULL; } @@ -423,8 +421,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) workspace_size = zstd_dstream_workspace_bound(max_window_size); - workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode), - workspace_size, GFP_NOFS); + workspace = f2fs_vmalloc(workspace_size); if (!workspace) return -ENOMEM; @@ -432,7 +429,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) if (!stream) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "%s zstd_init_dstream failed", __func__); - kvfree(workspace); + vfree(workspace); return -EIO; } @@ -444,7 +441,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic) { - kvfree(dic->private); + vfree(dic->private); dic->private = NULL; dic->private2 = NULL; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 1b0050b8421d8..654f672639b3c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -53,8 +53,8 @@ bool f2fs_is_cp_guaranteed(struct page *page) struct inode *inode; struct f2fs_sb_info *sbi; - if (!mapping) - return false; + if (fscrypt_is_bounce_page(page)) + return page_private_gcing(fscrypt_pagecache_page(page)); inode = mapping->host; sbi = F2FS_I_SB(inode); @@ -3986,7 +3986,7 @@ static int check_swap_activate(struct swap_info_struct *sis, if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec || nr_pblocks % blks_per_sec || - !f2fs_valid_pinned_area(sbi, pblock)) { + f2fs_is_sequential_zone_area(sbi, pblock)) { bool last_extent = false; not_aligned++; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1c783c2e4902a..a435550b2839b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1762,6 +1762,7 @@ struct f2fs_sb_info { unsigned int dirty_device; /* for checkpoint data flush */ spinlock_t dev_lock; /* protect dirty_device */ bool aligned_blksize; /* all devices has the same logical blksize */ + unsigned int first_seq_zone_segno; /* first segno in sequential zone */ /* For write statistics */ u64 sectors_written_start; @@ -2508,8 +2509,14 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; spin_lock(&sbi->stat_lock); - f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); - sbi->total_valid_block_count -= (block_t)count; + if (unlikely(sbi->total_valid_block_count < count)) { + f2fs_warn(sbi, "Inconsistent total_valid_block_count:%u, ino:%lu, count:%u", + sbi->total_valid_block_count, inode->i_ino, count); + sbi->total_valid_block_count = 0; + set_sbi_flag(sbi, SBI_NEED_FSCK); + } else { + 
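/*
 * A minimal sketch (illustration only, not part of the patch): the hunk
 * above replaces f2fs_bug_on() on a counter underflow with a clamp-and-flag
 * pattern, roughly
 *
 *	if (counter < count) {
 *		f2fs_warn(sbi, ...);		// report the inconsistency
 *		counter = 0;			// saturate, don't underflow
 *		set_sbi_flag(sbi, SBI_NEED_FSCK);
 *	} else {
 *		counter -= count;
 *	}
 *
 * so a corrupted image degrades into a warning plus an fsck request rather
 * than a kernel BUG.
 */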
sbi->total_valid_block_count -= count; + } if (sbi->reserved_blocks && sbi->current_reserved_blocks < sbi->reserved_blocks) sbi->current_reserved_blocks = min(sbi->reserved_blocks, @@ -3488,6 +3495,11 @@ static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); } +static inline void *f2fs_vmalloc(size_t size) +{ + return vmalloc(size); +} + static inline int get_extra_isize(struct inode *inode) { return F2FS_I(inode)->i_extra_isize / sizeof(__le32); @@ -4545,12 +4557,16 @@ F2FS_FEATURE_FUNCS(compression, COMPRESSION); F2FS_FEATURE_FUNCS(readonly, RO); #ifdef CONFIG_BLK_DEV_ZONED -static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, - block_t blkaddr) +static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi, + unsigned int zone) { - unsigned int zno = blkaddr / sbi->blocks_per_blkz; + return test_bit(zone, FDEV(devi).blkz_seq); +} - return test_bit(zno, FDEV(devi).blkz_seq); +static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, + block_t blkaddr) +{ + return f2fs_zone_is_seq(sbi, devi, blkaddr / sbi->blocks_per_blkz); } #endif @@ -4622,15 +4638,31 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; } -static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi, +static inline bool f2fs_is_sequential_zone_area(struct f2fs_sb_info *sbi, block_t blkaddr) { if (f2fs_sb_has_blkzoned(sbi)) { +#ifdef CONFIG_BLK_DEV_ZONED int devi = f2fs_target_device_index(sbi, blkaddr); - return !bdev_is_zoned(FDEV(devi).bdev); + if (!bdev_is_zoned(FDEV(devi).bdev)) + return false; + + if (f2fs_is_multi_device(sbi)) { + if (blkaddr < FDEV(devi).start_blk || + blkaddr > FDEV(devi).end_blk) { + f2fs_err(sbi, "Invalid block %x", blkaddr); + return false; + } + blkaddr -= FDEV(devi).start_blk; + } + + return f2fs_blkz_is_seq(sbi, devi, blkaddr); +#else + return false; +#endif } - return true; + return false; } static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 71ddecaf771f8..d9037e74631c0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -35,6 +35,17 @@ #include #include +static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size) +{ + loff_t old_size = i_size_read(inode); + + if (old_size >= new_size) + return; + + /* zero or drop pages only in range of [old_size, new_size] */ + truncate_pagecache(inode, old_size); +} + static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) { struct inode *inode = file_inode(vmf->vma->vm_file); @@ -103,8 +114,13 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); + filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT); + filemap_invalidate_unlock(inode->i_mapping); + file_update_time(vmf->vma->vm_file); filemap_invalidate_lock_shared(inode->i_mapping); + folio_lock(folio); if (unlikely(folio->mapping != inode->i_mapping || folio_pos(folio) > i_size_read(inode) || @@ -1064,6 +1080,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, f2fs_down_write(&fi->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); + if (attr->ia_size > old_size) + f2fs_zero_post_eof_page(inode, attr->ia_size); truncate_setsize(inode, attr->ia_size); if (attr->ia_size <= old_size) @@ -1182,6 +1200,10 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len) if (ret) return ret; + 
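/*
 * Pattern summary (reviewer note, not part of the patch): mkwrite, setattr
 * grow, punch hole, collapse, zero range, insert range, expand and the
 * write checks all gain the same bracket around the new helper:
 *
 *	filemap_invalidate_lock(inode->i_mapping);
 *	f2fs_zero_post_eof_page(inode, end);
 *	filemap_invalidate_unlock(inode->i_mapping);
 *
 * where "end" is a placeholder for offset + len (or the new size / write
 * end) at each call site, so stale page-cache folios beyond the old i_size
 * are dropped before any blocks are touched.
 */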
filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, offset + len); + filemap_invalidate_unlock(inode->i_mapping); + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; @@ -1465,6 +1487,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, offset + len); + f2fs_lock_op(sbi); f2fs_drop_extent_tree(inode); truncate_pagecache(inode, offset); @@ -1586,6 +1610,10 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, if (ret) return ret; + filemap_invalidate_lock(mapping); + f2fs_zero_post_eof_page(inode, offset + len); + filemap_invalidate_unlock(mapping); + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; @@ -1717,6 +1745,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) /* avoid gc operation during block exchange */ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(mapping); + + f2fs_zero_post_eof_page(inode, offset + len); truncate_pagecache(inode, offset); while (!ret && idx > pg_start) { @@ -1774,6 +1804,10 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset, if (err) return err; + filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, offset + len); + filemap_invalidate_unlock(inode->i_mapping); + f2fs_balance_fs(sbi, true); pg_start = ((unsigned long long)offset) >> PAGE_SHIFT; @@ -1794,7 +1828,8 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset, map.m_len = sec_blks; next_alloc: - if (has_not_enough_free_secs(sbi, 0, + if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ? 
+ ZONED_PIN_SEC_REQUIRED_COUNT : GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { f2fs_down_write(&sbi->gc_lock); stat_inc_gc_call_count(sbi, FOREGROUND); @@ -4715,6 +4750,10 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from) err = file_modified(file); if (err) return err; + + filemap_invalidate_lock(inode->i_mapping); + f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from)); + filemap_invalidate_unlock(inode->i_mapping); return count; } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index e0469316c7cd4..cd56c0e66657b 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -2072,6 +2072,9 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi, .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), }; + if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, segno))) + continue; + do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false); put_gc_inode(&gc_list); diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 2914b678bf8fb..5c1eaf55e1277 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -35,6 +35,7 @@ #define LIMIT_BOOST_ZONED_GC 25 /* percentage over total user space of boosted gc for zoned devices */ #define DEF_MIGRATION_WINDOW_GRANULARITY_ZONED 3 #define BOOST_GC_MULTIPLE 5 +#define ZONED_PIN_SEC_REQUIRED_COUNT 1 #define DEF_GC_FAILED_PINNED_FILES 2048 #define MAX_GC_FAILED_PINNED_FILES USHRT_MAX diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index a60db5e795a4c..06688b9957c81 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -34,7 +34,9 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) if (f2fs_inode_dirtied(inode, sync)) return; - if (f2fs_is_atomic_file(inode)) + /* only an atomic file w/ FI_ATOMIC_COMMITTED can be marked vfs dirty */ + if (f2fs_is_atomic_file(inode) && + !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED)) return; mark_inode_dirty_sync(inode); @@ -286,6 +288,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) return false; } + if (ino_of_node(node_page) == fi->i_xattr_nid) { + f2fs_warn(sbi, "%s: corrupted inode i_ino=%lx, xnid=%x, run fsck to fix.", + __func__, inode->i_ino, fi->i_xattr_nid); + return false; + } + if (f2fs_has_extra_attr(inode)) { if (!f2fs_sb_has_extra_attr(sbi)) { f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off", @@ -777,6 +785,13 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) !is_inode_flag_set(inode, FI_DIRTY_INODE)) return 0; + /* + * no need to update the inode page; f2fs_evict_inode() will + * ultimately clear the inode's dirty status.
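 *
 * (Reviewer aside, not in the original patch: once a checkpoint error has
 * been recorded the image can no longer be advanced consistently, so the
 * added f2fs_cp_error() check below returns -EIO instead of issuing a
 * useless node-page update; eviction is what finally drops the in-memory
 * dirty state.)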
+ */ + if (f2fs_cp_error(sbi)) + return -EIO; + if (!f2fs_is_checkpoint_ready(sbi)) { f2fs_mark_inode_dirty_sync(inode, true); return -ENOSPC; } diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 57d46e1439ded..781b872fac8c2 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -413,7 +413,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, if (is_inode_flag_set(dir, FI_PROJ_INHERIT) && (!projid_eq(F2FS_I(dir)->i_projid, - F2FS_I(old_dentry->d_inode)->i_projid))) + F2FS_I(inode)->i_projid))) return -EXDEV; err = f2fs_dquot_initialize(dir); @@ -560,6 +560,15 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) goto fail; } + if (unlikely(inode->i_nlink == 0)) { + f2fs_warn(F2FS_I_SB(inode), "%s: inode (ino=%lx) has zero i_nlink", + __func__, inode->i_ino); + err = -EFSCORRUPTED; + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); + f2fs_put_page(page, 0); + goto fail; + } + f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); @@ -905,7 +914,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && (!projid_eq(F2FS_I(new_dir)->i_projid, - F2FS_I(old_dentry->d_inode)->i_projid))) + F2FS_I(old_inode)->i_projid))) return -EXDEV; /* @@ -1098,10 +1107,10 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && !projid_eq(F2FS_I(new_dir)->i_projid, - F2FS_I(old_dentry->d_inode)->i_projid)) || - (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && + F2FS_I(old_inode)->i_projid)) || + (is_inode_flag_set(old_dir, FI_PROJ_INHERIT) && !projid_eq(F2FS_I(old_dir)->i_projid, - F2FS_I(new_dentry->d_inode)->i_projid))) + F2FS_I(new_inode)->i_projid))) return -EXDEV; err = f2fs_dquot_initialize(old_dir); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index b9ffb2ee9548a..e48b5e2efea28 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -370,7 +370,13 @@ static int __f2fs_commit_atomic_write(struct inode *inode) } else { sbi->committed_atomic_block += fi->atomic_write_cnt; set_inode_flag(inode, FI_ATOMIC_COMMITTED); + + /* + * inode may have no FI_ATOMIC_DIRTIED flag when there was no write + * before commit.
+ */ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { + /* clear atomic dirty status and set vfs dirty status */ clear_inode_flag(inode, FI_ATOMIC_DIRTIED); f2fs_mark_inode_dirty_sync(inode, true); } @@ -2713,7 +2719,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi, if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning) segno = 0; else - segno = max(first_zoned_segno(sbi), *newseg); + segno = max(sbi->first_seq_zone_segno, *newseg); hint = GET_SEC_FROM_SEG(sbi, segno); } #endif @@ -2725,7 +2731,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi, if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) { /* Write only to sequential zones */ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) { - hint = GET_SEC_FROM_SEG(sbi, first_zoned_segno(sbi)); + hint = GET_SEC_FROM_SEG(sbi, sbi->first_seq_zone_segno); secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); } else secno = find_first_zero_bit(free_i->free_secmap, @@ -2772,11 +2778,15 @@ static int get_new_segment(struct f2fs_sb_info *sbi, } got_it: /* set it as dirty segment in free segmap */ - f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); + if (test_bit(segno, free_i->free_segmap)) { + ret = -EFSCORRUPTED; + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_CORRUPTED_FREE_BITMAP); + goto out_unlock; + } - /* no free section in conventional zone */ + /* no free section in conventional device or conventional zone */ if (new_sec && pinning && - !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) { + f2fs_is_sequential_zone_area(sbi, START_BLOCK(sbi, segno))) { ret = -EAGAIN; goto out_unlock; } @@ -3240,7 +3250,8 @@ int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi) if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) { f2fs_down_write(&sbi->gc_lock); - err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1); + err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1, + true, ZONED_PIN_SEC_REQUIRED_COUNT); f2fs_up_write(&sbi->gc_lock); gc_required = false; diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 51b2b8c5c749c..52bb1a2819357 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -431,7 +431,6 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int next; - unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi); spin_lock(&free_i->segmap_lock); clear_bit(segno, free_i->free_segmap); @@ -439,7 +438,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) next = find_next_bit(free_i->free_segmap, start_segno + SEGS_PER_SEC(sbi), start_segno); - if (next >= start_segno + usable_segs) { + if (next >= start_segno + f2fs_usable_segs_in_sec(sbi)) { clear_bit(secno, free_i->free_secmap); free_i->free_sections++; } @@ -465,22 +464,36 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi, unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int next; - unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi); + bool ret; spin_lock(&free_i->segmap_lock); - if (test_and_clear_bit(segno, free_i->free_segmap)) { - free_i->free_segments++; - - if (!inmem && IS_CURSEC(sbi, secno)) - goto skip_free; - next = find_next_bit(free_i->free_segmap, - start_segno + SEGS_PER_SEC(sbi), start_segno); - if (next >= start_segno + usable_segs) { - if (test_and_clear_bit(secno, free_i->free_secmap)) - 
free_i->free_sections++; - } - } -skip_free: + ret = test_and_clear_bit(segno, free_i->free_segmap); + if (!ret) + goto unlock_out; + + free_i->free_segments++; + + if (!inmem && IS_CURSEC(sbi, secno)) + goto unlock_out; + + /* check large section */ + next = find_next_bit(free_i->free_segmap, + start_segno + SEGS_PER_SEC(sbi), start_segno); + if (next < start_segno + f2fs_usable_segs_in_sec(sbi)) + goto unlock_out; + + ret = test_and_clear_bit(secno, free_i->free_secmap); + if (!ret) + goto unlock_out; + + free_i->free_sections++; + + if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[BG_GC]) == secno) + sbi->next_victim_seg[BG_GC] = NULL_SEGNO; + if (GET_SEC_FROM_SEG(sbi, sbi->next_victim_seg[FG_GC]) == secno) + sbi->next_victim_seg[FG_GC] = NULL_SEGNO; + +unlock_out: spin_unlock(&free_i->segmap_lock); } @@ -562,13 +575,16 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, unsigned int node_blocks, unsigned int data_blocks, unsigned int dent_blocks) { - unsigned int segno, left_blocks, blocks; int i; /* check current data/node sections in the worst case. */ for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) { segno = CURSEG_I(sbi, i)->segno; + + if (unlikely(segno == NULL_SEGNO)) + return false; + left_blocks = CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true); @@ -579,6 +595,10 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, /* check current data section for dentry blocks. */ segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno; + + if (unlikely(segno == NULL_SEGNO)) + return false; + left_blocks = CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true); if (dent_blocks > left_blocks) @@ -972,13 +992,3 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force) dcc->discard_wake = true; wake_up_interruptible_all(&dcc->discard_wait_queue); } - -static inline unsigned int first_zoned_segno(struct f2fs_sb_info *sbi) -{ - int devi; - - for (devi = 0; devi < sbi->s_ndevs; devi++) - if (bdev_is_zoned(FDEV(devi).bdev)) - return GET_SEGNO(sbi, FDEV(devi).start_blk); - return 0; -} diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 573cc4725e2e8..3f2c6fa3623ba 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1516,7 +1516,9 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync) } spin_unlock(&sbi->inode_lock[DIRTY_META]); - if (!ret && f2fs_is_atomic_file(inode)) + /* if atomic write is not committed, set inode w/ atomic dirty */ + if (!ret && f2fs_is_atomic_file(inode) && + !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED)) set_inode_flag(inode, FI_ATOMIC_DIRTIED); return ret; @@ -1785,26 +1787,32 @@ static int f2fs_statfs_project(struct super_block *sb, limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, dquot->dq_dqb.dqb_bhardlimit); - if (limit) - limit >>= sb->s_blocksize_bits; + limit >>= sb->s_blocksize_bits; + + if (limit) { + uint64_t remaining = 0; - if (limit && buf->f_blocks > limit) { curblock = (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; - buf->f_blocks = limit; - buf->f_bfree = buf->f_bavail = - (buf->f_blocks > curblock) ? 
- (buf->f_blocks - curblock) : 0; + if (limit > curblock) + remaining = limit - curblock; + + buf->f_blocks = min(buf->f_blocks, limit); + buf->f_bfree = min(buf->f_bfree, remaining); + buf->f_bavail = min(buf->f_bavail, remaining); } limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, dquot->dq_dqb.dqb_ihardlimit); - if (limit && buf->f_files > limit) { - buf->f_files = limit; - buf->f_ffree = - (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? - (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > dquot->dq_dqb.dqb_curinodes) + remaining = limit - dquot->dq_dqb.dqb_curinodes; + + buf->f_files = min(buf->f_files, limit); + buf->f_ffree = min(buf->f_ffree, remaining); } spin_unlock(&dquot->dq_dqb_lock); @@ -1862,9 +1870,9 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_fsid = u64_to_fsid(id); #ifdef CONFIG_QUOTA - if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) && + if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) && sb_has_quota_limits_enabled(sb, PRJQUOTA)) { - f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf); + f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf); } #endif return 0; @@ -3659,6 +3667,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) block_t user_block_count, valid_user_blocks; block_t avail_node_count, valid_node_count; unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks; + unsigned int sit_blk_cnt; int i, j; total = le32_to_cpu(raw_super->segment_count); @@ -3770,6 +3779,13 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) return 1; } + sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK); + if (sit_bitmap_size * 8 < sit_blk_cnt) { + f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt:%u", + sit_bitmap_size, sit_blk_cnt); + return 1; + } + cp_pack_start_sum = __start_sum_addr(sbi); cp_payload = __cp_payload(sbi); if (cp_pack_start_sum < cp_payload + 1 || @@ -4244,6 +4260,37 @@ static void f2fs_record_error_work(struct work_struct *work) f2fs_record_stop_reason(sbi); } +static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi) +{ +#ifdef CONFIG_BLK_DEV_ZONED + unsigned int zoneno, total_zones; + int devi; + + if (!f2fs_sb_has_blkzoned(sbi)) + return NULL_SEGNO; + + for (devi = 0; devi < sbi->s_ndevs; devi++) { + if (!bdev_is_zoned(FDEV(devi).bdev)) + continue; + + total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments); + + for (zoneno = 0; zoneno < total_zones; zoneno++) { + unsigned int segs, blks; + + if (!f2fs_zone_is_seq(sbi, devi, zoneno)) + continue; + + segs = GET_SEG_FROM_SEC(sbi, + zoneno * sbi->secs_per_zone); + blks = SEGS_TO_BLKS(sbi, segs); + return GET_SEGNO(sbi, FDEV(devi).start_blk + blks); + } + } +#endif + return NULL_SEGNO; +} + static int f2fs_scan_devices(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); @@ -4278,6 +4325,14 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) #endif for (i = 0; i < max_devices; i++) { + if (max_devices == 1) { + FDEV(i).total_segments = + le32_to_cpu(raw_super->segment_count_main); + FDEV(i).start_blk = 0; + FDEV(i).end_blk = FDEV(i).total_segments * + BLKS_PER_SEG(sbi); + } + if (i == 0) FDEV(0).bdev_file = sbi->sb->s_bdev_file; else if (!RDEV(i).path[0]) @@ -4644,6 +4699,9 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) /* For write statistics */ sbi->sectors_written_start = f2fs_get_sectors_written(sbi); + /* get segno of first zoned block device */ + 
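/*
 * A sketch derived from the hunk above (reviewer illustration, not part of
 * the patch text): get_first_seq_zone_segno() turns the first zone for
 * which f2fs_zone_is_seq() holds into a segment number via
 *
 *	segs  = GET_SEG_FROM_SEC(sbi, zoneno * sbi->secs_per_zone);
 *	blks  = SEGS_TO_BLKS(sbi, segs);
 *	segno = GET_SEGNO(sbi, FDEV(devi).start_blk + blks);
 *
 * and yields NULL_SEGNO when no zoned device exists, so every reader of the
 * cached first_seq_zone_segno must tolerate the non-zoned case.
 */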
sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi); + /* Read accumulated write IO statistics if exists */ seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); if (__exist_node_summaries(sbi)) diff --git a/fs/file_table.c b/fs/file_table.c index 18735dc8269a1..cf3422edf737c 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -332,9 +332,7 @@ static struct file *alloc_file(const struct path *path, int flags, static inline int alloc_path_pseudo(const char *name, struct inode *inode, struct vfsmount *mnt, struct path *path) { - struct qstr this = QSTR_INIT(name, strlen(name)); - - path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this); + path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name)); if (!path->dentry) return -ENOMEM; path->mnt = mntget(mnt); diff --git a/fs/filesystems.c b/fs/filesystems.c index 58b9067b2391c..95e5256821a53 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -156,15 +156,19 @@ static int fs_index(const char __user * __name) static int fs_name(unsigned int index, char __user * buf) { struct file_system_type * tmp; - int len, res; + int len, res = -EINVAL; read_lock(&file_systems_lock); - for (tmp = file_systems; tmp; tmp = tmp->next, index--) - if (index <= 0 && try_module_get(tmp->owner)) + for (tmp = file_systems; tmp; tmp = tmp->next, index--) { + if (index == 0) { + if (try_module_get(tmp->owner)) + res = 0; break; + } + } read_unlock(&file_systems_lock); - if (!tmp) - return -EINVAL; + if (res) + return res; /* OK, we got the reference, so we can safely block */ len = strlen(tmp->name) + 1; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index ff543dc09130e..ce7324d0d9ed1 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1921,6 +1921,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, int err; bool trust_local_cmtime = is_wb; bool fault_blocked = false; + u64 attr_version; if (!fc->default_permissions) attr->ia_valid |= ATTR_FORCE; @@ -2005,6 +2006,8 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if (fc->handle_killpriv_v2 && !capable(CAP_FSETID)) inarg.valid |= FATTR_KILL_SUIDGID; } + + attr_version = fuse_get_attr_version(fm->fc); fuse_setattr_fill(fc, &args, inode, &inarg, &outarg); err = fuse_simple_request(fm, &args); if (err) { @@ -2030,6 +2033,14 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, /* FIXME: clear I_DIRTY_SYNC? */ } + if (fi->attr_version > attr_version) { + /* + * Apply attributes, for example for fsnotify_change(), but set + * attribute timeout to zero. 
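 *
 * (Reviewer aside, not in the original patch: fi->attr_version having moved
 * past the value sampled just before FUSE_SETATTR means another attribute
 * update raced with this request and the reply may carry stale attributes;
 * applying them with a zero timeout still gives fsnotify_change() correct
 * data while forcing the next access to refresh from the server.)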
+ */ + outarg.attr_valid = outarg.attr_valid_nsec = 0; + } + fuse_change_attributes_common(inode, &outarg.attr, NULL, ATTR_TIMEOUT(&outarg), fuse_get_cache_mask(inode)); diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 68fc8af14700d..eb4270e82ef8e 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -37,27 +37,6 @@ #include "aops.h" -void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio, - size_t from, size_t len) -{ - struct buffer_head *head = folio_buffers(folio); - unsigned int bsize = head->b_size; - struct buffer_head *bh; - size_t to = from + len; - size_t start, end; - - for (bh = head, start = 0; bh != head || !start; - bh = bh->b_this_page, start = end) { - end = start + bsize; - if (end <= from) - continue; - if (start >= to) - break; - set_buffer_uptodate(bh); - gfs2_trans_add_data(ip->i_gl, bh); - } -} - /** * gfs2_get_block_noalloc - Fills in a buffer head with details about a block * @inode: The inode @@ -133,11 +112,42 @@ static int __gfs2_jdata_write_folio(struct folio *folio, inode->i_sb->s_blocksize, BIT(BH_Dirty)|BIT(BH_Uptodate)); } - gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio)); + gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio)); } return gfs2_write_jdata_folio(folio, wbc); } +/** + * gfs2_jdata_writeback - Write jdata folios to the log + * @mapping: The mapping to write + * @wbc: The writeback control + * + * Returns: errno + */ +int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc) +{ + struct inode *inode = mapping->host; + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_sbd *sdp = GFS2_SB(mapping->host); + struct folio *folio = NULL; + int error; + + BUG_ON(current->journal_info); + if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE)) + return 0; + + while ((folio = writeback_iter(mapping, wbc, folio, &error))) { + if (folio_test_checked(folio)) { + folio_redirty_for_writepage(wbc, folio); + folio_unlock(folio); + continue; + } + error = __gfs2_jdata_write_folio(folio, wbc); + } + + return error; +} + /** * gfs2_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h index a10c4334d2489..bf002522a7822 100644 --- a/fs/gfs2/aops.h +++ b/fs/gfs2/aops.h @@ -9,7 +9,6 @@ #include "incore.h" void adjust_fs_space(struct inode *inode); -void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio, - size_t from, size_t len); +int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc); #endif /* __AOPS_DOT_H__ */ diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 1795c4e8dbf66..28ad07b003484 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -988,7 +988,8 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos, struct gfs2_sbd *sdp = GFS2_SB(inode); if (!gfs2_is_stuffed(ip)) - gfs2_trans_add_databufs(ip, folio, offset_in_folio(folio, pos), + gfs2_trans_add_databufs(ip->i_gl, folio, + offset_in_folio(folio, pos), copied); folio_unlock(folio); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 4f1eca99786b6..161fc76ed5b0e 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -807,6 +807,7 @@ __acquires(&gl->gl_lockref.lock) } if (ls->ls_ops->lm_lock) { + set_bit(GLF_PENDING_REPLY, &gl->gl_flags); spin_unlock(&gl->gl_lockref.lock); ret = ls->ls_ops->lm_lock(gl, target, lck_flags); spin_lock(&gl->gl_lockref.lock); @@ -825,6 +826,7 @@ __acquires(&gl->gl_lockref.lock) /* The operation will be completed asynchronously. 
*/ return; } + clear_bit(GLF_PENDING_REPLY, &gl->gl_flags); } /* Complete the operation now. */ @@ -985,16 +987,22 @@ static bool gfs2_try_evict(struct gfs2_glock *gl) ip = NULL; spin_unlock(&gl->gl_lockref.lock); if (ip) { - gl->gl_no_formal_ino = ip->i_no_formal_ino; - set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); + wait_on_inode(&ip->i_inode); + if (is_bad_inode(&ip->i_inode)) { + iput(&ip->i_inode); + ip = NULL; + } + } + if (ip) { + set_bit(GLF_DEFER_DELETE, &gl->gl_flags); d_prune_aliases(&ip->i_inode); iput(&ip->i_inode); + clear_bit(GLF_DEFER_DELETE, &gl->gl_flags); /* If the inode was evicted, gl->gl_object will now be NULL. */ spin_lock(&gl->gl_lockref.lock); ip = gl->gl_object; if (ip) { - clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); if (!igrab(&ip->i_inode)) ip = NULL; } @@ -1183,7 +1191,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, const struct gfs2_glock_operations *glops, int create, struct gfs2_glock **glp) { - struct super_block *s = sdp->sd_vfs; struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type, .ln_sbd = sdp }; @@ -1246,7 +1253,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, mapping = gfs2_glock2aspace(gl); if (mapping) { mapping->a_ops = &gfs2_meta_aops; - mapping->host = s->s_bdev->bd_mapping->host; + mapping->host = sdp->sd_inode; mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_NOFS); mapping->i_private_data = NULL; @@ -1955,6 +1962,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; spin_lock(&gl->gl_lockref.lock); + clear_bit(GLF_PENDING_REPLY, &gl->gl_flags); gl->gl_reply = ret; if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { @@ -2355,6 +2363,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'f'; if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) *p++ = 'i'; + if (test_bit(GLF_PENDING_REPLY, gflags)) + *p++ = 'R'; if (test_bit(GLF_HAVE_REPLY, gflags)) *p++ = 'r'; if (test_bit(GLF_INITIAL, gflags)) @@ -2379,6 +2389,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'e'; if (test_bit(GLF_VERIFY_DELETE, gflags)) *p++ = 'E'; + if (test_bit(GLF_DEFER_DELETE, gflags)) + *p++ = 's'; *p = 0; return buf; } diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 95d8081681dcc..4b6b23c638e29 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -168,7 +168,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) static int gfs2_rgrp_metasync(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct address_space *metamapping = &sdp->sd_aspace; + struct address_space *metamapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK; @@ -225,7 +225,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl) static void rgrp_go_inval(struct gfs2_glock *gl, int flags) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct address_space *mapping = &sdp->sd_aspace; + struct address_space *mapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; loff_t start, end; @@ -494,11 +494,18 @@ int gfs2_inode_refresh(struct gfs2_inode *ip) static int inode_go_instantiate(struct gfs2_glock *gl) { struct gfs2_inode *ip = gl->gl_object; + struct gfs2_glock *io_gl; + int error; if (!ip) /* no inode to populate - read it in later */ return 0; - return gfs2_inode_refresh(ip); + error = gfs2_inode_refresh(ip); + if (error) 
+ return error; + io_gl = ip->i_iopen_gh.gh_gl; + io_gl->gl_no_formal_ino = ip->i_no_formal_ino; + return 0; } static int inode_go_held(struct gfs2_holder *gh) diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index bd1348bff90eb..142f61228d15e 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -330,6 +330,8 @@ enum { GLF_UNLOCKED = 16, /* Wait for glock to be unlocked */ GLF_TRY_TO_EVICT = 17, /* iopen glocks only */ GLF_VERIFY_DELETE = 18, /* iopen glocks only */ + GLF_PENDING_REPLY = 19, + GLF_DEFER_DELETE = 20, /* iopen glocks only */ }; struct gfs2_glock { @@ -376,7 +378,6 @@ enum { GIF_SW_PAGED = 3, GIF_FREE_VFS_INODE = 5, GIF_GLOP_PENDING = 6, - GIF_DEFERRED_DELETE = 7, }; struct gfs2_inode { @@ -793,7 +794,7 @@ struct gfs2_sbd { /* Log stuff */ - struct address_space sd_aspace; + struct inode *sd_inode; spinlock_t sd_log_lock; @@ -849,6 +850,13 @@ struct gfs2_sbd { unsigned long sd_glock_dqs_held; }; +#define GFS2_BAD_INO 1 + +static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp) +{ + return sdp->sd_inode->i_mapping; +} + static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) { gl->gl_stats.stats[which]++; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 1b95db2c3aac3..0b546024f5ef7 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -439,6 +439,74 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks) return error; } +static void gfs2_final_release_pages(struct gfs2_inode *ip) +{ + struct inode *inode = &ip->i_inode; + struct gfs2_glock *gl = ip->i_gl; + + if (unlikely(!gl)) { + /* This can only happen during incomplete inode creation. */ + BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); + return; + } + + truncate_inode_pages(gfs2_glock2aspace(gl), 0); + truncate_inode_pages(&inode->i_data, 0); + + if (atomic_read(&gl->gl_revokes) == 0) { + clear_bit(GLF_LFLUSH, &gl->gl_flags); + clear_bit(GLF_DIRTY, &gl->gl_flags); + } +} + +int gfs2_dinode_dealloc(struct gfs2_inode *ip) +{ + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); + struct gfs2_rgrpd *rgd; + struct gfs2_holder gh; + int error; + + if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { + gfs2_consist_inode(ip); + return -EIO; + } + + gfs2_rindex_update(sdp); + + error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); + if (error) + return error; + + rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); + if (!rgd) { + gfs2_consist_inode(ip); + error = -EIO; + goto out_qs; + } + + error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, + LM_FLAG_NODE_SCOPE, &gh); + if (error) + goto out_qs; + + error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, + sdp->sd_jdesc->jd_blocks); + if (error) + goto out_rg_gunlock; + + gfs2_free_di(rgd, ip); + + gfs2_final_release_pages(ip); + + gfs2_trans_end(sdp); + +out_rg_gunlock: + gfs2_glock_dq_uninit(&gh); +out_qs: + gfs2_quota_unhold(ip); + return error; +} + static void gfs2_init_dir(struct buffer_head *dibh, const struct gfs2_inode *parent) { @@ -629,10 +697,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_glock *io_gl; - int error; + int error, dealloc_error; u32 aflags = 0; unsigned blocks = 1; struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, }; + bool xattr_initialized = false; if (!name->len || name->len > GFS2_FNAMESIZE) return -ENAMETOOLONG; @@ -659,7 +728,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (!IS_ERR(inode)) { if 
(S_ISDIR(inode->i_mode)) { iput(inode); - inode = ERR_PTR(-EISDIR); + inode = NULL; + error = -EISDIR; goto fail_gunlock; } d_instantiate(dentry, inode); @@ -744,12 +814,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (error) - goto fail_free_inode; + goto fail_dealloc_inode; error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) - goto fail_free_inode; + goto fail_dealloc_inode; gfs2_cancel_delete_work(io_gl); + io_gl->gl_no_formal_ino = ip->i_no_formal_ino; retry: error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr); @@ -771,8 +842,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (error) goto fail_gunlock3; - if (blocks > 1) + if (blocks > 1) { gfs2_init_xattr(ip); + xattr_initialized = true; + } init_dinode(dip, ip, symname); gfs2_trans_end(sdp); @@ -827,6 +900,18 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_glock_dq_uninit(&ip->i_iopen_gh); fail_gunlock2: gfs2_glock_put(io_gl); +fail_dealloc_inode: + set_bit(GIF_ALLOC_FAILED, &ip->i_flags); + dealloc_error = 0; + if (ip->i_eattr) + dealloc_error = gfs2_ea_dealloc(ip, xattr_initialized); + clear_nlink(inode); + mark_inode_dirty(inode); + if (!dealloc_error) + dealloc_error = gfs2_dinode_dealloc(ip); + if (dealloc_error) + fs_warn(sdp, "%s: %d\n", __func__, dealloc_error); + ip->i_no_addr = 0; fail_free_inode: if (ip->i_gl) { gfs2_glock_put(ip->i_gl); @@ -841,10 +926,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_dir_no_add(&da); gfs2_glock_dq_uninit(&d_gh); if (!IS_ERR_OR_NULL(inode)) { - set_bit(GIF_ALLOC_FAILED, &ip->i_flags); - clear_nlink(inode); - if (ip->i_no_addr) - mark_inode_dirty(inode); if (inode->i_state & I_NEW) iget_failed(inode); else diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index fd15d1c6b6fb1..225b9d0038cd0 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -92,6 +92,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, u64 no_formal_ino, unsigned int blktype); +int gfs2_dinode_dealloc(struct gfs2_inode *ip); int gfs2_inode_refresh(struct gfs2_inode *ip); diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index fa5134df985f7..9e27dd8bef88d 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -975,14 +975,15 @@ static int control_mount(struct gfs2_sbd *sdp) if (sdp->sd_args.ar_spectator) { fs_info(sdp, "Recovery is required. 
Waiting for a " "non-spectator to mount.\n"); + spin_unlock(&ls->ls_recover_spin); msleep_interruptible(1000); } else { fs_info(sdp, "control_mount wait1 block %u start %u " "mount %u lvb %u flags %lx\n", block_gen, start_gen, mount_gen, lvb_gen, ls->ls_recover_flags); + spin_unlock(&ls->ls_recover_spin); } - spin_unlock(&ls->ls_recover_spin); goto restart; } diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index f9c5089783d24..115c4ac457e90 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -31,6 +31,7 @@ #include "dir.h" #include "trace_gfs2.h" #include "trans.h" +#include "aops.h" static void gfs2_log_shutdown(struct gfs2_sbd *sdp); @@ -131,7 +132,11 @@ __acquires(&sdp->sd_ail_lock) if (!mapping) continue; spin_unlock(&sdp->sd_ail_lock); - ret = mapping->a_ops->writepages(mapping, wbc); + BUG_ON(GFS2_SB(mapping->host) != sdp); + if (gfs2_is_jdata(GFS2_I(mapping->host))) + ret = gfs2_jdata_writeback(mapping, wbc); + else + ret = mapping->a_ops->writepages(mapping, wbc); if (need_resched()) { blk_finish_plug(plug); cond_resched(); diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index fea3efcc2f930..960d6afcdfad8 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -132,7 +132,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) unsigned int bufnum; if (mapping == NULL) - mapping = &sdp->sd_aspace; + mapping = gfs2_aspace(sdp); shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; index = blkno >> shift; /* convert block to page */ diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index 831d988c2ceb7..b7c8a6684d024 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h @@ -44,9 +44,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) struct gfs2_glock_aspace *gla = container_of(mapping, struct gfs2_glock_aspace, mapping); return gla->glock.gl_name.ln_sbd; - } else if (mapping->a_ops == &gfs2_rgrp_aops) - return container_of(mapping, struct gfs2_sbd, sd_aspace); - else + } else return inode->i_sb->s_fs_info; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index e83d293c36142..4a0f7de41b2b2 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -64,15 +64,17 @@ static void gfs2_tune_init(struct gfs2_tune *gt) void free_sbd(struct gfs2_sbd *sdp) { + struct super_block *sb = sdp->sd_vfs; + if (sdp->sd_lkstats) free_percpu(sdp->sd_lkstats); + sb->s_fs_info = NULL; kfree(sdp); } static struct gfs2_sbd *init_sbd(struct super_block *sb) { struct gfs2_sbd *sdp; - struct address_space *mapping; sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL); if (!sdp) @@ -109,16 +111,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) INIT_LIST_HEAD(&sdp->sd_sc_inodes_list); - mapping = &sdp->sd_aspace; - - address_space_init_once(mapping); - mapping->a_ops = &gfs2_rgrp_aops; - mapping->host = sb->s_bdev->bd_mapping->host; - mapping->flags = 0; - mapping_set_gfp_mask(mapping, GFP_NOFS); - mapping->i_private_data = NULL; - mapping->writeback_index = 0; - spin_lock_init(&sdp->sd_log_lock); atomic_set(&sdp->sd_log_pinned, 0); INIT_LIST_HEAD(&sdp->sd_log_revokes); @@ -1135,6 +1127,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) int silent = fc->sb_flags & SB_SILENT; struct gfs2_sbd *sdp; struct gfs2_holder mount_gh; + struct address_space *mapping; int error; sdp = init_sbd(sb); @@ -1156,6 +1149,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_NOSEC; sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; + sb->s_d_op = &gfs2_dops; sb->s_export_op = 
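The gfs2 hunks around this point replace the superblock's embedded sd_aspace with a private in-core inode (sd_inode) whose i_mapping backs metadata I/O — the gfs2_aspace() accessor above reads it, and the fill_super() hunk just below allocates it — so teardown becomes a plain iput(). A condensed sketch of setting up such an internal inode; meta_aops is an illustrative stand-in for gfs2_rgrp_aops and error handling is trimmed:

    /* Sketch: back a filesystem-internal address space with a private
     * inode instead of an embedded struct address_space. */
    static int init_meta_mapping(struct super_block *sb, struct inode **res)
    {
        struct inode *inode = new_inode(sb);

        if (!inode)
            return -ENOMEM;
        inode->i_ino = 1;                       /* reserved, never user-visible */
        inode->i_size = OFFSET_MAX;             /* covers any metadata offset */
        inode->i_mapping->a_ops = &meta_aops;   /* illustrative a_ops */
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        *res = inode;
        return 0;                               /* released later via iput() */
    }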
&gfs2_export_ops; sb->s_qcop = &gfs2_quotactl_ops; @@ -1181,9 +1175,21 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) sdp->sd_tune.gt_statfs_quantum = 30; } + /* Set up an address space for metadata writes */ + sdp->sd_inode = new_inode(sb); + error = -ENOMEM; + if (!sdp->sd_inode) + goto fail_free; + sdp->sd_inode->i_ino = GFS2_BAD_INO; + sdp->sd_inode->i_size = OFFSET_MAX; + + mapping = gfs2_aspace(sdp); + mapping->a_ops = &gfs2_rgrp_aops; + mapping_set_gfp_mask(mapping, GFP_NOFS); + error = init_names(sdp, silent); if (error) - goto fail_free; + goto fail_iput; snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name); @@ -1192,7 +1198,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0, sdp->sd_fsname); if (!sdp->sd_glock_wq) - goto fail_free; + goto fail_iput; sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname); @@ -1309,9 +1315,10 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) fail_glock_wq: if (sdp->sd_glock_wq) destroy_workqueue(sdp->sd_glock_wq); +fail_iput: + iput(sdp->sd_inode); fail_free: free_sbd(sdp); - sb->s_fs_info = NULL; return error; } diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index b9cef63c78717..3b1303f97a3bc 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -44,10 +44,10 @@ #include "xattr.h" #include "lops.h" -enum dinode_demise { - SHOULD_DELETE_DINODE, - SHOULD_NOT_DELETE_DINODE, - SHOULD_DEFER_EVICTION, +enum evict_behavior { + EVICT_SHOULD_DELETE, + EVICT_SHOULD_SKIP_DELETE, + EVICT_SHOULD_DEFER_DELETE, }; /** @@ -648,7 +648,7 @@ static void gfs2_put_super(struct super_block *sb) gfs2_jindex_free(sdp); /* Take apart glock structures and buffer lists */ gfs2_gl_hash_clear(sdp); - truncate_inode_pages_final(&sdp->sd_aspace); + iput(sdp->sd_inode); gfs2_delete_debugfs_file(sdp); gfs2_sys_fs_del(sdp); @@ -674,7 +674,7 @@ static int gfs2_sync_fs(struct super_block *sb, int wait) return sdp->sd_log_error; } -static int gfs2_do_thaw(struct gfs2_sbd *sdp) +static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who) { struct super_block *sb = sdp->sd_vfs; int error; @@ -682,7 +682,7 @@ static int gfs2_do_thaw(struct gfs2_sbd *sdp) error = gfs2_freeze_lock_shared(sdp); if (error) goto fail; - error = thaw_super(sb, FREEZE_HOLDER_USERSPACE); + error = thaw_super(sb, who); if (!error) return 0; @@ -710,7 +710,7 @@ void gfs2_freeze_func(struct work_struct *work) gfs2_freeze_unlock(sdp); set_bit(SDF_FROZEN, &sdp->sd_flags); - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE); if (error) goto out; @@ -728,6 +728,7 @@ void gfs2_freeze_func(struct work_struct *work) /** * gfs2_freeze_super - prevent further writes to the filesystem * @sb: the VFS structure for the filesystem + * @who: freeze flags * */ @@ -744,7 +745,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) } for (;;) { - error = freeze_super(sb, FREEZE_HOLDER_USERSPACE); + error = freeze_super(sb, who); if (error) { fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error); @@ -758,7 +759,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) break; } - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, who); if (error) goto out; @@ -796,6 +797,7 @@ static int gfs2_freeze_fs(struct super_block *sb) /** * gfs2_thaw_super - reallow writes to the filesystem * @sb: the VFS structure for the filesystem + * @who: 
freeze flags * */ @@ -814,7 +816,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who) atomic_inc(&sb->s_active); gfs2_freeze_unlock(sdp); - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, who); if (!error) { clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags); @@ -1173,74 +1175,6 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) return 0; } -static void gfs2_final_release_pages(struct gfs2_inode *ip) -{ - struct inode *inode = &ip->i_inode; - struct gfs2_glock *gl = ip->i_gl; - - if (unlikely(!gl)) { - /* This can only happen during incomplete inode creation. */ - BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); - return; - } - - truncate_inode_pages(gfs2_glock2aspace(gl), 0); - truncate_inode_pages(&inode->i_data, 0); - - if (atomic_read(&gl->gl_revokes) == 0) { - clear_bit(GLF_LFLUSH, &gl->gl_flags); - clear_bit(GLF_DIRTY, &gl->gl_flags); - } -} - -static int gfs2_dinode_dealloc(struct gfs2_inode *ip) -{ - struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct gfs2_rgrpd *rgd; - struct gfs2_holder gh; - int error; - - if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { - gfs2_consist_inode(ip); - return -EIO; - } - - gfs2_rindex_update(sdp); - - error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); - if (error) - return error; - - rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); - if (!rgd) { - gfs2_consist_inode(ip); - error = -EIO; - goto out_qs; - } - - error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, - LM_FLAG_NODE_SCOPE, &gh); - if (error) - goto out_qs; - - error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, - sdp->sd_jdesc->jd_blocks); - if (error) - goto out_rg_gunlock; - - gfs2_free_di(rgd, ip); - - gfs2_final_release_pages(ip); - - gfs2_trans_end(sdp); - -out_rg_gunlock: - gfs2_glock_dq_uninit(&gh); -out_qs: - gfs2_quota_unhold(ip); - return error; -} - /** * gfs2_glock_put_eventually * @gl: The glock to put @@ -1313,23 +1247,21 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode) * * Returns: the fate of the dinode */ -static enum dinode_demise evict_should_delete(struct inode *inode, - struct gfs2_holder *gh) +static enum evict_behavior evict_should_delete(struct inode *inode, + struct gfs2_holder *gh) { struct gfs2_inode *ip = GFS2_I(inode); struct super_block *sb = inode->i_sb; struct gfs2_sbd *sdp = sb->s_fs_info; int ret; - if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) - goto should_delete; - - if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags)) - return SHOULD_DEFER_EVICTION; + if (gfs2_holder_initialized(&ip->i_iopen_gh) && + test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags)) + return EVICT_SHOULD_DEFER_DELETE; /* Deletes should never happen under memory pressure anymore. 
*/ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) - return SHOULD_DEFER_EVICTION; + return EVICT_SHOULD_DEFER_DELETE; /* Must not read inode block until block type has been verified */ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh); @@ -1337,34 +1269,33 @@ static enum dinode_demise evict_should_delete(struct inode *inode, glock_clear_object(ip->i_iopen_gh.gh_gl, ip); ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_glock_dq_uninit(&ip->i_iopen_gh); - return SHOULD_DEFER_EVICTION; + return EVICT_SHOULD_DEFER_DELETE; } if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino)) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); if (ret) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; ret = gfs2_instantiate(gh); if (ret) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; /* * The inode may have been recreated in the meantime. */ if (inode->i_nlink) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; -should_delete: if (gfs2_holder_initialized(&ip->i_iopen_gh) && test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) { if (!gfs2_upgrade_iopen_glock(inode)) { gfs2_holder_uninit(&ip->i_iopen_gh); - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; } } - return SHOULD_DELETE_DINODE; + return EVICT_SHOULD_DELETE; } /** @@ -1384,7 +1315,7 @@ static int evict_unlinked_inode(struct inode *inode) } if (ip->i_eattr) { - ret = gfs2_ea_dealloc(ip); + ret = gfs2_ea_dealloc(ip, true); if (ret) goto out; } @@ -1475,6 +1406,7 @@ static void gfs2_evict_inode(struct inode *inode) struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; + enum evict_behavior behavior; int ret; if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr) @@ -1489,10 +1421,10 @@ static void gfs2_evict_inode(struct inode *inode) goto out; gfs2_holder_mark_uninitialized(&gh); - ret = evict_should_delete(inode, &gh); - if (ret == SHOULD_DEFER_EVICTION) + behavior = evict_should_delete(inode, &gh); + if (behavior == EVICT_SHOULD_DEFER_DELETE) goto out; - if (ret == SHOULD_DELETE_DINODE) + if (behavior == EVICT_SHOULD_DELETE) ret = evict_unlinked_inode(inode); else ret = evict_linked_inode(inode); diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index ecc699f8d9fca..6286183021022 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -764,7 +764,6 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp) fs_err(sdp, "error %d adding sysfs files\n", error); kobject_put(&sdp->sd_kobj); wait_for_completion(&sdp->sd_kobj_unregister); - sb->s_fs_info = NULL; return error; } diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h index 8eae8d62a4132..43de603ab347e 100644 --- a/fs/gfs2/trace_gfs2.h +++ b/fs/gfs2/trace_gfs2.h @@ -53,12 +53,19 @@ {(1UL << GLF_DIRTY), "y" }, \ {(1UL << GLF_LFLUSH), "f" }, \ {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \ + {(1UL << GLF_PENDING_REPLY), "R" }, \ {(1UL << GLF_HAVE_REPLY), "r" }, \ {(1UL << GLF_INITIAL), "a" }, \ {(1UL << GLF_HAVE_FROZEN_REPLY), "F" }, \ {(1UL << GLF_LRU), "L" }, \ {(1UL << GLF_OBJECT), "o" }, \ - {(1UL << GLF_BLOCKING), "b" }) + {(1UL << GLF_BLOCKING), "b" }, \ + {(1UL << GLF_UNLOCKED), "x" }, \ + {(1UL << GLF_INSTANTIATE_NEEDED), "n" }, \ + {(1UL << GLF_INSTANTIATE_IN_PROG), "N" }, \ + {(1UL << GLF_TRY_TO_EVICT), "e" }, \ + {(1UL << GLF_VERIFY_DELETE), "E" }, \ + {(1UL << GLF_DEFER_DELETE), "s" }) #ifndef NUMPTY #define NUMPTY diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 
192213c7359af..42cf8c5204db4 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -226,6 +226,27 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) unlock_buffer(bh); } +void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio, + size_t from, size_t len) +{ + struct buffer_head *head = folio_buffers(folio); + unsigned int bsize = head->b_size; + struct buffer_head *bh; + size_t to = from + len; + size_t start, end; + + for (bh = head, start = 0; bh != head || !start; + bh = bh->b_this_page, start = end) { + end = start + bsize; + if (end <= from) + continue; + if (start >= to) + break; + set_buffer_uptodate(bh); + gfs2_trans_add_data(gl, bh); + } +} + void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) { diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h index f8ce5302280d3..790c55f59e612 100644 --- a/fs/gfs2/trans.h +++ b/fs/gfs2/trans.h @@ -42,6 +42,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, void gfs2_trans_end(struct gfs2_sbd *sdp); void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh); +void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio, + size_t from, size_t len); void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh); void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len); diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 17ae5070a90e6..df9c93de94c79 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c @@ -1383,7 +1383,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) return error; } -static int ea_dealloc_block(struct gfs2_inode *ip) +static int ea_dealloc_block(struct gfs2_inode *ip, bool initialized) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; @@ -1416,7 +1416,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip) ip->i_eattr = 0; gfs2_add_inode_blocks(&ip->i_inode, -1); - if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { + if (initialized) { error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { gfs2_trans_add_meta(ip->i_gl, dibh); @@ -1435,11 +1435,12 @@ static int ea_dealloc_block(struct gfs2_inode *ip) /** * gfs2_ea_dealloc - deallocate the extended attribute fork * @ip: the inode + * @initialized: xattrs have been initialized * * Returns: errno */ -int gfs2_ea_dealloc(struct gfs2_inode *ip) +int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized) { int error; @@ -1451,7 +1452,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) if (error) return error; - if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { + if (initialized) { error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); if (error) goto out_quota; @@ -1463,7 +1464,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) } } - error = ea_dealloc_block(ip); + error = ea_dealloc_block(ip, initialized); out_quota: gfs2_quota_unhold(ip); diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h index eb12eb7e37c19..3c9788e0e1375 100644 --- a/fs/gfs2/xattr.h +++ b/fs/gfs2/xattr.h @@ -54,7 +54,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name, const void *value, size_t size, int flags, int type); ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size); -int gfs2_ea_dealloc(struct gfs2_inode *ip); +int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized); /* Exported to acl.c */ diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 47038e6608123..d5da9817df9b3 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -1275,6 
+1275,7 @@ static int isofs_read_inode(struct inode *inode, int relocated) unsigned long offset; struct iso_inode_info *ei = ISOFS_I(inode); int ret = -EIO; + struct timespec64 ts; block = ei->i_iget5_block; bh = sb_bread(inode->i_sb, block); @@ -1387,8 +1388,10 @@ static int isofs_read_inode(struct inode *inode, int relocated) inode->i_ino, de->flags[-high_sierra]); } #endif - inode_set_mtime_to_ts(inode, - inode_set_atime_to_ts(inode, inode_set_ctime(inode, iso_date(de->date, high_sierra), 0))); + ts = iso_date(de->date, high_sierra ? ISO_DATE_HIGH_SIERRA : 0); + inode_set_ctime_to_ts(inode, ts); + inode_set_atime_to_ts(inode, ts); + inode_set_mtime_to_ts(inode, ts); ei->i_first_extent = (isonum_733(de->extent) + isonum_711(de->ext_attr_length)); diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 2d55207c9a990..5065558375333 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -106,7 +106,9 @@ static inline unsigned int isonum_733(u8 *p) /* Ignore bigendian datum due to broken mastering programs */ return get_unaligned_le32(p); } -extern int iso_date(u8 *, int); +#define ISO_DATE_HIGH_SIERRA (1 << 0) +#define ISO_DATE_LONG_FORM (1 << 1) +struct timespec64 iso_date(u8 *p, int flags); struct inode; /* To make gcc happy */ diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c index dbf911126e610..576498245b9d7 100644 --- a/fs/isofs/rock.c +++ b/fs/isofs/rock.c @@ -412,7 +412,12 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de, } } break; - case SIG('T', 'F'): + case SIG('T', 'F'): { + int flags, size, slen; + + flags = rr->u.TF.flags & TF_LONG_FORM ? ISO_DATE_LONG_FORM : 0; + size = rr->u.TF.flags & TF_LONG_FORM ? 17 : 7; + slen = rr->len - 5; /* * Some RRIP writers incorrectly place ctime in the * TF_CREATE field. Try to handle this correctly for @@ -420,27 +425,28 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de, */ /* Rock ridge never appears on a High Sierra disk */ cnt = 0; - if (rr->u.TF.flags & TF_CREATE) { - inode_set_ctime(inode, - iso_date(rr->u.TF.times[cnt++].time, 0), - 0); + if ((rr->u.TF.flags & TF_CREATE) && size <= slen) { + inode_set_ctime_to_ts(inode, + iso_date(rr->u.TF.data + size * cnt++, flags)); + slen -= size; } - if (rr->u.TF.flags & TF_MODIFY) { - inode_set_mtime(inode, - iso_date(rr->u.TF.times[cnt++].time, 0), - 0); + if ((rr->u.TF.flags & TF_MODIFY) && size <= slen) { + inode_set_mtime_to_ts(inode, + iso_date(rr->u.TF.data + size * cnt++, flags)); + slen -= size; } - if (rr->u.TF.flags & TF_ACCESS) { - inode_set_atime(inode, - iso_date(rr->u.TF.times[cnt++].time, 0), - 0); + if ((rr->u.TF.flags & TF_ACCESS) && size <= slen) { + inode_set_atime_to_ts(inode, + iso_date(rr->u.TF.data + size * cnt++, flags)); + slen -= size; } - if (rr->u.TF.flags & TF_ATTRIBUTES) { - inode_set_ctime(inode, - iso_date(rr->u.TF.times[cnt++].time, 0), - 0); + if ((rr->u.TF.flags & TF_ATTRIBUTES) && size <= slen) { + inode_set_ctime_to_ts(inode, + iso_date(rr->u.TF.data + size * cnt++, flags)); + slen -= size; } break; + } case SIG('S', 'L'): { int slen; diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h index 7755e587f7785..c0856fa9bb6a4 100644 --- a/fs/isofs/rock.h +++ b/fs/isofs/rock.h @@ -65,13 +65,9 @@ struct RR_PL_s { __u8 location[8]; }; -struct stamp { - __u8 time[7]; /* actually 6 unsigned, 1 signed */ -} __attribute__ ((packed)); - struct RR_TF_s { __u8 flags; - struct stamp times[]; /* Variable number of these beasts */ + __u8 data[]; } __attribute__ ((packed)); /* Linux-specific extension for transparent decompression */ diff --git 
a/fs/isofs/util.c b/fs/isofs/util.c index e88dba7216618..42f479da0b282 100644 --- a/fs/isofs/util.c +++ b/fs/isofs/util.c @@ -16,29 +16,44 @@ * to GMT. Thus we should always be correct. */ -int iso_date(u8 *p, int flag) +struct timespec64 iso_date(u8 *p, int flags) { int year, month, day, hour, minute, second, tz; - int crtime; + struct timespec64 ts; + + if (flags & ISO_DATE_LONG_FORM) { + year = (p[0] - '0') * 1000 + + (p[1] - '0') * 100 + + (p[2] - '0') * 10 + + (p[3] - '0') - 1900; + month = ((p[4] - '0') * 10 + (p[5] - '0')); + day = ((p[6] - '0') * 10 + (p[7] - '0')); + hour = ((p[8] - '0') * 10 + (p[9] - '0')); + minute = ((p[10] - '0') * 10 + (p[11] - '0')); + second = ((p[12] - '0') * 10 + (p[13] - '0')); + ts.tv_nsec = ((p[14] - '0') * 10 + (p[15] - '0')) * 10000000; + tz = p[16]; + } else { + year = p[0]; + month = p[1]; + day = p[2]; + hour = p[3]; + minute = p[4]; + second = p[5]; + ts.tv_nsec = 0; + /* High sierra has no time zone */ + tz = flags & ISO_DATE_HIGH_SIERRA ? 0 : p[6]; + } - year = p[0]; - month = p[1]; - day = p[2]; - hour = p[3]; - minute = p[4]; - second = p[5]; - if (flag == 0) tz = p[6]; /* High sierra has no time zone */ - else tz = 0; - if (year < 0) { - crtime = 0; + ts.tv_sec = 0; } else { - crtime = mktime64(year+1900, month, day, hour, minute, second); + ts.tv_sec = mktime64(year+1900, month, day, hour, minute, second); /* sign extend */ if (tz & 0x80) tz |= (-1 << 8); - + /* * The timezone offset is unreliable on some disks, * so we make a sanity check. In no case is it ever @@ -65,7 +80,7 @@ int iso_date(u8 *p, int flag) * for pointing out the sign error. */ if (-52 <= tz && tz <= 52) - crtime -= tz * 15 * 60; + ts.tv_sec -= tz * 15 * 60; } - return crtime; -} + return ts; +} diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 66513c18ca294..f440110df93a9 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1513,7 +1513,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) jh->b_next_transaction == transaction); spin_unlock(&jh->b_state_lock); } - if (jh->b_modified == 1) { + if (data_race(jh->b_modified == 1)) { /* If it's in our transaction it must be in BJ_Metadata list. 
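The jbd2 hunk above annotates the lockless fast-path read of jh->b_modified with data_race(), telling KCSAN the race is intentional; the interesting cases are then re-validated under b_state_lock before anything acts on the value. A generic sketch of that annotate-then-recheck idiom, with illustrative names:

    /* Sketch: tolerate a stale lockless read, confirm under the lock. */
    if (data_race(obj->state == STATE_DONE))
        return 0;                   /* fast path: value may be stale */

    spin_lock(&obj->lock);
    if (obj->state != STATE_DONE)   /* authoritative re-check */
        finish(obj);
    spin_unlock(&obj->lock);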
*/ if (data_race(jh->b_transaction == transaction && jh->b_jlist != BJ_Metadata)) { @@ -1532,7 +1532,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) goto out; } - journal = transaction->t_journal; spin_lock(&jh->b_state_lock); if (is_handle_aborted(handle)) { @@ -1547,6 +1546,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) goto out_unlock_bh; } + journal = transaction->t_journal; + if (jh->b_modified == 0) { /* * This buffer's got modified and becoming part diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index ef3a1e1b6cb06..fda9f4d6093f9 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -425,7 +425,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb .totlen = cpu_to_je32(c->cleanmarker_size) }; - jffs2_prealloc_raw_node_refs(c, jeb, 1); + ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); + if (ret) + goto filebad; marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 29671e33a1714..62879c218d4b1 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -256,7 +256,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n", __func__, skip); - jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); + ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); + if (ret) + goto out; jffs2_scan_dirty_space(c, c->nextblock, skip); } #endif diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index 4fe64519870f1..d83372d3e1a07 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c @@ -858,7 +858,10 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) spin_unlock(&c->erase_completion_lock); jeb = c->nextblock; - jffs2_prealloc_raw_node_refs(c, jeb, 1); + ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); + + if (ret) + goto out; if (!c->summary->sum_num || !c->summary->sum_list_head) { JFFS2_WARNING("Empty summary info!!!\n"); @@ -872,6 +875,8 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) datasize += padsize; ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); + +out: spin_lock(&c->erase_completion_lock); return ret; } diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 0e1019382cf51..35e063c9f3a42 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -178,45 +178,30 @@ int dbMount(struct inode *ipbmap) dbmp_le = (struct dbmap_disk *) mp->data; bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize); bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree); - bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); - if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE || - bmp->db_l2nbperpage < 0) { - err = -EINVAL; - goto err_release_metapage; - } - bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); - if (!bmp->db_numag || bmp->db_numag > MAXAG) { - err = -EINVAL; - goto err_release_metapage; - } - bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); - if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 || - bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) { - err = -EINVAL; - goto err_release_metapage; - } - bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight); bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); - if (!bmp->db_agwidth) { - err = -EINVAL; - goto err_release_metapage; - } bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); - if 
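This jfs_dmap hunk (continuing below) folds the piecemeal range checks in dbMount() into one combined validation of the on-disk bitmap descriptor, so no field is trusted before every field has been bounds-checked. A reduced sketch of the pattern; the struct and limits are illustrative, not the real dbmap layout:

    /* Sketch: validate every untrusted on-disk field before use. */
    struct disk_map {
        s32 l2nbperpage, numag, maxag, agwidth;
    };

    static int map_sanity_check(const struct disk_map *m)
    {
        if (m->l2nbperpage < 0 || m->l2nbperpage > 12 ||
            m->numag < 1 || m->numag > 128 ||
            m->maxag < 0 || m->maxag >= 128 ||
            m->agwidth < 1)
            return -EINVAL;   /* corrupt metadata: reject the mount */
        return 0;
    }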
(bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG || - bmp->db_agl2size < 0) { - err = -EINVAL; - goto err_release_metapage; - } - if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) { + if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) || + (bmp->db_l2nbperpage < 0) || + !bmp->db_numag || (bmp->db_numag > MAXAG) || + (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) || + (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) || + (bmp->db_agheight < 0) || (bmp->db_agheight > (L2LPERCTL >> 1)) || + (bmp->db_agwidth < 1) || (bmp->db_agwidth > (LPERCTL / MAXAG)) || + (bmp->db_agwidth > (1 << (L2LPERCTL - (bmp->db_agheight << 1)))) || + (bmp->db_agstart < 0) || + (bmp->db_agstart > (CTLTREESIZE - 1 - bmp->db_agwidth * (MAXAG - 1))) || + (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) || + (bmp->db_agl2size < 0) || + ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) { err = -EINVAL; goto err_release_metapage; } diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 458519e416fe7..5dc90a498e75d 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -1560,8 +1560,9 @@ void kernfs_break_active_protection(struct kernfs_node *kn) * invoked before finishing the kernfs operation. Note that while this * function restores the active reference, it doesn't and can't actually * restore the active protection - @kn may already or be in the process of - * being removed. Once kernfs_break_active_protection() is invoked, that - * protection is irreversibly gone for the kernfs operation instance. + * being drained and removed. Once kernfs_break_active_protection() is + * invoked, that protection is irreversibly gone for the kernfs operation + * instance. * * While this function may be called at any point after * kernfs_break_active_protection() is invoked, its most useful location diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 8502ef68459b9..2d9d5dfa19b87 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -778,8 +778,9 @@ bool kernfs_should_drain_open_files(struct kernfs_node *kn) /* * @kn being deactivated guarantees that @kn->attr.open can't change * beneath us making the lockless test below safe. + * Callers post kernfs_unbreak_active_protection may be counted in + * kn->active by now, do not WARN_ON because of them. 
*/ - WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS); rcu_read_lock(); on = rcu_dereference(kn->attr.open); @@ -927,7 +928,7 @@ static void kernfs_notify_workfn(struct work_struct *work) if (!inode) continue; - name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name)); + name = QSTR(kn->name); parent = kernfs_get_parent(kn); if (parent) { p_inode = ilookup(info->sb, kernfs_ino(parent)); diff --git a/fs/namespace.c b/fs/namespace.c index c1ac585e41e36..b5c5cf01d0c40 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2521,14 +2521,14 @@ static int attach_recursive_mnt(struct mount *source_mnt, hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { struct mount *q; hlist_del_init(&child->mnt_hash); - q = __lookup_mnt(&child->mnt_parent->mnt, - child->mnt_mountpoint); - if (q) - mnt_change_mountpoint(child, smp, q); /* Notice when we are propagating across user namespaces */ if (child->mnt_parent->mnt_ns->user_ns != user_ns) lock_mnt_tree(child); child->mnt.mnt_flags &= ~MNT_LOCKED; + q = __lookup_mnt(&child->mnt_parent->mnt, + child->mnt_mountpoint); + if (q) + mnt_change_mountpoint(child, smp, q); commit_tree(child); } put_mountpoint(smp); @@ -2714,6 +2714,10 @@ static int do_change_type(struct path *path, int ms_flags) return -EINVAL; namespace_lock(); + if (!check_mnt(mnt)) { + err = -EINVAL; + goto out_unlock; + } if (type == MS_SHARED) { err = invent_group_ids(mnt, recurse); if (err) @@ -3151,7 +3155,7 @@ static int do_set_group(struct path *from_path, struct path *to_path) if (IS_MNT_SLAVE(from)) { struct mount *m = from->mnt_master; - list_add(&to->mnt_slave, &m->mnt_slave_list); + list_add(&to->mnt_slave, &from->mnt_slave); to->mnt_master = m; } @@ -3176,18 +3180,25 @@ static int do_set_group(struct path *from_path, struct path *to_path) * Check if path is overmounted, i.e., if there's a mount on top of * @path->mnt with @path->dentry as mountpoint. * - * Context: This function expects namespace_lock() to be held. + * Context: namespace_sem must be held at least shared. + * MUST NOT be called under lock_mount_hash() (there one should just + * call __lookup_mnt() and check if it returns NULL). * Return: If path is overmounted true is returned, false if not. 
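The reworked path_overmounted() below is an instance of the optimistic seqcount-reader pattern: perform a lockless RCU lookup against a read_seqbegin() snapshot of mount_lock, and only take a lock when a concurrent writer invalidated the snapshot. A generic sketch of the looping variant of the same idiom:

    /* Sketch: optimistic seqlock read; retry while a writer is active. */
    static int read_stable(seqlock_t *sl, const int *p)
    {
        unsigned int seq;
        int v;

        do {
            seq = read_seqbegin(sl);        /* snapshot writer generation */
            v = READ_ONCE(*p);              /* lockless read */
        } while (read_seqretry(sl, seq));   /* writer raced: retry */
        return v;
    }

The patch bounds the work differently: after one failed optimistic pass it takes read_seqlock_excl() for a stable second lookup instead of spinning.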
*/ static inline bool path_overmounted(const struct path *path) { + unsigned seq = read_seqbegin(&mount_lock); + bool no_child; + rcu_read_lock(); - if (unlikely(__lookup_mnt(path->mnt, path->dentry))) { - rcu_read_unlock(); - return true; - } + no_child = !__lookup_mnt(path->mnt, path->dentry); rcu_read_unlock(); - return false; + if (need_seqretry(&mount_lock, seq)) { + read_seqlock_excl(&mount_lock); + no_child = !__lookup_mnt(path->mnt, path->dentry); + read_sequnlock_excl(&mount_lock); + } + return unlikely(!no_child); } /** diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index b3910dfcb56d3..896d1d4219ed9 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -64,6 +64,7 @@ static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode, return; } + spin_lock(&inode->i_lock); i_size_write(inode, pos); #if IS_ENABLED(CONFIG_FSCACHE) fscache_update_cookie(ctx->cache, NULL, &pos); @@ -77,6 +78,7 @@ static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode, DIV_ROUND_UP(pos, SECTOR_SIZE), inode->i_blocks + add); } + spin_unlock(&inode->i_lock); } /** diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c index 26cf9c94deebb..8fbfaf71c154c 100644 --- a/fs/netfs/direct_write.c +++ b/fs/netfs/direct_write.c @@ -14,13 +14,17 @@ static void netfs_cleanup_dio_write(struct netfs_io_request *wreq) struct inode *inode = wreq->inode; unsigned long long end = wreq->start + wreq->transferred; - if (!wreq->error && - i_size_read(inode) < end) { + if (wreq->error || end <= i_size_read(inode)) + return; + + spin_lock(&inode->i_lock); + if (end > i_size_read(inode)) { if (wreq->netfs_ops->update_i_size) wreq->netfs_ops->update_i_size(inode, end); else i_size_write(inode, end); } + spin_unlock(&inode->i_lock); } /* diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c index 412d4da742270..a968688a73234 100644 --- a/fs/netfs/write_collect.c +++ b/fs/netfs/write_collect.c @@ -176,9 +176,10 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq, if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) break; if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { - struct iov_iter source = subreq->io_iter; + struct iov_iter source; - iov_iter_revert(&source, subreq->len - source.count); + netfs_reset_iter(subreq); + source = subreq->io_iter; __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); netfs_reissue_write(stream, subreq, &source); @@ -284,7 +285,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq, trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, refcount_read(&subreq->ref), netfs_sreq_trace_new); - netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + trace_netfs_sreq(subreq, netfs_sreq_trace_split); list_add(&subreq->rreq_link, &to->rreq_link); to = list_next_entry(to, rreq_link); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 03ecc77656151..4503758e9594b 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1096,6 +1096,8 @@ struct nfs_server *nfs_create_server(struct fs_context *fc) if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) server->namelen = NFS2_MAXNAMLEN; } + /* Linux 'subtree_check' borkenness mandates this setting */ + server->fh_expire_type = NFS_FH_VOL_RENAME; if (!(fattr->valid & NFS_ATTR_FATTR)) { error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh, diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 492cffd9d3d84..f9f4a92f63e92 100644 --- a/fs/nfs/dir.c +++ 
b/fs/nfs/dir.c @@ -2690,6 +2690,18 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data) unblock_revalidate(new_dentry); } +static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry, + struct dentry *new_dentry) +{ + struct nfs_server *server = NFS_SB(old_dentry->d_sb); + + if (old_dentry->d_parent != new_dentry->d_parent) + return false; + if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE) + return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN); + return true; +} + /* * RENAME * FIXME: Some nfsds, like the Linux user space nfsd, may generate a @@ -2777,7 +2789,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, } - if (S_ISREG(old_inode->i_mode)) + if (S_ISREG(old_inode->i_mode) && + nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry)) nfs_sync_inode(old_inode); task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, must_unblock ? nfs_unblock_rename : NULL); diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index 4fa304fa5bc4b..29d9234d5c085 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -76,6 +76,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, struct page *scratch; struct list_head dsaddrs; struct nfs4_pnfs_ds_addr *da; + struct net *net = server->nfs_client->cl_net; /* set up xdr stream */ scratch = alloc_page(gfp_flags); @@ -159,8 +160,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, mp_count = be32_to_cpup(p); /* multipath count */ for (j = 0; j < mp_count; j++) { - da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, - &stream, gfp_flags); + da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } @@ -170,7 +170,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, goto out_err_free_deviceid; } - dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + dsaddr->ds_list[i] = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); if (!dsaddr->ds_list[i]) goto out_err_drain_dsaddrs; trace_fl_getdevinfo(server, &pdev->dev_id, dsaddr->ds_list[i]->ds_remotestr); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 8f7ea4076653d..bf96f7a8900c1 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1104,6 +1104,7 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr) } static int ff_layout_async_handle_error_v4(struct rpc_task *task, + u32 op_status, struct nfs4_state *state, struct nfs_client *clp, struct pnfs_layout_segment *lseg, @@ -1114,32 +1115,42 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; - switch (task->tk_status) { - case -NFS4ERR_BADSESSION: - case -NFS4ERR_BADSLOT: - case -NFS4ERR_BAD_HIGH_SLOT: - case -NFS4ERR_DEADSESSION: - case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: - case -NFS4ERR_SEQ_FALSE_RETRY: - case -NFS4ERR_SEQ_MISORDERED: + switch (op_status) { + case NFS4_OK: + case NFS4ERR_NXIO: + break; + case NFSERR_PERM: + if (!task->tk_xprt) + break; + xprt_force_disconnect(task->tk_xprt); + goto out_retry; + case NFS4ERR_BADSESSION: + case NFS4ERR_BADSLOT: + case NFS4ERR_BAD_HIGH_SLOT: + case NFS4ERR_DEADSESSION: + case NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + case NFS4ERR_SEQ_FALSE_RETRY: + case NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR %d, 
Reset session. Exchangeid " "flags 0x%x\n", __func__, task->tk_status, clp->cl_exchange_flags); nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); - break; - case -NFS4ERR_DELAY: - case -NFS4ERR_GRACE: + goto out_retry; + case NFS4ERR_DELAY: + nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); + fallthrough; + case NFS4ERR_GRACE: rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); - break; - case -NFS4ERR_RETRY_UNCACHED_REP: - break; + goto out_retry; + case NFS4ERR_RETRY_UNCACHED_REP: + goto out_retry; /* Invalidate Layout errors */ - case -NFS4ERR_PNFS_NO_LAYOUT: - case -ESTALE: /* mapped NFS4ERR_STALE */ - case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ - case -EISDIR: /* mapped NFS4ERR_ISDIR */ - case -NFS4ERR_FHEXPIRED: - case -NFS4ERR_WRONG_TYPE: + case NFS4ERR_PNFS_NO_LAYOUT: + case NFS4ERR_STALE: + case NFS4ERR_BADHANDLE: + case NFS4ERR_ISDIR: + case NFS4ERR_FHEXPIRED: + case NFS4ERR_WRONG_TYPE: dprintk("%s Invalid layout error %d\n", __func__, task->tk_status); /* @@ -1152,6 +1163,11 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, pnfs_destroy_layout(NFS_I(inode)); rpc_wake_up(&tbl->slot_tbl_waitq); goto reset; + default: + break; + } + + switch (task->tk_status) { /* RPC connection errors */ case -ECONNREFUSED: case -EHOSTDOWN: @@ -1167,26 +1183,56 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid); rpc_wake_up(&tbl->slot_tbl_waitq); - fallthrough; + break; default: - if (ff_layout_avoid_mds_available_ds(lseg)) - return -NFS4ERR_RESET_TO_PNFS; -reset: - dprintk("%s Retry through MDS. Error %d\n", __func__, - task->tk_status); - return -NFS4ERR_RESET_TO_MDS; + break; } + + if (ff_layout_avoid_mds_available_ds(lseg)) + return -NFS4ERR_RESET_TO_PNFS; +reset: + dprintk("%s Retry through MDS. Error %d\n", __func__, + task->tk_status); + return -NFS4ERR_RESET_TO_MDS; + +out_retry: task->tk_status = 0; return -EAGAIN; } /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ static int ff_layout_async_handle_error_v3(struct rpc_task *task, + u32 op_status, + struct nfs_client *clp, struct pnfs_layout_segment *lseg, u32 idx) { struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + switch (op_status) { + case NFS_OK: + case NFSERR_NXIO: + break; + case NFSERR_PERM: + if (!task->tk_xprt) + break; + xprt_force_disconnect(task->tk_xprt); + goto out_retry; + case NFSERR_ACCES: + case NFSERR_BADHANDLE: + case NFSERR_FBIG: + case NFSERR_IO: + case NFSERR_NOSPC: + case NFSERR_ROFS: + case NFSERR_STALE: + goto out_reset_to_pnfs; + case NFSERR_JUKEBOX: + nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); + goto out_retry; + default: + break; + } + switch (task->tk_status) { /* File access problems. Don't mark the device as unavailable */ case -EACCES: @@ -1205,6 +1251,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task, nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid); } +out_reset_to_pnfs: /* FIXME: Need to prevent infinite looping here. 
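The flexfiles hunks above thread the per-operation NFS status (op_status) into the error handlers so protocol-level errors are classified before falling through to RPC transport errors. A condensed sketch of that two-level dispatch, assuming the NFS4ERR_* constants from <linux/nfs4.h>; RESET_TO_PNFS/RESET_TO_MDS are illustrative stand-ins for -NFS4ERR_RESET_TO_PNFS and -NFS4ERR_RESET_TO_MDS:

    /* Sketch: classify the NFS operation status first, then the
     * RPC-level status; return codes are illustrative. */
    #define RESET_TO_PNFS 1
    #define RESET_TO_MDS  2

    static int classify_ds_error(u32 op_status, int rpc_status)
    {
        switch (op_status) {
        case 0:                 /* NFS4_OK: defer to the RPC status */
            break;
        case NFS4ERR_DELAY:     /* server asks for a later retry */
        case NFS4ERR_GRACE:
            return -EAGAIN;
        default:                /* unexpected op error: go via the MDS */
            return RESET_TO_MDS;
        }

        switch (rpc_status) {
        case -ECONNREFUSED:     /* transport is dead: mark this DS */
        case -EHOSTDOWN:
            return RESET_TO_PNFS;
        }
        return 0;
    }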
*/ return -NFS4ERR_RESET_TO_PNFS; out_retry: @@ -1215,6 +1262,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task, } static int ff_layout_async_handle_error(struct rpc_task *task, + u32 op_status, struct nfs4_state *state, struct nfs_client *clp, struct pnfs_layout_segment *lseg, @@ -1233,10 +1281,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task, switch (vers) { case 3: - return ff_layout_async_handle_error_v3(task, lseg, idx); - case 4: - return ff_layout_async_handle_error_v4(task, state, clp, + return ff_layout_async_handle_error_v3(task, op_status, clp, lseg, idx); + case 4: + return ff_layout_async_handle_error_v4(task, op_status, state, + clp, lseg, idx); default: /* should never happen */ WARN_ON_ONCE(1); @@ -1289,6 +1338,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, switch (status) { case NFS4ERR_DELAY: case NFS4ERR_GRACE: + case NFS4ERR_PERM: break; case NFS4ERR_NXIO: ff_layout_mark_ds_unreachable(lseg, idx); @@ -1321,7 +1371,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task, trace_ff_layout_read_error(hdr); } - err = ff_layout_async_handle_error(task, hdr->args.context->state, + err = ff_layout_async_handle_error(task, hdr->res.op_status, + hdr->args.context->state, hdr->ds_clp, hdr->lseg, hdr->pgio_mirror_idx); @@ -1491,7 +1542,8 @@ static int ff_layout_write_done_cb(struct rpc_task *task, trace_ff_layout_write_error(hdr); } - err = ff_layout_async_handle_error(task, hdr->args.context->state, + err = ff_layout_async_handle_error(task, hdr->res.op_status, + hdr->args.context->state, hdr->ds_clp, hdr->lseg, hdr->pgio_mirror_idx); @@ -1537,8 +1589,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task, trace_ff_layout_commit_error(data); } - err = ff_layout_async_handle_error(task, NULL, data->ds_clp, - data->lseg, data->ds_commit_index); + err = ff_layout_async_handle_error(task, data->res.op_status, + NULL, data->ds_clp, data->lseg, + data->ds_commit_index); trace_nfs4_pnfs_commit_ds(data, err); switch (err) { diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index e58bedfb1dcc1..4a304cf17c4b0 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -49,6 +49,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, struct nfs4_pnfs_ds_addr *da; struct nfs4_ff_layout_ds *new_ds = NULL; struct nfs4_ff_ds_version *ds_versions = NULL; + struct net *net = server->nfs_client->cl_net; u32 mp_count; u32 version_count; __be32 *p; @@ -80,8 +81,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, for (i = 0; i < mp_count; i++) { /* multipath ds */ - da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, - &stream, gfp_flags); + da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } @@ -149,7 +149,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, new_ds->ds_versions = ds_versions; new_ds->ds_versions_cnt = version_count; - new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); if (!new_ds->ds) goto out_err_drain_dsaddrs; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 330273cf94531..8827cb00f86d5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -557,6 +557,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) set_nlink(inode, fattr->nlink); else if (fattr_supported & 
NFS_ATTR_FATTR_NLINK) nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK); + else + set_nlink(inode, 1); if (fattr->valid & NFS_ATTR_FATTR_OWNER) inode->i_uid = fattr->uid; else if (fattr_supported & NFS_ATTR_FATTR_OWNER) @@ -633,6 +635,34 @@ nfs_fattr_fixup_delegated(struct inode *inode, struct nfs_fattr *fattr) } } +static void nfs_set_timestamps_to_ts(struct inode *inode, struct iattr *attr) +{ + unsigned int cache_flags = 0; + + if (attr->ia_valid & ATTR_MTIME_SET) { + struct timespec64 ctime = inode_get_ctime(inode); + struct timespec64 mtime = inode_get_mtime(inode); + struct timespec64 now; + int updated = 0; + + now = inode_set_ctime_current(inode); + if (!timespec64_equal(&now, &ctime)) + updated |= S_CTIME; + + inode_set_mtime_to_ts(inode, attr->ia_mtime); + if (!timespec64_equal(&now, &mtime)) + updated |= S_MTIME; + + inode_maybe_inc_iversion(inode, updated); + cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME; + } + if (attr->ia_valid & ATTR_ATIME_SET) { + inode_set_atime_to_ts(inode, attr->ia_atime); + cache_flags |= NFS_INO_INVALID_ATIME; + } + NFS_I(inode)->cache_validity &= ~cache_flags; +} + static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid) { enum file_time_flags time_flags = 0; @@ -701,14 +731,27 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) { spin_lock(&inode->i_lock); - nfs_update_timestamps(inode, attr->ia_valid); + if (attr->ia_valid & ATTR_MTIME_SET) { + nfs_set_timestamps_to_ts(inode, attr); + attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET| + ATTR_ATIME|ATTR_ATIME_SET); + } else { + nfs_update_timestamps(inode, attr->ia_valid); + attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME); + } spin_unlock(&inode->i_lock); - attr->ia_valid &= ~(ATTR_MTIME | ATTR_ATIME); } else if (nfs_have_delegated_atime(inode) && attr->ia_valid & ATTR_ATIME && !(attr->ia_valid & ATTR_MTIME)) { - nfs_update_delegated_atime(inode); - attr->ia_valid &= ~ATTR_ATIME; + if (attr->ia_valid & ATTR_ATIME_SET) { + spin_lock(&inode->i_lock); + nfs_set_timestamps_to_ts(inode, attr); + spin_unlock(&inode->i_lock); + attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET); + } else { + nfs_update_delegated_atime(inode); + attr->ia_valid &= ~ATTR_ATIME; + } } /* Optimization: if the end result is no change, don't RPC */ @@ -2543,15 +2586,26 @@ EXPORT_SYMBOL_GPL(nfs_net_id); static int nfs_net_init(struct net *net) { struct nfs_net *nn = net_generic(net, nfs_net_id); + int err; nfs_clients_init(net); if (!rpc_proc_register(net, &nn->rpcstats)) { - nfs_clients_exit(net); - return -ENOMEM; + err = -ENOMEM; + goto err_proc_rpc; } - return nfs_fs_proc_net_init(net); + err = nfs_fs_proc_net_init(net); + if (err) + goto err_proc_nfs; + + return 0; + +err_proc_nfs: + rpc_proc_unregister(net, "nfs"); +err_proc_rpc: + nfs_clients_exit(net); + return err; } static void nfs_net_exit(struct net *net) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 11f2b5cb3b06b..77b239b10d418 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -313,14 +313,14 @@ static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src, if (nfs_have_delegated_mtime(inode)) { if (!(cache_validity & NFS_INO_INVALID_ATIME)) - dst[1] &= ~FATTR4_WORD1_TIME_ACCESS; + dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET); if (!(cache_validity & NFS_INO_INVALID_MTIME)) - dst[1] &= ~FATTR4_WORD1_TIME_MODIFY; + dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET); if (!(cache_validity & 
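The nfs_setattr() changes above make utimes() with an explicit time (ATTR_MTIME_SET/ATTR_ATIME_SET) honor delegations: the timestamps are applied locally under i_lock and the ATTR flags are stripped so no SETATTR round trip is sent for them. A condensed sketch of the mtime half that omits the ctime/iversion bookkeeping; have_mtime_delegation() is an illustrative stand-in for nfs_have_delegated_mtime():

    /* Sketch: apply a delegated, caller-supplied mtime locally and
     * strip the flags so the server round trip is skipped. */
    if (have_mtime_delegation(inode) && (attr->ia_valid & ATTR_MTIME)) {
        spin_lock(&inode->i_lock);
        if (attr->ia_valid & ATTR_MTIME_SET)
            inode_set_mtime_to_ts(inode, attr->ia_mtime);
        else
            inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        spin_unlock(&inode->i_lock);
        attr->ia_valid &= ~(ATTR_MTIME | ATTR_MTIME_SET);
    }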
NFS_INO_INVALID_CTIME)) - dst[1] &= ~FATTR4_WORD1_TIME_METADATA; + dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET); } else if (nfs_have_delegated_atime(inode)) { if (!(cache_validity & NFS_INO_INVALID_ATIME)) - dst[1] &= ~FATTR4_WORD1_TIME_ACCESS; + dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET); } } @@ -3957,8 +3957,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f FATTR4_WORD0_CASE_INSENSITIVE | FATTR4_WORD0_CASE_PRESERVING; if (minorversion) - bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT | - FATTR4_WORD2_OPEN_ARGUMENTS; + bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; + if (minorversion > 1) + bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); if (status == 0) { @@ -6173,6 +6174,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, struct nfs_server *server = NFS_SERVER(inode); int ret; + if (unlikely(NFS_FH(inode)->size == 0)) + return -ENODATA; if (!nfs4_server_supports_acls(server, type)) return -EOPNOTSUPP; ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); @@ -6247,6 +6250,9 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, { struct nfs4_exception exception = { }; int err; + + if (unlikely(NFS_FH(inode)->size == 0)) + return -ENODATA; do { err = __nfs4_proc_set_acl(inode, buf, buflen, type); trace_nfs4_set_acl(inode, err); @@ -10813,7 +10819,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) { - ssize_t error, error2, error3; + ssize_t error, error2, error3, error4; size_t left = size; error = generic_listxattr(dentry, list, left); @@ -10836,8 +10842,16 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); if (error3 < 0) return error3; + if (list) { + list += error3; + left -= error3; + } + + error4 = security_inode_listsecurity(d_inode(dentry), list, left); + if (error4 < 0) + return error4; - error += error2 + error3; + error += error2 + error3 + error4; if (size && error > size) return -ERANGE; return error; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 683e09be25adf..6b888e9ff394a 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -2051,8 +2051,10 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) static void nfs_layoutget_end(struct pnfs_layout_hdr *lo) { if (atomic_dec_and_test(&lo->plh_outstanding) && - test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) + test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) { + smp_mb__after_atomic(); wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN); + } } static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo) diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 30d2613e912b8..91ff877185c8a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -60,6 +60,7 @@ struct nfs4_pnfs_ds { struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ char *ds_remotestr; /* comma sep list of addrs */ struct list_head ds_addrs; + const struct net *ds_net; struct nfs_client *ds_clp; refcount_t ds_count; unsigned long ds_state; @@ -415,7 +416,8 @@ int pnfs_generic_commit_pagelist(struct inode *inode, int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max); void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); -struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head 
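The pnfs prototype change here (and the pnfs_nfs.c hunk below) passes the caller's network namespace into nfs4_pnfs_ds_add(), because nfs4_data_server_cache is global and two containers may legitimately use the same data-server addresses. A minimal sketch of a namespace-keyed cache lookup; ds_cache and same_addrs() are illustrative:

    /* Sketch: a global cache keyed by (netns, address list). */
    struct ds_entry {
        struct list_head node;
        const struct net *net;
        struct list_head addrs;
    };

    static struct ds_entry *ds_lookup(const struct net *net,
                                      const struct list_head *addrs)
    {
        struct ds_entry *d;

        list_for_each_entry(d, &ds_cache, node)
            if (d->net == net && same_addrs(&d->addrs, addrs))
                return d;   /* never match across namespaces */
        return NULL;
    }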
*dsaddrs, +struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(const struct net *net, + struct list_head *dsaddrs, gfp_t gfp_flags); void nfs4_pnfs_v3_ds_connect_unload(void); int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index dbef837e871ad..2ee20a0f0b36d 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -604,12 +604,12 @@ _same_data_server_addrs_locked(const struct list_head *dsaddrs1, * Lookup DS by addresses. nfs4_ds_cache_lock is held */ static struct nfs4_pnfs_ds * -_data_server_lookup_locked(const struct list_head *dsaddrs) +_data_server_lookup_locked(const struct net *net, const struct list_head *dsaddrs) { struct nfs4_pnfs_ds *ds; list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) - if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) + if (ds->ds_net == net && _same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) return ds; return NULL; } @@ -716,7 +716,7 @@ nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags) * uncached and return cached struct nfs4_pnfs_ds. */ struct nfs4_pnfs_ds * -nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) +nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags) { struct nfs4_pnfs_ds *tmp_ds, *ds = NULL; char *remotestr; @@ -734,13 +734,14 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags); spin_lock(&nfs4_ds_cache_lock); - tmp_ds = _data_server_lookup_locked(dsaddrs); + tmp_ds = _data_server_lookup_locked(net, dsaddrs); if (tmp_ds == NULL) { INIT_LIST_HEAD(&ds->ds_addrs); list_splice_init(dsaddrs, &ds->ds_addrs); ds->ds_remotestr = remotestr; refcount_set(&ds->ds_count, 1); INIT_LIST_HEAD(&ds->ds_node); + ds->ds_net = net; ds->ds_clp = NULL; list_add(&ds->ds_node, &nfs4_data_server_cache); dprintk("%s add new data server %s\n", __func__, diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 81bd1b9aba176..3c1fa320b3f1b 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -56,7 +56,8 @@ static int nfs_return_empty_folio(struct folio *folio) { folio_zero_segment(folio, 0, folio_size(folio)); folio_mark_uptodate(folio); - folio_unlock(folio); + if (nfs_netfs_folio_unlock(folio)) + folio_unlock(folio); return 0; } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ae5c5e39afa03..da5286514d8c7 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1046,6 +1046,16 @@ int nfs_reconfigure(struct fs_context *fc) sync_filesystem(sb); + /* + * The SB_RDONLY flag has been removed from the superblock during + * mounts to prevent interference between different filesystems. + * Similarly, it is also necessary to ignore the SB_RDONLY flag + * during reconfiguration; otherwise, it may also result in the + * creation of redundant superblocks when mounting a directory with + * different rw and ro flags multiple times. + */ + fc->sb_flags_mask &= ~SB_RDONLY; + /* * Userspace mount programs that send binary options generally send * them populated with default values. We have no way to know which @@ -1303,8 +1313,17 @@ int nfs_get_tree_common(struct fs_context *fc) if (IS_ERR(server)) return PTR_ERR(server); + /* + * When NFS_MOUNT_UNSHARED is not set, NFS forces the sharing of a + * superblock among each filesystem that mounts sub-directories + * belonging to a single exported root path. + * To prevent interference between different filesystems, the + * SB_RDONLY flag should be removed from the superblock. 
+ */ if (server->flags & NFS_MOUNT_UNSHARED) compare_super = NULL; + else + fc->sb_flags &= ~SB_RDONLY; /* -o noac implies -o sync */ if (server->flags & NFS_MOUNT_NOAC) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 7a1fdafa42ea1..02c9f3b312a0e 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -3658,7 +3658,8 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp) struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow; u32 opiter; - if (!cstate->minorversion) + if (rqstp->rq_procinfo != &nfsd_version4.vs_proc[NFSPROC4_COMPOUND] || + cstate->minorversion == 0) return false; if (cstate->spo_must_allowed) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 2fc1919dd3c09..6edeb3bdf81b5 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3382,6 +3382,23 @@ static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr, return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]); } +/* + * Copied from generic_remap_checks/generic_remap_file_range_prep. + * + * These generic functions use the file system's s_blocksize, but + * individual file systems aren't required to use + * generic_remap_file_range_prep. Until there is a mechanism for + * determining a particular file system's (or file's) clone block + * size, this is the best NFSD can do. + */ +static __be32 nfsd4_encode_fattr4_clone_blksize(struct xdr_stream *xdr, + const struct nfsd4_fattr_args *args) +{ + struct inode *inode = d_inode(args->dentry); + + return nfsd4_encode_uint32_t(xdr, inode->i_sb->s_blocksize); +} + #ifdef CONFIG_NFSD_V4_SECURITY_LABEL static __be32 nfsd4_encode_fattr4_sec_label(struct xdr_stream *xdr, const struct nfsd4_fattr_args *args) @@ -3487,7 +3504,7 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = { [FATTR4_MODE_SET_MASKED] = nfsd4_encode_fattr4__noop, [FATTR4_SUPPATTR_EXCLCREAT] = nfsd4_encode_fattr4_suppattr_exclcreat, [FATTR4_FS_CHARSET_CAP] = nfsd4_encode_fattr4__noop, - [FATTR4_CLONE_BLKSIZE] = nfsd4_encode_fattr4__noop, + [FATTR4_CLONE_BLKSIZE] = nfsd4_encode_fattr4_clone_blksize, [FATTR4_SPACE_FREED] = nfsd4_encode_fattr4__noop, [FATTR4_CHANGE_ATTR_TYPE] = nfsd4_encode_fattr4__noop, diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 2e835e7c107ee..dcaa31706394c 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1653,7 +1653,7 @@ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb, */ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info) { - int *nthreads, count = 0, nrpools, i, ret = -EOPNOTSUPP, rem; + int *nthreads, nrpools = 0, i, ret = -EOPNOTSUPP, rem; struct net *net = genl_info_net(info); struct nfsd_net *nn = net_generic(net, nfsd_net_id); const struct nlattr *attr; @@ -1665,12 +1665,11 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info) /* count number of SERVER_THREADS values */ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { if (nla_type(attr) == NFSD_A_SERVER_THREADS) - count++; + nrpools++; } mutex_lock(&nfsd_mutex); - nrpools = max(count, nfsd_nrpools(net)); nthreads = kcalloc(nrpools, sizeof(int), GFP_KERNEL); if (!nthreads) { ret = -ENOMEM; @@ -2331,12 +2330,9 @@ static int __init init_nfsd(void) if (retval) goto out_free_pnfs; nfsd_lockd_init(); /* lockd->nfsd callbacks */ - retval = create_proc_exports_entry(); - if (retval) - goto out_free_lockd; retval = register_pernet_subsys(&nfsd_net_ops); if (retval < 0) - goto out_free_exports; + goto out_free_lockd; retval = register_cld_notifier(); if (retval) goto out_free_subsys; @@ -2345,22 +2341,26 @@ static int 
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 2e835e7c107ee..dcaa31706394c 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1653,7 +1653,7 @@ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
  */
 int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
 {
-	int *nthreads, count = 0, nrpools, i, ret = -EOPNOTSUPP, rem;
+	int *nthreads, nrpools = 0, i, ret = -EOPNOTSUPP, rem;
 	struct net *net = genl_info_net(info);
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 	const struct nlattr *attr;
@@ -1665,12 +1665,11 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
 	/* count number of SERVER_THREADS values */
 	nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
 		if (nla_type(attr) == NFSD_A_SERVER_THREADS)
-			count++;
+			nrpools++;
 	}
 
 	mutex_lock(&nfsd_mutex);
 
-	nrpools = max(count, nfsd_nrpools(net));
 	nthreads = kcalloc(nrpools, sizeof(int), GFP_KERNEL);
 	if (!nthreads) {
 		ret = -ENOMEM;
@@ -2331,12 +2330,9 @@ static int __init init_nfsd(void)
 	if (retval)
 		goto out_free_pnfs;
 	nfsd_lockd_init();	/* lockd->nfsd callbacks */
-	retval = create_proc_exports_entry();
-	if (retval)
-		goto out_free_lockd;
 	retval = register_pernet_subsys(&nfsd_net_ops);
 	if (retval < 0)
-		goto out_free_exports;
+		goto out_free_lockd;
 	retval = register_cld_notifier();
 	if (retval)
 		goto out_free_subsys;
@@ -2345,22 +2341,26 @@ static int __init init_nfsd(void)
 		goto out_free_cld;
 	retval = register_filesystem(&nfsd_fs_type);
 	if (retval)
-		goto out_free_all;
+		goto out_free_nfsd4;
 	retval = genl_register_family(&nfsd_nl_family);
+	if (retval)
+		goto out_free_filesystem;
+	retval = create_proc_exports_entry();
 	if (retval)
 		goto out_free_all;
 	nfsd_localio_ops_init();
 
 	return 0;
 out_free_all:
+	genl_unregister_family(&nfsd_nl_family);
+out_free_filesystem:
+	unregister_filesystem(&nfsd_fs_type);
+out_free_nfsd4:
 	nfsd4_destroy_laundry_wq();
 out_free_cld:
 	unregister_cld_notifier();
 out_free_subsys:
 	unregister_pernet_subsys(&nfsd_net_ops);
-out_free_exports:
-	remove_proc_entry("fs/nfs/exports", NULL);
-	remove_proc_entry("fs/nfs", NULL);
 out_free_lockd:
 	nfsd_lockd_shutdown();
 	nfsd_drc_slab_free();
@@ -2373,14 +2373,14 @@ static int __init init_nfsd(void)
 
 static void __exit exit_nfsd(void)
 {
+	remove_proc_entry("fs/nfs/exports", NULL);
+	remove_proc_entry("fs/nfs", NULL);
 	genl_unregister_family(&nfsd_nl_family);
 	unregister_filesystem(&nfsd_fs_type);
 	nfsd4_destroy_laundry_wq();
 	unregister_cld_notifier();
 	unregister_pernet_subsys(&nfsd_net_ops);
 	nfsd_drc_slab_free();
-	remove_proc_entry("fs/nfs/exports", NULL);
-	remove_proc_entry("fs/nfs", NULL);
 	nfsd_lockd_shutdown();
 	nfsd4_free_slabs();
 	nfsd4_exit_pnfs();
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 49e2f32102ab5..45f1bb2c6f136 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -406,13 +406,13 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
 	if (ret)
 		goto out_filecache;
 
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+	nfsd4_ssc_init_umount_work(nn);
+#endif
 	ret = nfs4_state_start_net(net);
 	if (ret)
 		goto out_reply_cache;
 
-#ifdef CONFIG_NFSD_V4_2_INTER_SSC
-	nfsd4_ssc_init_umount_work(nn);
-#endif
 	nn->nfsd_net_up = true;
 	return 0;
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index ef5061bb56da1..9c51a4ac2627f 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -2103,11 +2103,13 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree,
 
 	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
 	if (ret < 0) {
-		if (unlikely(ret == -ENOENT))
+		if (unlikely(ret == -ENOENT)) {
 			nilfs_crit(btree->b_inode->i_sb,
 				   "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
 				   btree->b_inode->i_ino,
 				   (unsigned long long)key, level);
+			ret = -EINVAL;
+		}
 		goto out;
 	}
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 893ab36824cc2..2d8dc6b35b547 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -273,6 +273,9 @@ static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
 	dat = nilfs_bmap_get_dat(bmap);
 	key = nilfs_bmap_data_get_key(bmap, bh);
 	ptr = nilfs_direct_get_ptr(bmap, key);
+	if (ptr == NILFS_BMAP_INVALID_PTR)
+		return -EINVAL;
+
 	if (!buffer_nilfs_volatile(bh)) {
 		oldreq.pr_entry_nr = ptr;
 		newreq.pr_entry_nr = ptr;
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 78d20e4baa2c9..1bf2a6593dec6 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -2182,6 +2182,10 @@ static int indx_get_entry_to_replace(struct ntfs_index *indx,
 
 		e = hdr_first_de(&n->index->ihdr);
 		fnd_push(fnd, n, e);
+		if (!e) {
+			err = -EINVAL;
+			goto out;
+		}
 
 		if (!de_is_last(e)) {
 			/*
@@ -2203,6 +2207,10 @@ static int indx_get_entry_to_replace(struct ntfs_index *indx,
 
 	n = fnd->nodes[level];
 	te = hdr_first_de(&n->index->ihdr);
+	if (!te) {
+		err = -EINVAL;
+		goto out;
+	}
 	/* Copy the candidate entry into the replacement entry buffer. */
 	re = kmalloc(le16_to_cpu(te->size) + sizeof(u64), GFP_NOFS);
 	if (!re) {
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index a1e11228dafd0..5c05cccd2d40b 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -805,6 +805,10 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 		ret = 0;
 		goto out;
 	}
+	if (is_compressed(ni)) {
+		ret = 0;
+		goto out;
+	}
 
 	ret = blockdev_direct_IO(iocb, inode, iter,
 				 wr ? ntfs_get_block_direct_IO_W :
@@ -2108,5 +2112,6 @@ const struct address_space_operations ntfs_aops_cmpr = {
 	.read_folio	= ntfs_read_folio,
 	.readahead	= ntfs_readahead,
 	.dirty_folio	= block_dirty_folio,
+	.direct_IO	= ntfs_direct_IO,
 };
 // clang-format on
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index e272429da3db3..de7f12858729a 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -674,7 +674,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
 			break;
 	}
 out:
-	kfree(rec);
+	ocfs2_free_quota_recovery(rec);
 	return status;
 }
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 4444c78e2e0c3..94095058da34e 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -48,8 +48,8 @@ static struct file *ovl_open_realfile(const struct file *file,
 		if (!inode_owner_or_capable(real_idmap, realinode))
 			flags &= ~O_NOATIME;
 
-		realfile = backing_file_open(&file->f_path, flags, realpath,
-					     current_cred());
+		realfile = backing_file_open(file_user_path((struct file *) file),
+					     flags, realpath, current_cred());
 	}
 	revert_creds(old_cred);
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 8f080046c59d9..99571de665dde 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -300,7 +300,9 @@ enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path)
 
 struct dentry *ovl_dentry_upper(struct dentry *dentry)
 {
-	return ovl_upperdentry_dereference(OVL_I(d_inode(dentry)));
+	struct inode *inode = d_inode(dentry);
+
+	return inode ? ovl_upperdentry_dereference(OVL_I(inode)) : NULL;
 }
 
 struct dentry *ovl_dentry_lower(struct dentry *dentry)
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index a3eb3b740f766..3604b616311c2 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -42,7 +42,7 @@ static void proc_evict_inode(struct inode *inode)
 
 	head = ei->sysctl;
 	if (head) {
-		RCU_INIT_POINTER(ei->sysctl, NULL);
+		WRITE_ONCE(ei->sysctl, NULL);
 		proc_sys_evict_inode(inode, head);
 	}
 }
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d11ebc055ce0d..e785db5fa499e 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -911,17 +911,21 @@ static int proc_sys_compare(const struct dentry *dentry,
 	struct ctl_table_header *head;
 	struct inode *inode;
 
-	/* Although proc doesn't have negative dentries, rcu-walk means
-	 * that inode here can be NULL */
-	/* AV: can it, indeed? */
-	inode = d_inode_rcu(dentry);
-	if (!inode)
-		return 1;
 	if (name->len != len)
 		return 1;
 	if (memcmp(name->name, str, len))
 		return 1;
-	head = rcu_dereference(PROC_I(inode)->sysctl);
+
+	// false positive is fine here - we'll recheck anyway
+	if (d_in_lookup(dentry))
+		return 0;
+
+	inode = d_inode_rcu(dentry);
+	// we just might have run into dentry in the middle of __dentry_kill()
+	if (!inode)
+		return 1;
+
+	head = READ_ONCE(PROC_I(inode)->sysctl);
 	return !head || !sysctl_is_seen(head);
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 536b7dc453818..72a58681f0316 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -36,9 +36,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 	unsigned long text, lib, swap, anon, file, shmem;
 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
 
-	anon = get_mm_counter(mm, MM_ANONPAGES);
-	file = get_mm_counter(mm, MM_FILEPAGES);
-	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
+	anon = get_mm_counter_sum(mm, MM_ANONPAGES);
+	file = get_mm_counter_sum(mm, MM_FILEPAGES);
+	shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);
 
 	/*
 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
@@ -59,7 +59,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 	text = min(text, mm->exec_vm << PAGE_SHIFT);
 	lib = (mm->exec_vm << PAGE_SHIFT) - text;
 
-	swap = get_mm_counter(mm, MM_SWAPENTS);
+	swap = get_mm_counter_sum(mm, MM_SWAPENTS);
 	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
 	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
 	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
@@ -92,12 +92,12 @@ unsigned long task_statm(struct mm_struct *mm,
 			 unsigned long *shared, unsigned long *text,
 			 unsigned long *data, unsigned long *resident)
 {
-	*shared = get_mm_counter(mm, MM_FILEPAGES) +
-			get_mm_counter(mm, MM_SHMEMPAGES);
+	*shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
+			get_mm_counter_sum(mm, MM_SHMEMPAGES);
 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
 								>> PAGE_SHIFT;
 	*data = mm->data_vm + mm->stack_vm;
-	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
+	*resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
 	return mm->total_vm;
 }
 
@@ -2155,7 +2155,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 			categories |= PAGE_IS_FILE;
 		}
 
-		if (is_zero_pfn(pmd_pfn(pmd)))
+		if (is_huge_zero_pmd(pmd))
 			categories |= PAGE_IS_PFNZERO;
 		if (pmd_soft_dirty(pmd))
 			categories |= PAGE_IS_SOFT_DIRTY;
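The task_mmu.c hunks switch the /proc statistics above from a cheap per-counter read to a summing variant. The tradeoff mirrors the generic percpu_counter API, sketched below (illustrative only; mm uses its own get_mm_counter*() wrappers): the fast read returns the possibly-stale central value, while the sum folds in every CPU's pending delta for an exact answer at higher cost.

	/* Illustrative only; mm uses its own get_mm_counter*() wrappers. */
	#include <linux/percpu_counter.h>

	static unsigned long rss_fast(struct percpu_counter *fbc)
	{
		/* central value only: cheap, may lag by per-CPU batch slack */
		return percpu_counter_read_positive(fbc);
	}

	static unsigned long rss_precise(struct percpu_counter *fbc)
	{
		/* folds in every CPU's pending delta: exact but costlier */
		return percpu_counter_sum_positive(fbc);
	}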
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 7497946672953..d64742ba371aa 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -484,8 +484,17 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
 		spin_lock(&cfids->cfid_list_lock);
 		list_for_each_entry(cfid, &cfids->entries, entry) {
 			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
-			if (tmp_list == NULL)
-				break;
+			if (tmp_list == NULL) {
+				/*
+				 * If the malloc() fails, we won't drop all
+				 * dentries, and unmounting is likely to trigger
+				 * a 'Dentry still in use' error.
+				 */
+				cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
+				spin_unlock(&cfids->cfid_list_lock);
+				spin_unlock(&cifs_sb->tlink_tree_lock);
+				goto done;
+			}
 			spin_lock(&cfid->fid_lock);
 			tmp_list->dentry = cfid->dentry;
 			cfid->dentry = NULL;
@@ -497,6 +506,7 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
 	}
 	spin_unlock(&cifs_sb->tlink_tree_lock);
 
+done:
 	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
 		list_del(&tmp_list->entry);
 		dput(tmp_list->dentry);
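The cached_dir.c fix illustrates a general rule: memory allocated while spinlocks are held must use GFP_ATOMIC, and the failure path has to drop every lock before bailing out rather than silently truncating the work. A minimal sketch of the pattern, with hypothetical names:

	/* Sketch only; names are hypothetical. */
	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head entry;
	};

	static int collect_items(spinlock_t *outer, spinlock_t *inner,
				 struct list_head *out)
	{
		struct item *it;

		spin_lock(outer);
		spin_lock(inner);
		it = kmalloc(sizeof(*it), GFP_ATOMIC);	/* sleeping is forbidden here */
		if (!it) {
			/* unwind every lock before reporting the failure */
			spin_unlock(inner);
			spin_unlock(outer);
			return -ENOMEM;
		}
		list_add(&it->entry, out);
		spin_unlock(inner);
		spin_unlock(outer);
		return 0;
	}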
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index 1dfe79d947a62..bc8a812ff95f8 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -21,10 +21,10 @@ struct cached_dirent {
 struct cached_dirents {
 	bool is_valid:1;
 	bool is_failed:1;
-	struct dir_context *ctx; /*
-				  * Only used to make sure we only take entries
-				  * from a single context. Never dereferenced.
-				  */
+	struct file *file; /*
+			    * Used to associate the cache with a single
+			    * open file instance.
+			    */
 	struct mutex de_mutex;
 	int pos;		 /* Expected ctx->pos */
 	struct list_head entries;
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index e03c890de0a06..c0196be0e65fc 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -362,6 +362,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 	c = 0;
 	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+		struct smbdirect_socket_parameters *sp;
+#endif
+
 		/* channel info will be printed as a part of sessions below */
 		if (SERVER_IS_CHAN(server))
 			continue;
@@ -383,25 +387,26 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 			seq_printf(m, "\nSMBDirect transport not available");
 			goto skip_rdma;
 		}
+		sp = &server->smbd_conn->socket.parameters;
 
 		seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
 			"transport status: %x",
 			server->smbd_conn->protocol,
-			server->smbd_conn->transport_status);
+			server->smbd_conn->socket.status);
 		seq_printf(m, "\nConn receive_credit_max: %x "
 			"send_credit_target: %x max_send_size: %x",
-			server->smbd_conn->receive_credit_max,
-			server->smbd_conn->send_credit_target,
-			server->smbd_conn->max_send_size);
+			sp->recv_credit_max,
+			sp->send_credit_target,
+			sp->max_send_size);
 		seq_printf(m, "\nConn max_fragmented_recv_size: %x "
 			"max_fragmented_send_size: %x max_receive_size:%x",
-			server->smbd_conn->max_fragmented_recv_size,
-			server->smbd_conn->max_fragmented_send_size,
-			server->smbd_conn->max_receive_size);
+			sp->max_fragmented_recv_size,
+			sp->max_fragmented_send_size,
+			sp->max_recv_size);
 		seq_printf(m, "\nConn keep_alive_interval: %x "
 			"max_readwrite_size: %x rdma_readwrite_threshold: %x",
-			server->smbd_conn->keep_alive_interval,
-			server->smbd_conn->max_readwrite_size,
+			sp->keepalive_interval_msec * 1000,
+			sp->max_read_write_size,
 			server->smbd_conn->rdma_readwrite_threshold);
 		seq_printf(m, "\nDebug count_get_receive_buffer: %x "
 			"count_put_receive_buffer: %x count_send_empty: %x",
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index a38b40d68b14f..b74637ae9085a 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -677,6 +677,7 @@ inc_rfc1001_len(void *buf, int count)
 struct TCP_Server_Info {
 	struct list_head tcp_ses_list;
 	struct list_head smb_ses_list;
+	struct list_head rlist; /* reconnect list */
 	spinlock_t srv_lock;	/* protect anything here that is not protected */
 	__u64 conn_id; /* connection identifier (useful for debugging) */
 	int srv_count; /* reference counter */
@@ -739,8 +740,10 @@ struct TCP_Server_Info {
 	char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
 	__u32 sequence_number; /* for signing, protected by srv_mutex */
 	__u32 reconnect_instance; /* incremented on each reconnect */
+	__le32 session_key_id; /* retrieved from negotiate response and sent in session setup request */
 	struct session_key session_key;
 	unsigned long lstrp; /* when we got last response from this server */
+	unsigned long neg_start; /* when negotiate started (jiffies) */
 	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
 #define	CIFS_NEGFLAVOR_UNENCAP	1	/* wct == 17, but no ext_sec */
 #define	CIFS_NEGFLAVOR_EXTENDED	2	/* wct == 17, ext_sec bit set */
@@ -1058,6 +1061,7 @@ struct cifs_chan {
 };
 
 #define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
+#define CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES (0x2)
 
 /*
  * Session structure. One of these for each uid session with a particular host
@@ -1272,6 +1276,7 @@ struct cifs_tcon {
 	bool use_persistent:1; /* use persistent instead of durable handles */
 	bool no_lease:1;    /* Do not request leases on files or directories */
 	bool use_witness:1; /* use witness protocol */
+	bool dummy:1; /* dummy tcon used for reconnecting channels */
 	__le32 capabilities;
 	__u32 share_flags;
 	__u32 maximal_access;
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 28f8ca470770d..688a26aeef3b4 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -557,7 +557,7 @@ typedef union smb_com_session_setup_andx {
 		__le16 MaxBufferSize;
 		__le16 MaxMpxCount;
 		__le16 VcNumber;
-		__u32 SessionKey;
+		__le32 SessionKey;
 		__le16 SecurityBlobLength;
 		__u32 Reserved;
 		__le32 Capabilities;	/* see below */
@@ -576,7 +576,7 @@ typedef union smb_com_session_setup_andx {
 		__le16 MaxBufferSize;
 		__le16 MaxMpxCount;
 		__le16 VcNumber;
-		__u32 SessionKey;
+		__le32 SessionKey;
 		__le16 CaseInsensitivePasswordLength; /* ASCII password len */
 		__le16 CaseSensitivePasswordLength; /* Unicode password length*/
 		__u32 Reserved;	/* see below */
@@ -614,7 +614,7 @@ typedef union smb_com_session_setup_andx {
 		__le16 MaxBufferSize;
 		__le16 MaxMpxCount;
 		__le16 VcNumber;
-		__u32 SessionKey;
+		__le32 SessionKey;
 		__le16 PasswordLength;
 		__u32 Reserved; /* encrypt key len and offset */
 		__le16 ByteCount;
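The cifspdu.h hunks retype SessionKey from __u32 to __le32. Since the client simply echoes the server's value back (see the sess.c change later in this series), the field can be stored and copied in wire order with no conversion, and the annotation lets sparse catch any accidental host-order use. A generic sketch of the convention:

	/* Generic sketch of __le32 wire-field handling. */
	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct wire_hdr {
		__le32 session_key;			/* little-endian on the wire */
	};

	static u32 read_key(const struct wire_hdr *h)
	{
		return le32_to_cpu(h->session_key);	/* convert to CPU order */
	}

	static void echo_key(struct wire_hdr *dst, const struct wire_hdr *src)
	{
		dst->session_key = src->session_key;	/* wire-to-wire: no conversion */
	}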
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 6e938b17875f5..fee7bc9848a36 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -136,6 +136,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
 			struct smb_hdr *out_buf,
 			int *bytes_returned);
 
+void smb2_query_server_interfaces(struct work_struct *work);
 void
 cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
 				bool all_channels);
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 8667f403a0ab6..e3d9367eaec37 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -481,6 +481,7 @@ CIFSSMBNegotiate(const unsigned int xid,
 	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
 	cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
 	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
+	server->session_key_id = pSMBr->SessionKey;
 	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
 	server->timeAdj *= 60;
 
@@ -1309,6 +1310,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
 		break;
 	case MID_REQUEST_SUBMITTED:
 	case MID_RETRY_NEEDED:
+		__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
 		rdata->result = -EAGAIN;
 		if (server->sign && rdata->got_bytes)
 			/* reset bytes number since we can not check a sign */
@@ -1680,6 +1682,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
 		break;
 	case MID_REQUEST_SUBMITTED:
 	case MID_RETRY_NEEDED:
+		__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
 		result = -EAGAIN;
 		break;
 	default:
@@ -2780,10 +2783,10 @@ int cifs_query_reparse_point(const unsigned int xid,
 
 	io_req->TotalParameterCount = 0;
 	io_req->TotalDataCount = 0;
-	io_req->MaxParameterCount = cpu_to_le32(2);
+	io_req->MaxParameterCount = cpu_to_le32(0);
 	/* BB find exact data count max from sess structure BB */
 	io_req->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
-	io_req->MaxSetupCount = 4;
+	io_req->MaxSetupCount = 1;
 	io_req->Reserved = 0;
 	io_req->ParameterOffset = 0;
 	io_req->DataCount = 0;
@@ -2810,6 +2813,22 @@ int cifs_query_reparse_point(const unsigned int xid,
 		goto error;
 	}
 
+	/* SetupCount must be 1, otherwise offset to ByteCount is incorrect. */
+	if (io_rsp->SetupCount != 1) {
+		rc = -EIO;
+		goto error;
+	}
+
+	/*
+	 * ReturnedDataLen is output length of executed IOCTL.
+	 * DataCount is output length transferred over network.
+	 * Check that we have full FSCTL_GET_REPARSE_POINT buffer.
+	 */
+	if (data_count != le16_to_cpu(io_rsp->ReturnedDataLen)) {
+		rc = -EIO;
+		goto error;
+	}
+
 	end = 2 + get_bcc(&io_rsp->hdr) + (__u8 *)&io_rsp->ByteCount;
 	start = (__u8 *)&io_rsp->hdr.Protocol + data_offset;
 	if (start >= end) {
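The cifs_query_reparse_point() checks added above follow a standard defensive-parsing rule: validate both the structural fields that later offset math depends on and the advertised payload length against what actually arrived, before dereferencing the payload. A condensed sketch with hypothetical field names:

	/* Hypothetical response header; mirrors the checks above. */
	#include <linux/errno.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct ioctl_rsp {
		u8	setup_count;		/* must be 1 for the offset math */
		__le16	returned_data_len;	/* length the IOCTL produced */
	};

	static int validate_ioctl_rsp(const struct ioctl_rsp *rsp, u32 data_count)
	{
		if (rsp->setup_count != 1)
			return -EIO;		/* ByteCount offset would be wrong */
		if (data_count != le16_to_cpu(rsp->returned_data_len))
			return -EIO;		/* short transfer: payload incomplete */
		return 0;
	}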
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 8260d0e07a628..ebc380b18da73 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -113,7 +113,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
 	return rc;
 }
 
-static void smb2_query_server_interfaces(struct work_struct *work)
+void smb2_query_server_interfaces(struct work_struct *work)
 {
 	int rc;
 	int xid;
@@ -132,18 +132,22 @@ static void smb2_query_server_interfaces(struct work_struct *work)
 	rc = server->ops->query_server_interfaces(xid, tcon, false);
 	free_xid(xid);
 
-	if (rc) {
-		if (rc == -EOPNOTSUPP)
-			return;
-
+	if (rc)
 		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
 				__func__, rc);
-	}
 
 	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
 			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
 }
 
+#define set_need_reco(server) \
+do { \
+	spin_lock(&server->srv_lock); \
+	if (server->tcpStatus != CifsExiting) \
+		server->tcpStatus = CifsNeedReconnect; \
+	spin_unlock(&server->srv_lock); \
+} while (0)
+
 /*
  * Update the tcpStatus for the server.
  * This is used to signal the cifsd thread to call cifs_reconnect
@@ -157,39 +161,45 @@ void
 cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
 				bool all_channels)
 {
-	struct TCP_Server_Info *pserver;
+	struct TCP_Server_Info *nserver;
 	struct cifs_ses *ses;
+	LIST_HEAD(reco);
 	int i;
 
-	/* If server is a channel, select the primary channel */
-	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
-
 	/* if we need to signal just this channel */
 	if (!all_channels) {
-		spin_lock(&server->srv_lock);
-		if (server->tcpStatus != CifsExiting)
-			server->tcpStatus = CifsNeedReconnect;
-		spin_unlock(&server->srv_lock);
+		set_need_reco(server);
 		return;
 	}
 
-	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
-		if (cifs_ses_exiting(ses))
-			continue;
-		spin_lock(&ses->chan_lock);
-		for (i = 0; i < ses->chan_count; i++) {
-			if (!ses->chans[i].server)
+	if (SERVER_IS_CHAN(server))
+		server = server->primary_server;
+	scoped_guard(spinlock, &cifs_tcp_ses_lock) {
+		set_need_reco(server);
+		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+			spin_lock(&ses->ses_lock);
+			if (ses->ses_status == SES_EXITING) {
+				spin_unlock(&ses->ses_lock);
 				continue;
-
-			spin_lock(&ses->chans[i].server->srv_lock);
-			if (ses->chans[i].server->tcpStatus != CifsExiting)
-				ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-			spin_unlock(&ses->chans[i].server->srv_lock);
+			}
+			spin_lock(&ses->chan_lock);
+			for (i = 1; i < ses->chan_count; i++) {
+				nserver = ses->chans[i].server;
+				if (!nserver)
+					continue;
+				nserver->srv_count++;
+				list_add(&nserver->rlist, &reco);
+			}
+			spin_unlock(&ses->chan_lock);
+			spin_unlock(&ses->ses_lock);
 		}
-		spin_unlock(&ses->chan_lock);
 	}
-	spin_unlock(&cifs_tcp_ses_lock);
+
+	list_for_each_entry_safe(server, nserver, &reco, rlist) {
+		list_del_init(&server->rlist);
+		set_need_reco(server);
+		cifs_put_tcp_session(server, 0);
+	}
 }
 
 /*
@@ -393,6 +403,13 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
 	if (!cifs_tcp_ses_needs_reconnect(server, 1))
 		return 0;
 
+	/*
+	 * if smb session has been marked for reconnect, also reconnect all
+	 * connections. This way, the other connections do not end up bad.
+	 */
+	if (mark_smb_session)
+		cifs_signal_cifsd_for_reconnect(server, mark_smb_session);
+
 	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
 
 	cifs_abort_connection(server);
@@ -401,7 +418,8 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
 		try_to_freeze();
 		cifs_server_lock(server);
 
-		if (!cifs_swn_set_server_dstaddr(server)) {
+		if (!cifs_swn_set_server_dstaddr(server) &&
+		    !SERVER_IS_CHAN(server)) {
 			/* resolve the hostname again to make sure that IP address is up-to-date */
 			rc = reconn_set_ipaddr_from_hostname(server);
 			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
@@ -659,12 +677,12 @@ server_unresponsive(struct TCP_Server_Info *server)
 	/*
 	 * If we're in the process of mounting a share or reconnecting a session
 	 * and the server abruptly shut down (e.g. socket wasn't closed, packet
-	 * had been ACK'ed but no SMB response), don't wait longer than 20s to
-	 * negotiate protocol.
+	 * had been ACK'ed but no SMB response), don't wait longer than 20s from
+	 * when negotiate actually started.
 	 */
 	spin_lock(&server->srv_lock);
 	if (server->tcpStatus == CifsInNegotiate &&
-	    time_after(jiffies, server->lstrp + 20 * HZ)) {
+	    time_after(jiffies, server->neg_start + 20 * HZ)) {
 		spin_unlock(&server->srv_lock);
 		cifs_reconnect(server, false);
 		return true;
@@ -2801,20 +2819,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 	tcon->max_cached_dirs = ctx->max_cached_dirs;
 	tcon->nodelete = ctx->nodelete;
 	tcon->local_lease = ctx->local_lease;
-	INIT_LIST_HEAD(&tcon->pending_opens);
 	tcon->status = TID_GOOD;
 
-	INIT_DELAYED_WORK(&tcon->query_interfaces,
-			  smb2_query_server_interfaces);
 	if (ses->server->dialect >= SMB30_PROT_ID &&
 	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
 		/* schedule query interfaces poll */
 		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
 				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
 	}
-#ifdef CONFIG_CIFS_DFS_UPCALL
-	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
-#endif
 	spin_lock(&cifs_tcp_ses_lock);
 	list_add(&tcon->tcon_list, &ses->tcon_list);
 	spin_unlock(&cifs_tcp_ses_lock);
@@ -3989,7 +4001,9 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
 		return 0;
 	}
 
+	server->lstrp = jiffies;
 	server->tcpStatus = CifsInNegotiate;
+	server->neg_start = jiffies;
 	spin_unlock(&server->srv_lock);
 
 	rc = server->ops->negotiate(xid, ses, server);
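The connect.c change bounds protocol negotiation by a timestamp taken when negotiation starts (neg_start) instead of the last-response time, which unrelated traffic keeps refreshing. The underlying jiffies idiom, for reference:

	/* Sketch of a wrap-safe 20s deadline anchored at state entry. */
	#include <linux/jiffies.h>

	static unsigned long neg_start;		/* like server->neg_start */

	static void negotiate_begin(void)
	{
		neg_start = jiffies;		/* snapshot when the state is entered */
	}

	static bool negotiate_timed_out(void)
	{
		/* time_after() handles jiffies wraparound correctly */
		return time_after(jiffies, neg_start + 20 * HZ);
	}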
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 4373dd64b66d4..57b6b191293ee 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -148,6 +148,12 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
 #ifdef CONFIG_CIFS_DFS_UPCALL
 	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
 #endif
+	INIT_LIST_HEAD(&ret_buf->pending_opens);
+	INIT_DELAYED_WORK(&ret_buf->query_interfaces,
+			  smb2_query_server_interfaces);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+	INIT_DELAYED_WORK(&ret_buf->dfs_cache_work, dfs_cache_refresh);
+#endif
 
 	return ret_buf;
 }
@@ -323,6 +329,14 @@ check_smb_hdr(struct smb_hdr *smb)
 	if (smb->Command == SMB_COM_LOCKING_ANDX)
 		return 0;
 
+	/*
+	 * Windows NT server returns error response (e.g. STATUS_DELETE_PENDING
+	 * or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other)
+	 * for some TRANS2 requests without the RESPONSE flag set in header.
+	 */
+	if (smb->Command == SMB_COM_TRANSACTION2 && smb->Status.CifsError != 0)
+		return 0;
+
 	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
 		 get_mid(smb));
 	return 1;
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index e3f9213131c46..a6655807c0865 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -146,6 +146,9 @@ static char *automount_fullpath(struct dentry *dentry, void *page)
 	}
 	spin_unlock(&tcon->tc_lock);
 
+	if (unlikely(!page))
+		return ERR_PTR(-ENOMEM);
+
 	s = dentry_path_raw(dentry, page, PATH_MAX);
 	if (IS_ERR(s))
 		return s;
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 787d6bcb5d1dc..7bf3214117a91 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -263,7 +263,7 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
 	/* The Mode field in the response can now include the file type as well */
 	fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode),
 					    fattr->cf_cifsattrs & ATTR_DIRECTORY);
-	fattr->cf_dtype = S_DT(le32_to_cpu(info->Mode));
+	fattr->cf_dtype = S_DT(fattr->cf_mode);
 
 	switch (fattr->cf_mode & S_IFMT) {
 	case S_IFLNK:
@@ -850,9 +850,9 @@ static bool emit_cached_dirents(struct cached_dirents *cde,
 }
 
 static void update_cached_dirents_count(struct cached_dirents *cde,
-					struct dir_context *ctx)
+					struct file *file)
 {
-	if (cde->ctx != ctx)
+	if (cde->file != file)
 		return;
 	if (cde->is_valid || cde->is_failed)
 		return;
@@ -861,9 +861,9 @@ static void update_cached_dirents_count(struct cached_dirents *cde,
 }
 
 static void finished_cached_dirents_count(struct cached_dirents *cde,
-					struct dir_context *ctx)
+					struct dir_context *ctx, struct file *file)
 {
-	if (cde->ctx != ctx)
+	if (cde->file != file)
 		return;
 	if (cde->is_valid || cde->is_failed)
 		return;
@@ -876,11 +876,12 @@ static void finished_cached_dirents_count(struct cached_dirents *cde,
 static void add_cached_dirent(struct cached_dirents *cde,
 			      struct dir_context *ctx,
 			      const char *name, int namelen,
-			      struct cifs_fattr *fattr)
+			      struct cifs_fattr *fattr,
+			      struct file *file)
 {
 	struct cached_dirent *de;
 
-	if (cde->ctx != ctx)
+	if (cde->file != file)
 		return;
 	if (cde->is_valid || cde->is_failed)
 		return;
@@ -910,7 +911,8 @@ static void add_cached_dirent(struct cached_dirents *cde,
 static bool cifs_dir_emit(struct dir_context *ctx,
 			  const char *name, int namelen,
 			  struct cifs_fattr *fattr,
-			  struct cached_fid *cfid)
+			  struct cached_fid *cfid,
+			  struct file *file)
 {
 	bool rc;
 	ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
@@ -922,7 +924,7 @@ static bool cifs_dir_emit(struct dir_context *ctx,
 	if (cfid) {
 		mutex_lock(&cfid->dirents.de_mutex);
 		add_cached_dirent(&cfid->dirents, ctx, name, namelen,
-				  fattr);
+				  fattr, file);
 		mutex_unlock(&cfid->dirents.de_mutex);
 	}
 
@@ -1022,7 +1024,7 @@ static int cifs_filldir(char *find_entry, struct file *file,
 	cifs_prime_dcache(file_dentry(file), &name, &fattr);
 
 	return !cifs_dir_emit(ctx, name.name, name.len,
-			      &fattr, cfid);
+			      &fattr, cfid, file);
 }
 
@@ -1073,8 +1075,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		 * we need to initialize scanning and storing the
 		 * directory content.
 		 */
-		if (ctx->pos == 0 && cfid->dirents.ctx == NULL) {
-			cfid->dirents.ctx = ctx;
+		if (ctx->pos == 0 && cfid->dirents.file == NULL) {
+			cfid->dirents.file = file;
 			cfid->dirents.pos = 2;
 		}
 		/*
@@ -1142,7 +1144,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		} else {
 			if (cfid) {
 				mutex_lock(&cfid->dirents.de_mutex);
-				finished_cached_dirents_count(&cfid->dirents, ctx);
+				finished_cached_dirents_count(&cfid->dirents, ctx, file);
 				mutex_unlock(&cfid->dirents.de_mutex);
 			}
 			cifs_dbg(FYI, "Could not find entry\n");
@@ -1183,7 +1185,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		ctx->pos++;
 		if (cfid) {
 			mutex_lock(&cfid->dirents.de_mutex);
-			update_cached_dirents_count(&cfid->dirents, ctx);
+			update_cached_dirents_count(&cfid->dirents, file);
 			mutex_unlock(&cfid->dirents.de_mutex);
 		}
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index b6556fe3dfa11..4d45c31336df1 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -738,7 +738,6 @@ static bool wsl_to_fattr(struct cifs_open_info_data *data,
 	if (!have_xattr_dev && (tag == IO_REPARSE_TAG_LX_CHR || tag == IO_REPARSE_TAG_LX_BLK))
 		return false;
 
-	fattr->cf_dtype = S_DT(fattr->cf_mode);
 	return true;
 }
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 9b32f7821b718..8be7c4d2d9d62 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -473,6 +473,10 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
 
 	ses->chans[chan_index].iface = iface;
 	spin_unlock(&ses->chan_lock);
+
+	spin_lock(&server->srv_lock);
+	memcpy(&server->dstaddr, &iface->sockaddr, sizeof(server->dstaddr));
+	spin_unlock(&server->srv_lock);
 }
 
 static int
@@ -522,8 +526,7 @@ cifs_ses_add_channel(struct cifs_ses *ses,
 	ctx->domainauto = ses->domainAuto;
 	ctx->domainname = ses->domainName;
 
-	/* no hostname for extra channels */
-	ctx->server_hostname = "";
+	ctx->server_hostname = ses->server->hostname;
 
 	ctx->username = ses->user_name;
 	ctx->password = ses->password;
@@ -655,6 +658,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
 					USHRT_MAX));
 	pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
 	pSMB->req.VcNumber = cpu_to_le16(1);
+	pSMB->req.SessionKey = server->session_key_id;
 
 	/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
 
@@ -1711,22 +1715,22 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
 	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
 
 	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
-	if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
-		cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
-		return -ENOSYS;
-	}
-
 	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
 	capabilities |= CAP_EXTENDED_SECURITY;
 	pSMB->req.Capabilities |= cpu_to_le32(capabilities);
 
 	bcc_ptr = sess_data->iov[2].iov_base;
-	/* unicode strings must be word aligned */
-	if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
-		*bcc_ptr = 0;
-		bcc_ptr++;
+
+	if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
+		/* unicode strings must be word aligned */
+		if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+		unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+	} else {
+		ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
 	}
-	unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
 
 	sess_data->iov[2].iov_len = (long) bcc_ptr -
 					(long) sess_data->iov[2].iov_base;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 74bcc51ccd32f..e596bc4837b68 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -504,6 +504,9 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 	wsize = min_t(unsigned int, wsize, server->max_write);
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	if (server->rdma) {
+		struct smbdirect_socket_parameters *sp =
+			&server->smbd_conn->socket.parameters;
+
 		if (server->sign)
 			/*
 			 * Account for SMB2 data transfer packet header and
@@ -511,12 +514,12 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 			 */
 			wsize = min_t(unsigned int,
 				wsize,
-				server->smbd_conn->max_fragmented_send_size -
+				sp->max_fragmented_send_size -
 					SMB2_READWRITE_PDU_HEADER_SIZE -
 					sizeof(struct smb2_transform_hdr));
 		else
 			wsize = min_t(unsigned int,
-				wsize, server->smbd_conn->max_readwrite_size);
+				wsize, sp->max_read_write_size);
 	}
 #endif
 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
@@ -552,6 +555,9 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 	rsize = min_t(unsigned int, rsize, server->max_read);
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	if (server->rdma) {
+		struct smbdirect_socket_parameters *sp =
+			&server->smbd_conn->socket.parameters;
+
 		if (server->sign)
 			/*
 			 * Account for SMB2 data transfer packet header and
@@ -559,12 +565,12 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 			 */
 			rsize = min_t(unsigned int,
 				rsize,
-				server->smbd_conn->max_fragmented_recv_size -
+				sp->max_fragmented_recv_size -
 					SMB2_READWRITE_PDU_HEADER_SIZE -
 					sizeof(struct smb2_transform_hdr));
 		else
 			rsize = min_t(unsigned int,
-				rsize, server->smbd_conn->max_readwrite_size);
+				rsize, sp->max_read_write_size);
 	}
 #endif
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 176be478cd138..d514f95deb7e7 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -428,14 +428,23 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
 	if (!rc &&
 	    (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
 	    server->ops->query_server_interfaces) {
-		mutex_unlock(&ses->session_mutex);
-
 		/*
-		 * query server network interfaces, in case they change
+		 * query server network interfaces, in case they change.
+		 * Also mark the session as pending this update while the query
+		 * is in progress. This will be used to avoid calling
+		 * smb2_reconnect recursively.
 		 */
+		ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
 		xid = get_xid();
 		rc = server->ops->query_server_interfaces(xid, tcon, false);
 		free_xid(xid);
+		ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
+
+		if (!tcon->ipc && !tcon->dummy)
+			queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+					   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+
+		mutex_unlock(&ses->session_mutex);
 
 		if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
 			/*
@@ -455,11 +464,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
 		if (ses->chan_max > ses->chan_count &&
 		    ses->iface_count &&
 		    !SERVER_IS_CHAN(server)) {
-			if (ses->chan_count == 1) {
+			if (ses->chan_count == 1)
 				cifs_server_dbg(VFS, "supports multichannel now\n");
-				queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
-						   (SMB_INTERFACE_POLL_INTERVAL * HZ));
-			}
 
 			cifs_try_adding_channels(ses);
 		}
@@ -577,11 +583,18 @@ static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
 			       struct TCP_Server_Info *server,
 			       void **request_buf, unsigned int *total_len)
 {
-	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
-	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
+	/*
+	 * Skip reconnect in one of the following cases:
+	 * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
+	 * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
+	 * smb2_reconnect (indicated by the CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES
+	 * ses flag)
+	 */
+	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
+	    (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
+	     (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
 		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
 					     request_buf, total_len);
-	}
+
 	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
 				   request_buf, total_len);
 }
@@ -4221,10 +4234,8 @@ void smb2_reconnect_server(struct work_struct *work)
 			}
 			goto done;
 		}
-
 		tcon->status = TID_GOOD;
-		tcon->retry = false;
-		tcon->need_reconnect = false;
+		tcon->dummy = true;
 
 		/* now reconnect sessions for necessary channels */
 		list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
@@ -4858,6 +4869,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
 		break;
 	case MID_REQUEST_SUBMITTED:
 	case MID_RETRY_NEEDED:
+		__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
 		result = -EAGAIN;
 		break;
 	case MID_RESPONSE_MALFORMED:
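The smb2pdu.c change wraps the nested FSCTL_QUERY_NETWORK_INTERFACE_INFO request in a session flag so that smb2_ioctl_req_init() can skip the reconnect step that would otherwise re-enter smb2_reconnect(). Reduced to its essentials (names hypothetical, and without the locking the real code relies on):

	/* Sketch of the recursion guard; names are hypothetical. */
	#include <stdbool.h>

	static bool pending_query;	/* like CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES */

	static int issue(bool allow_reconnect)
	{
		(void)allow_reconnect;	/* stub transport */
		return 0;
	}

	/* request-init path: skip reconnect while the nested query runs */
	static int request_init(void)
	{
		return issue(!pending_query);
	}

	/* reconnect path: the nested query must not trigger reconnect again */
	static int reconnect(void)
	{
		int rc;

		pending_query = true;
		rc = request_init();	/* re-entrancy is now harmless */
		pending_query = false;
		return rc;
	}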
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index b0b7254661e92..ac06f2617f346 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
 #include <linux/folio_queue.h>
+#include "../common/smbdirect/smbdirect_pdu.h"
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
@@ -50,9 +51,6 @@ struct smb_extract_to_rdma {
 static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
 					struct smb_extract_to_rdma *rdma);
 
-/* SMBD version number */
-#define SMBD_V1 0x0100
-
 /* Port numbers for SMBD transport */
 #define SMB_PORT	445
 #define SMBD_PORT	5445
@@ -165,10 +163,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
 {
 	struct smbd_connection *info =
 		container_of(work, struct smbd_connection, disconnect_work);
+	struct smbdirect_socket *sc = &info->socket;
 
-	if (info->transport_status == SMBD_CONNECTED) {
-		info->transport_status = SMBD_DISCONNECTING;
-		rdma_disconnect(info->id);
+	if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
+		sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
+		rdma_disconnect(sc->rdma.cm_id);
 	}
 }
 
@@ -182,6 +181,7 @@ static int smbd_conn_upcall(
 		struct rdma_cm_id *id, struct rdma_cm_event *event)
 {
 	struct smbd_connection *info = id->context;
+	struct smbdirect_socket *sc = &info->socket;
 
 	log_rdma_event(INFO, "event=%d status=%d\n",
 		event->event, event->status);
@@ -205,7 +205,7 @@ static int smbd_conn_upcall(
 
 	case RDMA_CM_EVENT_ESTABLISHED:
 		log_rdma_event(INFO, "connected event=%d\n", event->event);
-		info->transport_status = SMBD_CONNECTED;
+		sc->status = SMBDIRECT_SOCKET_CONNECTED;
 		wake_up_interruptible(&info->conn_wait);
 		break;
 
@@ -213,20 +213,20 @@ static int smbd_conn_upcall(
 	case RDMA_CM_EVENT_UNREACHABLE:
 	case RDMA_CM_EVENT_REJECTED:
 		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
-		info->transport_status = SMBD_DISCONNECTED;
+		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
 		wake_up_interruptible(&info->conn_wait);
 		break;
 
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 	case RDMA_CM_EVENT_DISCONNECTED:
 		/* This happens when we fail the negotiation */
-		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
-			info->transport_status = SMBD_DISCONNECTED;
+		if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
+			sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
 			wake_up(&info->conn_wait);
 			break;
 		}
 
-		info->transport_status = SMBD_DISCONNECTED;
+		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
 		wake_up_interruptible(&info->disconn_wait);
 		wake_up_interruptible(&info->wait_reassembly_queue);
 		wake_up_interruptible_all(&info->wait_send_queue);
@@ -275,6 +275,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
 	int i;
 	struct smbd_request *request =
 		container_of(wc->wr_cqe, struct smbd_request, cqe);
+	struct smbd_connection *info = request->info;
+	struct smbdirect_socket *sc = &info->socket;
 
 	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
 		request, wc->status);
@@ -286,7 +288,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
 	}
 
 	for (i = 0; i < request->num_sge; i++)
-		ib_dma_unmap_single(request->info->id->device,
+		ib_dma_unmap_single(sc->ib.dev,
 			request->sge[i].addr,
 			request->sge[i].length,
 			DMA_TO_DEVICE);
@@ -299,7 +301,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
 	mempool_free(request, request->info->request_mempool);
 }
 
-static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
+static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
 {
 	log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
 		       resp->min_version, resp->max_version,
@@ -318,15 +320,17 @@ static bool process_negotiation_response(
 		struct smbd_response *response, int packet_length)
 {
 	struct smbd_connection *info = response->info;
-	struct smbd_negotiate_resp *packet = smbd_response_payload(response);
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket_parameters *sp = &sc->parameters;
+	struct smbdirect_negotiate_resp *packet = smbd_response_payload(response);
 
-	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
+	if (packet_length < sizeof(struct smbdirect_negotiate_resp)) {
 		log_rdma_event(ERR,
 			"error: packet_length=%d\n", packet_length);
 		return false;
 	}
 
-	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
+	if (le16_to_cpu(packet->negotiated_version) != SMBDIRECT_V1) {
 		log_rdma_event(ERR, "error: negotiated_version=%x\n",
 			le16_to_cpu(packet->negotiated_version));
 		return false;
@@ -347,20 +351,20 @@ static bool process_negotiation_response(
 
 	atomic_set(&info->receive_credits, 0);
 
-	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
+	if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
 		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
 			le32_to_cpu(packet->preferred_send_size));
 		return false;
 	}
-	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
+	sp->max_recv_size = le32_to_cpu(packet->preferred_send_size);
 
 	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
 		log_rdma_event(ERR, "error: max_receive_size=%d\n",
 			le32_to_cpu(packet->max_receive_size));
 		return false;
 	}
-	info->max_send_size = min_t(int, info->max_send_size,
-		le32_to_cpu(packet->max_receive_size));
+	sp->max_send_size = min_t(u32, sp->max_send_size,
+				  le32_to_cpu(packet->max_receive_size));
 
 	if (le32_to_cpu(packet->max_fragmented_size) <
 			SMBD_MIN_FRAGMENTED_SIZE) {
@@ -368,18 +372,18 @@ static bool process_negotiation_response(
 			le32_to_cpu(packet->max_fragmented_size));
 		return false;
 	}
-	info->max_fragmented_send_size =
+	sp->max_fragmented_send_size =
 		le32_to_cpu(packet->max_fragmented_size);
 	info->rdma_readwrite_threshold =
-		rdma_readwrite_threshold > info->max_fragmented_send_size ?
-		info->max_fragmented_send_size :
+		rdma_readwrite_threshold > sp->max_fragmented_send_size ?
+		sp->max_fragmented_send_size :
 		rdma_readwrite_threshold;
 
-	info->max_readwrite_size = min_t(u32,
+	sp->max_read_write_size = min_t(u32,
 			le32_to_cpu(packet->max_readwrite_size),
 			info->max_frmr_depth * PAGE_SIZE);
-	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
+	info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
 
 	return true;
 }
@@ -393,8 +397,9 @@ static void smbd_post_send_credits(struct work_struct *work)
 	struct smbd_connection *info =
 		container_of(work, struct smbd_connection,
 			post_send_credits_work);
+	struct smbdirect_socket *sc = &info->socket;
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		wake_up(&info->wait_receive_queues);
 		return;
 	}
@@ -448,7 +453,7 @@ static void smbd_post_send_credits(struct work_struct *work)
 /* Called from softirq, when recv is done */
 static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct smbd_data_transfer *data_transfer;
+	struct smbdirect_data_transfer *data_transfer;
 	struct smbd_response *response =
 		container_of(wc->wr_cqe, struct smbd_response, cqe);
 	struct smbd_connection *info = response->info;
@@ -474,7 +479,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	switch (response->type) {
 	/* SMBD negotiation response */
 	case SMBD_NEGOTIATE_RESP:
-		dump_smbd_negotiate_resp(smbd_response_payload(response));
+		dump_smbdirect_negotiate_resp(smbd_response_payload(response));
 		info->full_packet_received = true;
 		info->negotiate_done =
 			process_negotiation_response(response, wc->byte_len);
@@ -531,7 +536,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		/* Send a KEEP_ALIVE response right away if requested */
 		info->keep_alive_requested = KEEP_ALIVE_NONE;
 		if (le16_to_cpu(data_transfer->flags) &
-				SMB_DIRECT_RESPONSE_REQUESTED) {
+				SMBDIRECT_FLAG_RESPONSE_REQUESTED) {
 			info->keep_alive_requested = KEEP_ALIVE_PENDING;
 		}
 
@@ -635,32 +640,34 @@ static int smbd_ia_open(
 		struct smbd_connection *info,
 		struct sockaddr *dstaddr, int port)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	int rc;
 
-	info->id = smbd_create_id(info, dstaddr, port);
-	if (IS_ERR(info->id)) {
-		rc = PTR_ERR(info->id);
+	sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
+	if (IS_ERR(sc->rdma.cm_id)) {
+		rc = PTR_ERR(sc->rdma.cm_id);
 		goto out1;
 	}
+	sc->ib.dev = sc->rdma.cm_id->device;
 
-	if (!frwr_is_supported(&info->id->device->attrs)) {
+	if (!frwr_is_supported(&sc->ib.dev->attrs)) {
 		log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
 		log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
-			       info->id->device->attrs.device_cap_flags,
-			       info->id->device->attrs.max_fast_reg_page_list_len);
+			       sc->ib.dev->attrs.device_cap_flags,
+			       sc->ib.dev->attrs.max_fast_reg_page_list_len);
 		rc = -EPROTONOSUPPORT;
 		goto out2;
 	}
 	info->max_frmr_depth = min_t(int,
 		smbd_max_frmr_depth,
-		info->id->device->attrs.max_fast_reg_page_list_len);
+		sc->ib.dev->attrs.max_fast_reg_page_list_len);
 	info->mr_type = IB_MR_TYPE_MEM_REG;
-	if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+	if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
 		info->mr_type = IB_MR_TYPE_SG_GAPS;
 
-	info->pd = ib_alloc_pd(info->id->device, 0);
-	if (IS_ERR(info->pd)) {
-		rc = PTR_ERR(info->pd);
+	sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
+	if (IS_ERR(sc->ib.pd)) {
+		rc = PTR_ERR(sc->ib.pd);
 		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
 		goto out2;
 	}
@@ -668,8 +675,8 @@ static int smbd_ia_open(
 	return 0;
 
 out2:
-	rdma_destroy_id(info->id);
-	info->id = NULL;
+	rdma_destroy_id(sc->rdma.cm_id);
+	sc->rdma.cm_id = NULL;
 
 out1:
 	return rc;
@@ -683,10 +690,12 @@ static int smbd_ia_open(
  */
 static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket_parameters *sp = &sc->parameters;
 	struct ib_send_wr send_wr;
 	int rc = -ENOMEM;
 	struct smbd_request *request;
-	struct smbd_negotiate_req *packet;
+	struct smbdirect_negotiate_req *packet;
 
 	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
 	if (!request)
@@ -695,29 +704,29 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 	request->info = info;
 
 	packet = smbd_request_payload(request);
-	packet->min_version = cpu_to_le16(SMBD_V1);
-	packet->max_version = cpu_to_le16(SMBD_V1);
+	packet->min_version = cpu_to_le16(SMBDIRECT_V1);
+	packet->max_version = cpu_to_le16(SMBDIRECT_V1);
 	packet->reserved = 0;
-	packet->credits_requested = cpu_to_le16(info->send_credit_target);
-	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
-	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
+	packet->credits_requested = cpu_to_le16(sp->send_credit_target);
+	packet->preferred_send_size = cpu_to_le32(sp->max_send_size);
+	packet->max_receive_size = cpu_to_le32(sp->max_recv_size);
 	packet->max_fragmented_size =
-		cpu_to_le32(info->max_fragmented_recv_size);
+		cpu_to_le32(sp->max_fragmented_recv_size);
 
 	request->num_sge = 1;
 	request->sge[0].addr = ib_dma_map_single(
-				info->id->device, (void *)packet,
+				sc->ib.dev, (void *)packet,
 				sizeof(*packet), DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+	if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
 		rc = -EIO;
 		goto dma_mapping_failed;
 	}
 
 	request->sge[0].length = sizeof(*packet);
-	request->sge[0].lkey = info->pd->local_dma_lkey;
+	request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
 
 	ib_dma_sync_single_for_device(
-		info->id->device, request->sge[0].addr,
+		sc->ib.dev, request->sge[0].addr,
 		request->sge[0].length, DMA_TO_DEVICE);
 
 	request->cqe.done = send_done;
@@ -734,14 +743,14 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 		request->sge[0].length, request->sge[0].lkey);
 
 	atomic_inc(&info->send_pending);
-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
+	rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
 	if (!rc)
 		return 0;
 
 	/* if we reach here, post send failed */
 	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
 	atomic_dec(&info->send_pending);
-	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+	ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
 		request->sge[0].length, DMA_TO_DEVICE);
 
 	smbd_disconnect_rdma_connection(info);
@@ -774,10 +783,10 @@ static int manage_credits_prior_sending(struct smbd_connection *info)
 /*
  * Check if we need to send a KEEP_ALIVE message
  * The idle connection timer triggers a KEEP_ALIVE message when expires
- * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
+ * SMBDIRECT_FLAG_RESPONSE_REQUESTED is set in the message flag to have peer send
 * back a response.
 * return value:
- * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
+ * 1 if SMBDIRECT_FLAG_RESPONSE_REQUESTED needs to be set
 * 0: otherwise
 */
@@ -793,6 +802,8 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
 static int smbd_post_send(struct smbd_connection *info,
 		struct smbd_request *request)
 {
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket_parameters *sp = &sc->parameters;
 	struct ib_send_wr send_wr;
 	int rc, i;
 
@@ -801,7 +812,7 @@ static int smbd_post_send(struct smbd_connection *info,
 			"rdma_request sge[%d] addr=0x%llx length=%u\n",
 			i, request->sge[i].addr, request->sge[i].length);
 		ib_dma_sync_single_for_device(
-			info->id->device,
+			sc->ib.dev,
 			request->sge[i].addr,
 			request->sge[i].length,
 			DMA_TO_DEVICE);
@@ -816,7 +827,7 @@ static int smbd_post_send(struct smbd_connection *info,
 	send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags = IB_SEND_SIGNALED;
 
-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
+	rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
 	if (rc) {
 		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
 		smbd_disconnect_rdma_connection(info);
@@ -824,7 +835,7 @@ static int smbd_post_send(struct smbd_connection *info,
 	} else
 		/* Reset timer for idle connection after packet is sent */
 		mod_delayed_work(info->workqueue, &info->idle_timer_work,
-			info->keep_alive_interval*HZ);
+				 msecs_to_jiffies(sp->keepalive_interval_msec));
 
 	return rc;
 }
@@ -833,22 +844,24 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 			       struct iov_iter *iter,
 			       int *_remaining_data_length)
 {
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket_parameters *sp = &sc->parameters;
 	int i, rc;
 	int header_length;
 	int data_length;
 	struct smbd_request *request;
-	struct smbd_data_transfer *packet;
+	struct smbdirect_data_transfer *packet;
 	int new_credits = 0;
 
wait_credit:
 	/* Wait for send credits. A SMBD packet needs one credit */
 	rc = wait_event_interruptible(info->wait_send_queue,
 		atomic_read(&info->send_credits) > 0 ||
-		info->transport_status != SMBD_CONNECTED);
+		sc->status != SMBDIRECT_SOCKET_CONNECTED);
 	if (rc)
 		goto err_wait_credit;
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
 		rc = -EAGAIN;
 		goto err_wait_credit;
@@ -860,17 +873,17 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 
wait_send_queue:
 	wait_event(info->wait_post_send,
-		atomic_read(&info->send_pending) < info->send_credit_target ||
-		info->transport_status != SMBD_CONNECTED);
+		   atomic_read(&info->send_pending) < sp->send_credit_target ||
+		   sc->status != SMBDIRECT_SOCKET_CONNECTED);
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
 		rc = -EAGAIN;
 		goto err_wait_send_queue;
 	}
 
 	if (unlikely(atomic_inc_return(&info->send_pending) >
-				info->send_credit_target)) {
+				sp->send_credit_target)) {
 		atomic_dec(&info->send_pending);
 		goto wait_send_queue;
 	}
@@ -890,8 +903,8 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 			.nr_sge		= 1,
 			.max_sge	= SMBDIRECT_MAX_SEND_SGE,
 			.sge		= request->sge,
-			.device		= info->id->device,
-			.local_dma_lkey	= info->pd->local_dma_lkey,
+			.device		= sc->ib.dev,
+			.local_dma_lkey	= sc->ib.pd->local_dma_lkey,
 			.direction	= DMA_TO_DEVICE,
 		};
 
@@ -909,7 +922,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 
 	/* Fill in the packet header */
 	packet = smbd_request_payload(request);
-	packet->credits_requested = cpu_to_le16(info->send_credit_target);
+	packet->credits_requested = cpu_to_le16(sp->send_credit_target);
 
 	new_credits = manage_credits_prior_sending(info);
 	atomic_add(new_credits, &info->receive_credits);
@@ -919,7 +932,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 
 	packet->flags = 0;
 	if (manage_keep_alive_before_sending(info))
-		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
+		packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);
 
 	packet->reserved = 0;
 	if (!data_length)
@@ -938,23 +951,23 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 		le32_to_cpu(packet->remaining_data_length));
 
 	/* Map the packet to DMA */
-	header_length = sizeof(struct smbd_data_transfer);
+	header_length = sizeof(struct smbdirect_data_transfer);
 	/* If this is a packet without payload, don't send padding */
 	if (!data_length)
-		header_length = offsetof(struct smbd_data_transfer, padding);
+		header_length = offsetof(struct smbdirect_data_transfer, padding);
 
-	request->sge[0].addr = ib_dma_map_single(info->id->device,
+	request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
 						 (void *)packet,
 						 header_length,
 						 DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+	if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
 		rc = -EIO;
 		request->sge[0].addr = 0;
 		goto err_dma;
 	}
 
 	request->sge[0].length = header_length;
-	request->sge[0].lkey = info->pd->local_dma_lkey;
+	request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
 
 	rc = smbd_post_send(info, request);
 	if (!rc)
@@ -963,7 +976,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
err_dma:
 	for (i = 0; i < request->num_sge; i++)
 		if (request->sge[i].addr)
-			ib_dma_unmap_single(info->id->device,
+			ib_dma_unmap_single(sc->ib.dev,
 					    request->sge[i].addr,
 					    request->sge[i].length,
 					    DMA_TO_DEVICE);
@@ -1008,17 +1021,19 @@ static int smbd_post_send_empty(struct smbd_connection *info)
 static int smbd_post_recv(
 		struct smbd_connection *info, struct smbd_response *response)
 {
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket_parameters *sp = &sc->parameters;
 	struct ib_recv_wr recv_wr;
 	int rc = -EIO;
 
 	response->sge.addr = ib_dma_map_single(
-				info->id->device, response->packet,
-				info->max_receive_size, DMA_FROM_DEVICE);
-	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
+				sc->ib.dev, response->packet,
+				sp->max_recv_size, DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
 		return rc;
 
-	response->sge.length = info->max_receive_size;
-	response->sge.lkey = info->pd->local_dma_lkey;
+	response->sge.length = sp->max_recv_size;
+	response->sge.lkey = sc->ib.pd->local_dma_lkey;
 
 	response->cqe.done = recv_done;
 
@@ -1027,9 +1042,9 @@ static int smbd_post_recv(
 	recv_wr.sg_list = &response->sge;
 	recv_wr.num_sge = 1;
 
-	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
+	rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
 	if (rc) {
-		ib_dma_unmap_single(info->id->device, response->sge.addr,
+		ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
 				    response->sge.length, DMA_FROM_DEVICE);
 		smbd_disconnect_rdma_connection(info);
 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
@@ -1187,9 +1202,10 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
 static void put_receive_buffer(
 	struct smbd_connection *info, struct smbd_response *response)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	unsigned long flags;
 
-	ib_dma_unmap_single(info->id->device, response->sge.addr,
+	ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
 		response->sge.length, DMA_FROM_DEVICE);
 
 	spin_lock_irqsave(&info->receive_queue_lock, flags);
@@ -1264,6 +1280,8 @@ static void idle_connection_timer(struct work_struct *work)
 	struct smbd_connection *info = container_of(
 					work, struct smbd_connection,
 					idle_timer_work.work);
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket_parameters *sp = &sc->parameters;
 
 	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
 		log_keep_alive(ERR,
@@ -1278,7 +1296,7 @@ static void idle_connection_timer(struct work_struct *work)
 
 	/* Setup the next idle timeout work */
 	queue_delayed_work(info->workqueue, &info->idle_timer_work,
-			info->keep_alive_interval*HZ);
+			   msecs_to_jiffies(sp->keepalive_interval_msec));
 }
 
 /*
@@ -1289,6 +1307,8 @@ static void idle_connection_timer(struct work_struct *work)
 void smbd_destroy(struct TCP_Server_Info *server)
 {
 	struct smbd_connection *info = server->smbd_conn;
+	struct smbdirect_socket *sc;
+	struct smbdirect_socket_parameters *sp;
 	struct smbd_response *response;
 	unsigned long flags;
 
@@ -1296,19 +1316,22 @@ void smbd_destroy(struct TCP_Server_Info *server)
 		log_rdma_event(INFO, "rdma session already destroyed\n");
 		return;
 	}
+	sc = &info->socket;
+	sp = &sc->parameters;
 
 	log_rdma_event(INFO, "destroying rdma session\n");
-	if (info->transport_status != SMBD_DISCONNECTED) {
-		rdma_disconnect(server->smbd_conn->id);
+	if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
+		rdma_disconnect(sc->rdma.cm_id);
 		log_rdma_event(INFO, "wait for transport being disconnected\n");
 		wait_event_interruptible(
 			info->disconn_wait,
-			info->transport_status == SMBD_DISCONNECTED);
+			sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
 	}
 
 	log_rdma_event(INFO, "destroying qp\n");
-	ib_drain_qp(info->id->qp);
-	rdma_destroy_qp(info->id);
+	ib_drain_qp(sc->ib.qp);
+	rdma_destroy_qp(sc->rdma.cm_id);
+	sc->ib.qp = NULL;
 
 	log_rdma_event(INFO, "cancelling idle timer\n");
 	cancel_delayed_work_sync(&info->idle_timer_work);
@@ -1336,7 +1359,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
 	log_rdma_event(INFO, "free receive buffers\n");
 	wait_event(info->wait_receive_queues,
 		info->count_receive_queue + info->count_empty_packet_queue
-			== info->receive_credit_max);
+			== sp->recv_credit_max);
 	destroy_receive_buffers(info);
 
 	/*
@@ -1355,10 +1378,10 @@ void smbd_destroy(struct TCP_Server_Info *server)
 	}
 	destroy_mr_list(info);
 
-	ib_free_cq(info->send_cq);
-	ib_free_cq(info->recv_cq);
-	ib_dealloc_pd(info->pd);
-	rdma_destroy_id(info->id);
+	ib_free_cq(sc->ib.send_cq);
+	ib_free_cq(sc->ib.recv_cq);
+	ib_dealloc_pd(sc->ib.pd);
+	rdma_destroy_id(sc->rdma.cm_id);
 
 	/* free mempools */
 	mempool_destroy(info->request_mempool);
@@ -1367,7 +1390,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
 	mempool_destroy(info->response_mempool);
 	kmem_cache_destroy(info->response_cache);
 
-	info->transport_status = SMBD_DESTROYED;
+	sc->status = SMBDIRECT_SOCKET_DESTROYED;
 
 	destroy_workqueue(info->workqueue);
 	log_rdma_event(INFO,  "rdma session destroyed\n");
@@ -1392,7 +1415,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
 	 * This is possible if transport is disconnected and we haven't received
 	 * notification from RDMA, but upper layer has detected timeout
 	 */
-	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+	if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
 		log_rdma_event(INFO, "disconnecting transport\n");
 		smbd_destroy(server);
 	}
@@ -1424,37 +1447,47 @@ static void destroy_caches_and_workqueue(struct smbd_connection *info)
 #define MAX_NAME_LEN	80
 static int allocate_caches_and_workqueue(struct smbd_connection *info)
 {
+	struct smbdirect_socket *sc = &info->socket;
+	struct smbdirect_socket_parameters *sp = &sc->parameters;
 	char name[MAX_NAME_LEN];
 	int rc;
 
+	if (WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer)))
+		return -ENOMEM;
+
 	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
 	info->request_cache =
 		kmem_cache_create(
 			name,
 			sizeof(struct smbd_request) +
-				sizeof(struct smbd_data_transfer),
+				sizeof(struct smbdirect_data_transfer),
 			0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!info->request_cache)
 		return -ENOMEM;
 
 	info->request_mempool =
-		mempool_create(info->send_credit_target, mempool_alloc_slab,
+		mempool_create(sp->send_credit_target, mempool_alloc_slab,
 			mempool_free_slab, info->request_cache);
 	if (!info->request_mempool)
 		goto out1;
 
 	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
+
+	struct kmem_cache_args response_args = {
+		.align		= __alignof__(struct smbd_response),
+		.useroffset	= (offsetof(struct smbd_response, packet) +
+				   sizeof(struct smbdirect_data_transfer)),
+		.usersize	= sp->max_recv_size - sizeof(struct smbdirect_data_transfer),
+	};
 	info->response_cache =
-		kmem_cache_create(
-			name,
-			sizeof(struct smbd_response) +
-				info->max_receive_size,
-			0, SLAB_HWCACHE_ALIGN, NULL);
+		kmem_cache_create(name,
+				  sizeof(struct smbd_response) + sp->max_recv_size,
+				  &response_args, SLAB_HWCACHE_ALIGN);
 	if (!info->response_cache)
 		goto out2;
 
 	info->response_mempool =
-		mempool_create(info->receive_credit_max, mempool_alloc_slab,
+		mempool_create(sp->recv_credit_max, mempool_alloc_slab,
 			mempool_free_slab, info->response_cache);
 	if (!info->response_mempool)
 		goto out3;
@@ -1464,7 +1497,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
 	if (!info->workqueue)
 		goto out4;
 
-	rc = allocate_receive_buffers(info, info->receive_credit_max);
+	rc = allocate_receive_buffers(info, sp->recv_credit_max);
 	if (rc) {
 		log_rdma_event(ERR, "failed to allocate receive buffers\n");
 		goto out5;
@@ -1491,6 +1524,8 @@ static struct smbd_connection *_smbd_get_connection(
 {
 	int rc;
 	struct smbd_connection *info;
+	struct smbdirect_socket *sc;
+	struct smbdirect_socket_parameters *sp;
 	struct rdma_conn_param conn_param;
 	struct ib_qp_init_attr qp_attr;
 	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
@@ -1500,101 +1535,102 @@ static struct smbd_connection *_smbd_get_connection(
 	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
 	if (!info)
 		return NULL;
+	sc = &info->socket;
+	sp = &sc->parameters;
 
-	info->transport_status = SMBD_CONNECTING;
+	sc->status = SMBDIRECT_SOCKET_CONNECTING;
 	rc = smbd_ia_open(info, dstaddr, port);
 	if (rc) {
 		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
 		goto create_id_failed;
 	}
 
-	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
-	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
+	if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
+	    smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
 		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
 			       smbd_send_credit_target,
-			       info->id->device->attrs.max_cqe,
-			       info->id->device->attrs.max_qp_wr);
+			       sc->ib.dev->attrs.max_cqe,
+			       sc->ib.dev->attrs.max_qp_wr);
 		goto config_failed;
 	}
 
-	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
-	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
+	if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
+	    smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
 		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
 			       smbd_receive_credit_max,
-			       info->id->device->attrs.max_cqe,
-			       info->id->device->attrs.max_qp_wr);
+			       sc->ib.dev->attrs.max_cqe,
+			       sc->ib.dev->attrs.max_qp_wr);
 		goto config_failed;
 	}
 
-	info->receive_credit_max = smbd_receive_credit_max;
-	info->send_credit_target = smbd_send_credit_target;
-	info->max_send_size = smbd_max_send_size;
-	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
-	info->max_receive_size = smbd_max_receive_size;
-	info->keep_alive_interval = smbd_keep_alive_interval;
+	sp->recv_credit_max = smbd_receive_credit_max;
+	sp->send_credit_target = smbd_send_credit_target;
+	sp->max_send_size = smbd_max_send_size;
+	sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+	sp->max_recv_size = smbd_max_receive_size;
+	sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
 
-	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
-	    info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+	if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+	    sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
 		log_rdma_event(ERR,
 			"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
 			IB_DEVICE_NAME_MAX,
-			info->id->device->name,
-			info->id->device->attrs.max_send_sge,
-			info->id->device->attrs.max_recv_sge);
+			sc->ib.dev->name,
+			sc->ib.dev->attrs.max_send_sge,
+			sc->ib.dev->attrs.max_recv_sge);
 		goto config_failed;
 	}
 
-	info->send_cq = NULL;
-	info->recv_cq = NULL;
-	info->send_cq =
-		ib_alloc_cq_any(info->id->device, info,
-				info->send_credit_target, IB_POLL_SOFTIRQ);
-	if (IS_ERR(info->send_cq)) {
-		info->send_cq = NULL;
+	sc->ib.send_cq =
+		ib_alloc_cq_any(sc->ib.dev, info,
+				sp->send_credit_target, IB_POLL_SOFTIRQ);
	if
(IS_ERR(sc->ib.send_cq)) { + sc->ib.send_cq = NULL; goto alloc_cq_failed; } - info->recv_cq = - ib_alloc_cq_any(info->id->device, info, - info->receive_credit_max, IB_POLL_SOFTIRQ); - if (IS_ERR(info->recv_cq)) { - info->recv_cq = NULL; + sc->ib.recv_cq = + ib_alloc_cq_any(sc->ib.dev, info, + sp->recv_credit_max, IB_POLL_SOFTIRQ); + if (IS_ERR(sc->ib.recv_cq)) { + sc->ib.recv_cq = NULL; goto alloc_cq_failed; } memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.event_handler = smbd_qp_async_error_upcall; qp_attr.qp_context = info; - qp_attr.cap.max_send_wr = info->send_credit_target; - qp_attr.cap.max_recv_wr = info->receive_credit_max; + qp_attr.cap.max_send_wr = sp->send_credit_target; + qp_attr.cap.max_recv_wr = sp->recv_credit_max; qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE; qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE; qp_attr.cap.max_inline_data = 0; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; - qp_attr.send_cq = info->send_cq; - qp_attr.recv_cq = info->recv_cq; + qp_attr.send_cq = sc->ib.send_cq; + qp_attr.recv_cq = sc->ib.recv_cq; qp_attr.port_num = ~0; - rc = rdma_create_qp(info->id, info->pd, &qp_attr); + rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr); if (rc) { log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc); goto create_qp_failed; } + sc->ib.qp = sc->rdma.cm_id->qp; memset(&conn_param, 0, sizeof(conn_param)); conn_param.initiator_depth = 0; conn_param.responder_resources = - min(info->id->device->attrs.max_qp_rd_atom, + min(sc->ib.dev->attrs.max_qp_rd_atom, SMBD_CM_RESPONDER_RESOURCES); info->responder_resources = conn_param.responder_resources; log_rdma_mr(INFO, "responder_resources=%d\n", info->responder_resources); /* Need to send IRD/ORD in private data for iWARP */ - info->id->device->ops.get_port_immutable( - info->id->device, info->id->port_num, &port_immutable); + sc->ib.dev->ops.get_port_immutable( + sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable); if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { ird_ord_hdr[0] = info->responder_resources; ird_ord_hdr[1] = 1; @@ -1615,16 +1651,16 @@ static struct smbd_connection *_smbd_get_connection( init_waitqueue_head(&info->conn_wait); init_waitqueue_head(&info->disconn_wait); init_waitqueue_head(&info->wait_reassembly_queue); - rc = rdma_connect(info->id, &conn_param); + rc = rdma_connect(sc->rdma.cm_id, &conn_param); if (rc) { log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc); goto rdma_connect_failed; } wait_event_interruptible( - info->conn_wait, info->transport_status != SMBD_CONNECTING); + info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING); - if (info->transport_status != SMBD_CONNECTED) { + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { log_rdma_event(ERR, "rdma_connect failed port=%d\n", port); goto rdma_connect_failed; } @@ -1640,7 +1676,7 @@ static struct smbd_connection *_smbd_get_connection( init_waitqueue_head(&info->wait_send_queue); INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); queue_delayed_work(info->workqueue, &info->idle_timer_work, - info->keep_alive_interval*HZ); + msecs_to_jiffies(sp->keepalive_interval_msec)); init_waitqueue_head(&info->wait_send_pending); atomic_set(&info->send_pending, 0); @@ -1675,26 +1711,26 @@ static struct smbd_connection *_smbd_get_connection( negotiation_failed: cancel_delayed_work_sync(&info->idle_timer_work); destroy_caches_and_workqueue(info); - info->transport_status = SMBD_NEGOTIATE_FAILED; + sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; init_waitqueue_head(&info->conn_wait); 
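[The keepalive rework above stores the interval in milliseconds (sp->keepalive_interval_msec) and converts with msecs_to_jiffies() at arm time, instead of multiplying whole seconds by HZ. A minimal sketch of the equivalence, assuming a whole-second interval; interval_sec and keepalive_jiffies() are illustrative names, not part of the patch:

#include <linux/jiffies.h>

/*
 * Illustrative only: for whole seconds, msecs_to_jiffies(s * 1000)
 * yields the same jiffies count as the old "s * HZ" expression, while
 * also allowing sub-second values to be stored in the parameter block.
 */
static unsigned long keepalive_jiffies(unsigned int interval_sec)
{
	return msecs_to_jiffies(interval_sec * 1000);
}
]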
- rdma_disconnect(info->id); + rdma_disconnect(sc->rdma.cm_id); wait_event(info->conn_wait, - info->transport_status == SMBD_DISCONNECTED); + sc->status == SMBDIRECT_SOCKET_DISCONNECTED); allocate_cache_failed: rdma_connect_failed: - rdma_destroy_qp(info->id); + rdma_destroy_qp(sc->rdma.cm_id); create_qp_failed: alloc_cq_failed: - if (info->send_cq) - ib_free_cq(info->send_cq); - if (info->recv_cq) - ib_free_cq(info->recv_cq); + if (sc->ib.send_cq) + ib_free_cq(sc->ib.send_cq); + if (sc->ib.recv_cq) + ib_free_cq(sc->ib.recv_cq); config_failed: - ib_dealloc_pd(info->pd); - rdma_destroy_id(info->id); + ib_dealloc_pd(sc->ib.pd); + rdma_destroy_id(sc->rdma.cm_id); create_id_failed: kfree(info); @@ -1719,34 +1755,39 @@ struct smbd_connection *smbd_get_connection( } /* - * Receive data from receive reassembly queue + * Receive data from the transport's receive reassembly queue * All the incoming data packets are placed in reassembly queue - * buf: the buffer to read data into + * iter: the buffer to read data into * size: the length of data to read * return value: actual data read - * Note: this implementation copies the data from reassebmly queue to receive + * + * Note: this implementation copies the data from reassembly queue to receive * buffers used by upper layer. This is not the optimal code path. A better way * to do it is to not have upper layer allocate its receive buffers but rather * borrow the buffer from reassembly queue, and return it after data is * consumed. But this will require more changes to upper layer code, and also * need to consider packet boundaries while they still being reassembled. */ -static int smbd_recv_buf(struct smbd_connection *info, char *buf, - unsigned int size) +int smbd_recv(struct smbd_connection *info, struct msghdr *msg) { + struct smbdirect_socket *sc = &info->socket; struct smbd_response *response; - struct smbd_data_transfer *data_transfer; + struct smbdirect_data_transfer *data_transfer; + size_t size = iov_iter_count(&msg->msg_iter); int to_copy, to_read, data_read, offset; u32 data_length, remaining_data_length, data_offset; int rc; + if (WARN_ON_ONCE(iov_iter_rw(&msg->msg_iter) == WRITE)) + return -EINVAL; /* It's a bug in upper layer to get there */ + again: /* * No need to hold the reassembly queue lock all the time as we are * the only one reading from the front of the queue. 
The transport * may add more entries to the back of the queue at the same time */ - log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size, + log_read(INFO, "size=%zd info->reassembly_data_length=%d\n", size, info->reassembly_data_length); if (info->reassembly_data_length >= size) { int queue_length; @@ -1784,7 +1825,10 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf, if (response->first_segment && size == 4) { unsigned int rfc1002_len = data_length + remaining_data_length; - *((__be32 *)buf) = cpu_to_be32(rfc1002_len); + __be32 rfc1002_hdr = cpu_to_be32(rfc1002_len); + if (copy_to_iter(&rfc1002_hdr, sizeof(rfc1002_hdr), + &msg->msg_iter) != sizeof(rfc1002_hdr)) + return -EFAULT; data_read = 4; response->first_segment = false; log_read(INFO, "returning rfc1002 length %d\n", @@ -1793,10 +1837,9 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf, } to_copy = min_t(int, data_length - offset, to_read); - memcpy( - buf + data_read, - (char *)data_transfer + data_offset + offset, - to_copy); + if (copy_to_iter((char *)data_transfer + data_offset + offset, + to_copy, &msg->msg_iter) != to_copy) + return -EFAULT; /* move on to the next buffer? */ if (to_copy == data_length - offset) { @@ -1848,12 +1891,12 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf, rc = wait_event_interruptible( info->wait_reassembly_queue, info->reassembly_data_length >= size || - info->transport_status != SMBD_CONNECTED); + sc->status != SMBDIRECT_SOCKET_CONNECTED); /* Don't return any data if interrupted */ if (rc) return rc; - if (info->transport_status != SMBD_CONNECTED) { + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { log_read(ERR, "disconnected\n"); return -ECONNABORTED; } @@ -1861,89 +1904,6 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf, goto again; } -/* - * Receive a page from receive reassembly queue - * page: the page to read data into - * to_read: the length of data to read - * return value: actual data read - */ -static int smbd_recv_page(struct smbd_connection *info, - struct page *page, unsigned int page_offset, - unsigned int to_read) -{ - int ret; - char *to_address; - void *page_address; - - /* make sure we have the page ready for read */ - ret = wait_event_interruptible( - info->wait_reassembly_queue, - info->reassembly_data_length >= to_read || - info->transport_status != SMBD_CONNECTED); - if (ret) - return ret; - - /* now we can read from reassembly queue and not sleep */ - page_address = kmap_atomic(page); - to_address = (char *) page_address + page_offset; - - log_read(INFO, "reading from page=%p address=%p to_read=%d\n", - page, to_address, to_read); - - ret = smbd_recv_buf(info, to_address, to_read); - kunmap_atomic(page_address); - - return ret; -} - -/* - * Receive data from transport - * msg: a msghdr point to the buffer, can be ITER_KVEC or ITER_BVEC - * return: total bytes read, or 0. SMB Direct will not do partial read. 
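[The reworked smbd_recv() above copies directly into the caller's msg->msg_iter with copy_to_iter(), so a single code path now serves both ITER_KVEC and ITER_BVEC callers and the page-kmapping helper removed below becomes unnecessary. A minimal sketch of the copy step, treating a short copy as fatal as the function above does; drain_to_msg() is a hypothetical name:

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/uio.h>

/*
 * Hypothetical helper: push @len bytes at @src into the message
 * iterator; a short copy means a bad iterator and is treated as fatal.
 */
static int drain_to_msg(const void *src, size_t len, struct msghdr *msg)
{
	if (copy_to_iter(src, len, &msg->msg_iter) != len)
		return -EFAULT;
	return 0;
}
]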
- */ -int smbd_recv(struct smbd_connection *info, struct msghdr *msg) -{ - char *buf; - struct page *page; - unsigned int to_read, page_offset; - int rc; - - if (iov_iter_rw(&msg->msg_iter) == WRITE) { - /* It's a bug in upper layer to get there */ - cifs_dbg(VFS, "Invalid msg iter dir %u\n", - iov_iter_rw(&msg->msg_iter)); - rc = -EINVAL; - goto out; - } - - switch (iov_iter_type(&msg->msg_iter)) { - case ITER_KVEC: - buf = msg->msg_iter.kvec->iov_base; - to_read = msg->msg_iter.kvec->iov_len; - rc = smbd_recv_buf(info, buf, to_read); - break; - - case ITER_BVEC: - page = msg->msg_iter.bvec->bv_page; - page_offset = msg->msg_iter.bvec->bv_offset; - to_read = msg->msg_iter.bvec->bv_len; - rc = smbd_recv_page(info, page, page_offset, to_read); - break; - - default: - /* It's a bug in upper layer to get there */ - cifs_dbg(VFS, "Invalid msg type %d\n", - iov_iter_type(&msg->msg_iter)); - rc = -EINVAL; - } - -out: - /* SMBDirect will read it all or nothing */ - if (rc > 0) - msg->msg_iter.count = 0; - return rc; -} - /* * Send data to transport * Each rqst is transported as a SMBDirect payload @@ -1954,12 +1914,14 @@ int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst_array) { struct smbd_connection *info = server->smbd_conn; + struct smbdirect_socket *sc = &info->socket; + struct smbdirect_socket_parameters *sp = &sc->parameters; struct smb_rqst *rqst; struct iov_iter iter; unsigned int remaining_data_length, klen; int rc, i, rqst_idx; - if (info->transport_status != SMBD_CONNECTED) + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) return -EAGAIN; /* @@ -1971,10 +1933,10 @@ int smbd_send(struct TCP_Server_Info *server, for (i = 0; i < num_rqst; i++) remaining_data_length += smb_rqst_len(server, &rqst_array[i]); - if (unlikely(remaining_data_length > info->max_fragmented_send_size)) { + if (unlikely(remaining_data_length > sp->max_fragmented_send_size)) { /* assertion: payload never exceeds negotiated maximum */ log_write(ERR, "payload size %d > max size %d\n", - remaining_data_length, info->max_fragmented_send_size); + remaining_data_length, sp->max_fragmented_send_size); return -EINVAL; } @@ -2053,6 +2015,7 @@ static void smbd_mr_recovery_work(struct work_struct *work) { struct smbd_connection *info = container_of(work, struct smbd_connection, mr_recovery_work); + struct smbdirect_socket *sc = &info->socket; struct smbd_mr *smbdirect_mr; int rc; @@ -2070,7 +2033,7 @@ static void smbd_mr_recovery_work(struct work_struct *work) } smbdirect_mr->mr = ib_alloc_mr( - info->pd, info->mr_type, + sc->ib.pd, info->mr_type, info->max_frmr_depth); if (IS_ERR(smbdirect_mr->mr)) { log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n", @@ -2099,12 +2062,13 @@ static void smbd_mr_recovery_work(struct work_struct *work) static void destroy_mr_list(struct smbd_connection *info) { + struct smbdirect_socket *sc = &info->socket; struct smbd_mr *mr, *tmp; cancel_work_sync(&info->mr_recovery_work); list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { if (mr->state == MR_INVALIDATED) - ib_dma_unmap_sg(info->id->device, mr->sgt.sgl, + ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir); ib_dereg_mr(mr->mr); kfree(mr->sgt.sgl); @@ -2121,6 +2085,7 @@ static void destroy_mr_list(struct smbd_connection *info) */ static int allocate_mr_list(struct smbd_connection *info) { + struct smbdirect_socket *sc = &info->socket; int i; struct smbd_mr *smbdirect_mr, *tmp; @@ -2136,7 +2101,7 @@ static int allocate_mr_list(struct smbd_connection *info) smbdirect_mr = 
kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL); if (!smbdirect_mr) goto cleanup_entries; - smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, + smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type, info->max_frmr_depth); if (IS_ERR(smbdirect_mr->mr)) { log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n", @@ -2181,20 +2146,20 @@ static int allocate_mr_list(struct smbd_connection *info) */ static struct smbd_mr *get_mr(struct smbd_connection *info) { + struct smbdirect_socket *sc = &info->socket; struct smbd_mr *ret; int rc; again: rc = wait_event_interruptible(info->wait_mr, atomic_read(&info->mr_ready_count) || - info->transport_status != SMBD_CONNECTED); + sc->status != SMBDIRECT_SOCKET_CONNECTED); if (rc) { log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc); return NULL; } - if (info->transport_status != SMBD_CONNECTED) { - log_rdma_mr(ERR, "info->transport_status=%x\n", - info->transport_status); + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { + log_rdma_mr(ERR, "sc->status=%x\n", sc->status); return NULL; } @@ -2247,6 +2212,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info, struct iov_iter *iter, bool writing, bool need_invalidate) { + struct smbdirect_socket *sc = &info->socket; struct smbd_mr *smbdirect_mr; int rc, num_pages; enum dma_data_direction dir; @@ -2276,7 +2242,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info, num_pages, iov_iter_count(iter), info->max_frmr_depth); smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth); - rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl, + rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl, smbdirect_mr->sgt.nents, dir); if (!rc) { log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n", @@ -2312,7 +2278,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info, * on IB_WR_REG_MR. 
Hardware enforces a barrier and order of execution * on the next ib_post_send when we actually send I/O to remote peer */ - rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL); + rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL); if (!rc) return smbdirect_mr; @@ -2321,7 +2287,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info, /* If all failed, attempt to recover this MR by setting it MR_ERROR*/ map_mr_error: - ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl, + ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl, smbdirect_mr->sgt.nents, smbdirect_mr->dir); dma_map_error: @@ -2359,6 +2325,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr) { struct ib_send_wr *wr; struct smbd_connection *info = smbdirect_mr->conn; + struct smbdirect_socket *sc = &info->socket; int rc = 0; if (smbdirect_mr->need_invalidate) { @@ -2372,7 +2339,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr) wr->send_flags = IB_SEND_SIGNALED; init_completion(&smbdirect_mr->invalidate_done); - rc = ib_post_send(info->id->qp, wr, NULL); + rc = ib_post_send(sc->ib.qp, wr, NULL); if (rc) { log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc); smbd_disconnect_rdma_connection(info); @@ -2389,7 +2356,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr) if (smbdirect_mr->state == MR_INVALIDATED) { ib_dma_unmap_sg( - info->id->device, smbdirect_mr->sgt.sgl, + sc->ib.dev, smbdirect_mr->sgt.sgl, smbdirect_mr->sgt.nents, smbdirect_mr->dir); smbdirect_mr->state = MR_READY; @@ -2552,13 +2519,14 @@ static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter, size_t fsize = folioq_folio_size(folioq, slot); if (offset < fsize) { - size_t part = umin(maxsize - ret, fsize - offset); + size_t part = umin(maxsize, fsize - offset); if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part)) return -EIO; offset += part; ret += part; + maxsize -= part; } if (offset >= fsize) { @@ -2573,7 +2541,7 @@ static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter, slot = 0; } } - } while (rdma->nr_sge < rdma->max_sge || maxsize > 0); + } while (rdma->nr_sge < rdma->max_sge && maxsize > 0); iter->folioq = folioq; iter->folioq_slot = slot; diff --git a/fs/smb/client/smbdirect.h index c08e3665150d7..3d552ab27e0f3 100644 --- a/fs/smb/client/smbdirect.h +++ b/fs/smb/client/smbdirect.h @@ -15,6 +15,9 @@ #include #include +#include "../common/smbdirect/smbdirect.h" +#include "../common/smbdirect/smbdirect_socket.h" + extern int rdma_readwrite_threshold; extern int smbd_max_frmr_depth; extern int smbd_keep_alive_interval; @@ -50,14 +53,8 @@ enum smbd_connection_status { * 5.
mempools for allocating packets */ struct smbd_connection { - enum smbd_connection_status transport_status; - - /* RDMA related */ - struct rdma_cm_id *id; - struct ib_qp_init_attr qp_attr; - struct ib_pd *pd; - struct ib_cq *send_cq, *recv_cq; - struct ib_device_attr dev_attr; + struct smbdirect_socket socket; + int ri_rc; struct completion ri_done; wait_queue_head_t conn_wait; @@ -72,15 +69,7 @@ struct smbd_connection { spinlock_t lock_new_credits_offered; int new_credits_offered; - /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */ - int receive_credit_max; - int send_credit_target; - int max_send_size; - int max_fragmented_recv_size; - int max_fragmented_send_size; - int max_receive_size; - int keep_alive_interval; - int max_readwrite_size; + /* dynamic connection parameters defined in [MS-SMBD] 3.1.1.1 */ enum keep_alive_status keep_alive_requested; int protocol; atomic_t send_credits; @@ -177,47 +166,6 @@ enum smbd_message_type { SMBD_TRANSFER_DATA, }; -#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001 - -/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */ -struct smbd_negotiate_req { - __le16 min_version; - __le16 max_version; - __le16 reserved; - __le16 credits_requested; - __le32 preferred_send_size; - __le32 max_receive_size; - __le32 max_fragmented_size; -} __packed; - -/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */ -struct smbd_negotiate_resp { - __le16 min_version; - __le16 max_version; - __le16 negotiated_version; - __le16 reserved; - __le16 credits_requested; - __le16 credits_granted; - __le32 status; - __le32 max_readwrite_size; - __le32 preferred_send_size; - __le32 max_receive_size; - __le32 max_fragmented_size; -} __packed; - -/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */ -struct smbd_data_transfer { - __le16 credits_requested; - __le16 credits_granted; - __le16 flags; - __le16 reserved; - __le32 remaining_data_length; - __le32 data_offset; - __le32 data_length; - __le32 padding; - __u8 buffer[]; -} __packed; - /* The packet fields for a registered RDMA buffer */ struct smbd_buffer_descriptor_v1 { __le64 offset; diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h index 12cbd3428a6da..9c3cc7c3300c2 100644 --- a/fs/smb/client/trace.h +++ b/fs/smb/client/trace.h @@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(smb3_rw_err_class, __entry->len = len; __entry->rc = rc; ), - TP_printk("\tR=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d", + TP_printk("R=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d", __entry->rreq_debug_id, __entry->rreq_debug_index, __entry->xid, __entry->sesid, __entry->tid, __entry->fid, __entry->offset, __entry->len, __entry->rc) @@ -190,7 +190,7 @@ DECLARE_EVENT_CLASS(smb3_other_err_class, __entry->len = len; __entry->rc = rc; ), - TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d", + TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d", __entry->xid, __entry->sesid, __entry->tid, __entry->fid, __entry->offset, __entry->len, __entry->rc) ) @@ -247,7 +247,7 @@ DECLARE_EVENT_CLASS(smb3_copy_range_err_class, __entry->len = len; __entry->rc = rc; ), - TP_printk("\txid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x rc=%d", + TP_printk("xid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x rc=%d", __entry->xid, __entry->sesid, __entry->tid, __entry->target_fid, __entry->src_offset, __entry->target_fid, 
__entry->target_offset, __entry->len, __entry->rc) ) @@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(smb3_copy_range_done_class, __entry->target_offset = target_offset; __entry->len = len; ), - TP_printk("\txid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x", + TP_printk("xid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x", __entry->xid, __entry->sesid, __entry->tid, __entry->target_fid, __entry->src_offset, __entry->target_fid, __entry->target_offset, __entry->len) ) @@ -482,7 +482,7 @@ DECLARE_EVENT_CLASS(smb3_fd_class, __entry->tid = tid; __entry->sesid = sesid; ), - TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx", + TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx", __entry->xid, __entry->sesid, __entry->tid, __entry->fid) ) @@ -521,7 +521,7 @@ DECLARE_EVENT_CLASS(smb3_fd_err_class, __entry->sesid = sesid; __entry->rc = rc; ), - TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d", + TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d", __entry->xid, __entry->sesid, __entry->tid, __entry->fid, __entry->rc) ) @@ -793,7 +793,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_err_class, __entry->status = status; __entry->rc = rc; ), - TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d", + TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d", __entry->sesid, __entry->tid, __entry->cmd, __entry->mid, __entry->status, __entry->rc) ) @@ -828,7 +828,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_done_class, __entry->cmd = cmd; __entry->mid = mid; ), - TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu", + TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu", __entry->sesid, __entry->tid, __entry->cmd, __entry->mid) ) @@ -866,7 +866,7 @@ DECLARE_EVENT_CLASS(smb3_mid_class, __entry->when_sent = when_sent; __entry->when_received = when_received; ), - TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu", + TP_printk("cmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu", __entry->cmd, __entry->mid, __entry->pid, __entry->when_sent, __entry->when_received) ) @@ -897,7 +897,7 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class, __assign_str(func_name); __entry->rc = rc; ), - TP_printk("\t%s: xid=%u rc=%d", + TP_printk("%s: xid=%u rc=%d", __get_str(func_name), __entry->xid, __entry->rc) ) @@ -923,7 +923,7 @@ DECLARE_EVENT_CLASS(smb3_sync_err_class, __entry->ino = ino; __entry->rc = rc; ), - TP_printk("\tino=%lu rc=%d", + TP_printk("ino=%lu rc=%d", __entry->ino, __entry->rc) ) @@ -949,7 +949,7 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class, __entry->xid = xid; __assign_str(func_name); ), - TP_printk("\t%s: xid=%u", + TP_printk("%s: xid=%u", __get_str(func_name), __entry->xid) ) diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c index 9f13a705f7f67..35d1871187931 100644 --- a/fs/smb/client/transport.c +++ b/fs/smb/client/transport.c @@ -1029,14 +1029,16 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses) uint index = 0; unsigned int min_in_flight = UINT_MAX, max_in_flight = 0; struct TCP_Server_Info *server = NULL; - int i; + int i, start, cur; if (!ses) return NULL; spin_lock(&ses->chan_lock); + start = atomic_inc_return(&ses->chan_seq); for (i = 0; i < ses->chan_count; i++) { - server = ses->chans[i].server; + cur = (start + i) % ses->chan_count; + server = ses->chans[cur].server; if (!server || server->terminate) continue; @@ -1053,17 +1055,15 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses) */ if (server->in_flight < min_in_flight) 
{ min_in_flight = server->in_flight; - index = i; + index = cur; } if (server->in_flight > max_in_flight) max_in_flight = server->in_flight; } /* if all channels are equally loaded, fall back to round-robin */ - if (min_in_flight == max_in_flight) { - index = (uint)atomic_inc_return(&ses->chan_seq); - index %= ses->chan_count; - } + if (min_in_flight == max_in_flight) + index = (uint)start % ses->chan_count; server = ses->chans[index].server; spin_unlock(&ses->chan_lock); diff --git a/fs/smb/common/smbdirect/smbdirect.h b/fs/smb/common/smbdirect/smbdirect.h new file mode 100644 index 0000000000000..b9a385344ff31 --- /dev/null +++ b/fs/smb/common/smbdirect/smbdirect.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2017, Microsoft Corporation. + * Copyright (C) 2018, LG Electronics. + */ + +#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ +#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ + +/* SMB-DIRECT buffer descriptor V1 structure [MS-SMBD] 2.2.3.1 */ +struct smbdirect_buffer_descriptor_v1 { + __le64 offset; + __le32 token; + __le32 length; +} __packed; + +/* + * Connection parameters mostly from [MS-SMBD] 3.1.1.1 + * + * These are setup and negotiated at the beginning of a + * connection and remain constant unless explicitly changed. + * + * Some values are important for the upper layer. + */ +struct smbdirect_socket_parameters { + __u16 recv_credit_max; + __u16 send_credit_target; + __u32 max_send_size; + __u32 max_fragmented_send_size; + __u32 max_recv_size; + __u32 max_fragmented_recv_size; + __u32 max_read_write_size; + __u32 keepalive_interval_msec; + __u32 keepalive_timeout_msec; +} __packed; + +#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ */ diff --git a/fs/smb/common/smbdirect/smbdirect_pdu.h b/fs/smb/common/smbdirect/smbdirect_pdu.h new file mode 100644 index 0000000000000..ae9fdb05ce231 --- /dev/null +++ b/fs/smb/common/smbdirect/smbdirect_pdu.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2017 Stefan Metzmacher + */ + +#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ +#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ + +#define SMBDIRECT_V1 0x0100 + +/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */ +struct smbdirect_negotiate_req { + __le16 min_version; + __le16 max_version; + __le16 reserved; + __le16 credits_requested; + __le32 preferred_send_size; + __le32 max_receive_size; + __le32 max_fragmented_size; +} __packed; + +/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */ +struct smbdirect_negotiate_resp { + __le16 min_version; + __le16 max_version; + __le16 negotiated_version; + __le16 reserved; + __le16 credits_requested; + __le16 credits_granted; + __le32 status; + __le32 max_readwrite_size; + __le32 preferred_send_size; + __le32 max_receive_size; + __le32 max_fragmented_size; +} __packed; + +#define SMBDIRECT_DATA_MIN_HDR_SIZE 0x14 +#define SMBDIRECT_DATA_OFFSET 0x18 + +#define SMBDIRECT_FLAG_RESPONSE_REQUESTED 0x0001 + +/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */ +struct smbdirect_data_transfer { + __le16 credits_requested; + __le16 credits_granted; + __le16 flags; + __le16 reserved; + __le32 remaining_data_length; + __le32 data_offset; + __le32 data_length; + __le32 padding; + __u8 buffer[]; +} __packed; + +#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ */ diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h new file mode 100644 index 0000000000000..e5b15cc44a7ba --- /dev/null +++ 
b/fs/smb/common/smbdirect/smbdirect_socket.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2025 Stefan Metzmacher + */ + +#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ +#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ + +enum smbdirect_socket_status { + SMBDIRECT_SOCKET_CREATED, + SMBDIRECT_SOCKET_CONNECTING, + SMBDIRECT_SOCKET_CONNECTED, + SMBDIRECT_SOCKET_NEGOTIATE_FAILED, + SMBDIRECT_SOCKET_DISCONNECTING, + SMBDIRECT_SOCKET_DISCONNECTED, + SMBDIRECT_SOCKET_DESTROYED +}; + +struct smbdirect_socket { + enum smbdirect_socket_status status; + + /* RDMA related */ + struct { + struct rdma_cm_id *cm_id; + } rdma; + + /* IB verbs related */ + struct { + struct ib_pd *pd; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + + /* + * shortcuts for rdma.cm_id->{qp,device}; + */ + struct ib_qp *qp; + struct ib_device *dev; + } ib; + + struct smbdirect_socket_parameters parameters; +}; + +#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */ diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c index 7aaea71a4f206..9eb3e6010aa68 100644 --- a/fs/smb/server/connection.c +++ b/fs/smb/server/connection.c @@ -40,7 +40,7 @@ void ksmbd_conn_free(struct ksmbd_conn *conn) kvfree(conn->request_buf); kfree(conn->preauth_info); if (atomic_dec_and_test(&conn->refcnt)) { - ksmbd_free_transport(conn->transport); + conn->transport->ops->free_transport(conn->transport); kfree(conn); } } diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h index 14620e147dda5..dd3e0e3f7bf04 100644 --- a/fs/smb/server/connection.h +++ b/fs/smb/server/connection.h @@ -108,6 +108,7 @@ struct ksmbd_conn { __le16 signing_algorithm; bool binding; atomic_t refcnt; + bool is_aapl; }; struct ksmbd_conn_ops { @@ -132,6 +133,7 @@ struct ksmbd_transport_ops { void *buf, unsigned int len, struct smb2_buffer_desc_v1 *desc, unsigned int desc_len); + void (*free_transport)(struct ksmbd_transport *kt); }; struct ksmbd_transport { diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 03f606afad93a..d7a8a580d0136 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -146,12 +146,9 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci) { struct oplock_info *opinfo; - if (list_empty(&ci->m_op_list)) - return NULL; - down_read(&ci->m_lock); - opinfo = list_first_entry(&ci->m_op_list, struct oplock_info, - op_entry); + opinfo = list_first_entry_or_null(&ci->m_op_list, struct oplock_info, + op_entry); if (opinfo) { if (opinfo->conn == NULL || !atomic_inc_not_zero(&opinfo->refcount)) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 08d9a7cfba8cd..a97a2885730da 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1605,17 +1605,18 @@ static int krb5_authenticate(struct ksmbd_work *work, out_len = work->response_sz - (le16_to_cpu(rsp->SecurityBufferOffset) + 4); - /* Check previous session */ - prev_sess_id = le64_to_cpu(req->PreviousSessionId); - if (prev_sess_id && prev_sess_id != sess->id) - destroy_previous_session(conn, sess->user, prev_sess_id); - retval = ksmbd_krb5_authenticate(sess, in_blob, in_len, out_blob, &out_len); if (retval) { ksmbd_debug(SMB, "krb5 authentication failed\n"); return -EINVAL; } + + /* Check previous session */ + prev_sess_id = le64_to_cpu(req->PreviousSessionId); + if (prev_sess_id && prev_sess_id != sess->id) + destroy_previous_session(conn, sess->user, prev_sess_id); + rsp->SecurityBufferLength = cpu_to_le16(out_len); if ((conn->sign || 
server_conf.enforced_signing) || @@ -2870,7 +2871,7 @@ int smb2_open(struct ksmbd_work *work) int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0; int rc = 0; int contxt_cnt = 0, query_disk_id = 0; - int maximal_access_ctxt = 0, posix_ctxt = 0; + bool maximal_access_ctxt = false, posix_ctxt = false; int s_type = 0; int next_off = 0; char *name = NULL; @@ -2897,6 +2898,27 @@ int smb2_open(struct ksmbd_work *work) return create_smb2_pipe(work); } + if (req->CreateContextsOffset && tcon->posix_extensions) { + context = smb2_find_context_vals(req, SMB2_CREATE_TAG_POSIX, 16); + if (IS_ERR(context)) { + rc = PTR_ERR(context); + goto err_out2; + } else if (context) { + struct create_posix *posix = (struct create_posix *)context; + + if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_posix) - 4) { + rc = -EINVAL; + goto err_out2; + } + ksmbd_debug(SMB, "get posix context\n"); + + posix_mode = le32_to_cpu(posix->Mode); + posix_ctxt = true; + } + } + if (req->NameLength) { name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset), le16_to_cpu(req->NameLength), @@ -2919,9 +2941,11 @@ int smb2_open(struct ksmbd_work *work) goto err_out2; } - rc = ksmbd_validate_filename(name); - if (rc < 0) - goto err_out2; + if (posix_ctxt == false) { + rc = ksmbd_validate_filename(name); + if (rc < 0) + goto err_out2; + } if (ksmbd_share_veto_filename(share, name)) { rc = -ENOENT; @@ -3079,28 +3103,6 @@ int smb2_open(struct ksmbd_work *work) rc = -EBADF; goto err_out2; } - - if (tcon->posix_extensions) { - context = smb2_find_context_vals(req, - SMB2_CREATE_TAG_POSIX, 16); - if (IS_ERR(context)) { - rc = PTR_ERR(context); - goto err_out2; - } else if (context) { - struct create_posix *posix = - (struct create_posix *)context; - if (le16_to_cpu(context->DataOffset) + - le32_to_cpu(context->DataLength) < - sizeof(struct create_posix) - 4) { - rc = -EINVAL; - goto err_out2; - } - ksmbd_debug(SMB, "get posix context\n"); - - posix_mode = le32_to_cpu(posix->Mode); - posix_ctxt = 1; - } - } } if (ksmbd_override_fsids(work)) { @@ -3533,6 +3535,15 @@ int smb2_open(struct ksmbd_work *work) ksmbd_debug(SMB, "get query on disk id context\n"); query_disk_id = 1; } + + if (conn->is_aapl == false) { + context = smb2_find_context_vals(req, SMB2_CREATE_AAPL, 4); + if (IS_ERR(context)) { + rc = PTR_ERR(context); + goto err_out1; + } else if (context) + conn->is_aapl = true; + } } rc = ksmbd_vfs_getattr(&path, &stat); @@ -3972,7 +3983,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level, if (dinfo->EaSize) dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; dinfo->Reserved = 0; - dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); + if (conn->is_aapl) + dinfo->UniqueId = 0; + else + dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); if (d_info->hide_dot_file && d_info->name[0] == '.') dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; memcpy(dinfo->FileName, conv_name, conv_len); @@ -3989,7 +4003,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level, smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode); if (fibdinfo->EaSize) fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; - fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); + if (conn->is_aapl) + fibdinfo->UniqueId = 0; + else + fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); fibdinfo->ShortNameLength = 0; fibdinfo->Reserved = 0; fibdinfo->Reserved2 = cpu_to_le16(0); @@ -8500,11 +8517,6 @@ 
static void smb20_oplock_break_ack(struct ksmbd_work *work) goto err_out; } - opinfo->op_state = OPLOCK_STATE_NONE; - wake_up_interruptible_all(&opinfo->oplock_q); - opinfo_put(opinfo); - ksmbd_fd_put(work, fp); - rsp->StructureSize = cpu_to_le16(24); rsp->OplockLevel = rsp_oplevel; rsp->Reserved = 0; @@ -8512,16 +8524,15 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) rsp->VolatileFid = volatile_id; rsp->PersistentFid = persistent_id; ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break)); - if (!ret) - return; - + if (ret) { err_out: + smb2_set_err_rsp(work); + } + opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); - opinfo_put(opinfo); ksmbd_fd_put(work, fp); - smb2_set_err_rsp(work); } static int check_lease_state(struct lease *lease, __le32 req_state) @@ -8651,11 +8662,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work) } lease_state = lease->state; - opinfo->op_state = OPLOCK_STATE_NONE; - wake_up_interruptible_all(&opinfo->oplock_q); - atomic_dec(&opinfo->breaking_cnt); - wake_up_interruptible_all(&opinfo->oplock_brk); - opinfo_put(opinfo); rsp->StructureSize = cpu_to_le16(36); rsp->Reserved = 0; @@ -8664,16 +8670,16 @@ static void smb21_lease_break_ack(struct ksmbd_work *work) rsp->LeaseState = lease_state; rsp->LeaseDuration = 0; ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack)); - if (!ret) - return; - + if (ret) { err_out: + smb2_set_err_rsp(work); + } + + opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); atomic_dec(&opinfo->breaking_cnt); wake_up_interruptible_all(&opinfo->oplock_brk); - opinfo_put(opinfo); - smb2_set_err_rsp(work); } /** diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h index 17a0b18a8406b..16ae8a10490be 100644 --- a/fs/smb/server/smb2pdu.h +++ b/fs/smb/server/smb2pdu.h @@ -63,6 +63,9 @@ struct preauth_integrity_info { #define SMB2_SESSION_TIMEOUT (10 * HZ) +/* Apple Defined Contexts */ +#define SMB2_CREATE_AAPL "AAPL" + struct create_durable_req_v2 { struct create_context_hdr ccontext; __u8 Name[8]; diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index 7c5a0d712873d..3ab8c04f72e48 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -158,7 +158,8 @@ struct smb_direct_transport { }; #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport)) - +#define SMBD_TRANS(t) ((struct smb_direct_transport *)container_of(t, \ + struct smb_direct_transport, transport)) enum { SMB_DIRECT_MSG_NEGOTIATE_REQ = 0, SMB_DIRECT_MSG_DATA_TRANSFER @@ -409,6 +410,11 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) return NULL; } +static void smb_direct_free_transport(struct ksmbd_transport *kt) +{ + kfree(SMBD_TRANS(kt)); +} + static void free_transport(struct smb_direct_transport *t) { struct smb_direct_recvmsg *recvmsg; @@ -426,7 +432,8 @@ static void free_transport(struct smb_direct_transport *t) if (t->qp) { ib_drain_qp(t->qp); ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); - ib_destroy_qp(t->qp); + t->qp = NULL; + rdma_destroy_qp(t->cm_id); } ksmbd_debug(RDMA, "drain the reassembly queue\n"); @@ -454,7 +461,6 @@ static void free_transport(struct smb_direct_transport *t) smb_direct_destroy_pools(t); ksmbd_conn_free(KSMBD_TRANS(t)->conn); - kfree(t); } static struct smb_direct_sendmsg @@ -1934,8 +1940,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t, return 0; err: if (t->qp) { - ib_destroy_qp(t->qp); t->qp = NULL; + 
rdma_destroy_qp(t->cm_id); } if (t->recv_cq) { ib_destroy_cq(t->recv_cq); @@ -2300,4 +2306,5 @@ static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = { .read = smb_direct_read, .rdma_read = smb_direct_rdma_read, .rdma_write = smb_direct_rdma_write, + .free_transport = smb_direct_free_transport, }; diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c index abedf510899a7..4e9f98db9ff40 100644 --- a/fs/smb/server/transport_tcp.c +++ b/fs/smb/server/transport_tcp.c @@ -93,7 +93,7 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk) return t; } -void ksmbd_free_transport(struct ksmbd_transport *kt) +static void ksmbd_tcp_free_transport(struct ksmbd_transport *kt) { struct tcp_transport *t = TCP_TRANS(kt); @@ -656,4 +656,5 @@ static const struct ksmbd_transport_ops ksmbd_tcp_transport_ops = { .read = ksmbd_tcp_read, .writev = ksmbd_tcp_writev, .disconnect = ksmbd_tcp_disconnect, + .free_transport = ksmbd_tcp_free_transport, }; diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 59ae63ab86857..a662aae5126c0 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -1298,6 +1298,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry); if (err) { + mnt_drop_write(parent_path->mnt); path_put(path); path_put(parent_path); } diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 22e812808e5cf..3a27d4268b3c4 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -202,6 +202,11 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) msblk->panic_on_errors = (opts->errors == Opt_errors_panic); msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE); + if (!msblk->devblksize) { + errorf(fc, "squashfs: unable to set blocksize\n"); + return -EINVAL; + } + msblk->devblksize_log2 = ffz(~msblk->devblksize); mutex_init(&msblk->meta_index_mutex); diff --git a/fs/xattr.c b/fs/xattr.c index 4f5a45338a83a..0191ac2590e09 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -1341,6 +1341,7 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, buffer += err; } remaining_size -= err; + err = 0; read_lock(&xattrs->lock); for (rbp = rb_first(&xattrs->rb_root); rbp; rbp = rb_next(rbp)) { diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index d8c4a5dcca7ae..0b343776da8c3 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -146,6 +146,14 @@ xfs_discard_extents( return error; } +/* + * Care must be taken setting up the trim cursor as the perags may not have been + * initialised when the cursor is initialised. e.g. a clean mount which hasn't + * read in AGFs and the first operation run on the mounted fs is a trim. This + * can result in perag fields that aren't initialised until + * xfs_trim_gather_extents() calls xfs_alloc_read_agf() to lock down the AG for + * the free space search. + */ struct xfs_trim_cur { xfs_agblock_t start; xfs_extlen_t count; @@ -183,6 +191,14 @@ xfs_trim_gather_extents( if (error) goto out_trans_cancel; + /* + * First time through tcur->count will not have been initialised as + * pag->pagf_longest is not guaranteed to be valid before we read + * the AGF buffer above. 
+ */ + if (!tcur->count) + tcur->count = pag->pagf_longest; + if (tcur->by_bno) { /* sub-AG discard request always starts at tcur->start */ cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag); @@ -329,7 +345,6 @@ xfs_trim_perag_extents( { struct xfs_trim_cur tcur = { .start = start, - .count = pag->pagf_longest, .end = end, .minlen = minlen, }; diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 80767e8bf3ad4..d323dfffa4bfc 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -527,7 +527,7 @@ typedef u64 acpi_integer; /* Support for the special RSDP signature (8 characters) */ -#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8)) +#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, (sizeof(a) < 8) ? ACPI_NAMESEG_SIZE : 8)) #define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) /* Support for OEMx signature (x can be any character) */ diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 8c0030c773081..2bc88d2d4a84e 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -300,6 +300,9 @@ struct drm_file { * * Mapping of mm object handles to object pointers. Used by the GEM * subsystem. Protected by @table_lock. + * + * Note that allocated entries might be NULL as a transient state when + * creating or deleting a handle. */ struct idr object_idr; diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 668077009fced..38b24fc8978d3 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -23,6 +23,7 @@ #ifndef __DRM_FRAMEBUFFER_H__ #define __DRM_FRAMEBUFFER_H__ +#include #include #include #include @@ -100,6 +101,8 @@ struct drm_framebuffer_funcs { unsigned num_clips); }; +#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i)) + /** * struct drm_framebuffer - frame buffer object * @@ -188,6 +191,10 @@ struct drm_framebuffer { * DRM_MODE_FB_MODIFIERS. */ int flags; + /** + * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF. + */ + unsigned int internal_flags; /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. 
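[The new internal_flags field and DRM_FRAMEBUFFER_HAS_HANDLE_REF() above encode one "holds a GEM handle reference" bit per framebuffer plane. A hedged sketch of how teardown might consume those bits; put_handle_refs() and the drop callback are illustrative, not taken from the patch:

#include <drm/drm_framebuffer.h>

/*
 * Illustrative only: release the handle reference for every plane that
 * recorded one at create time via DRM_FRAMEBUFFER_HAS_HANDLE_REF(i).
 */
static void put_handle_refs(struct drm_framebuffer *fb,
			    void (*drop)(struct drm_framebuffer *fb, int plane))
{
	int i;

	for (i = 0; i < fb->format->num_planes; i++)
		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
			drop(fb, i);
}
]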
*/ diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h index 125f096c88cb9..ee9df8cc67b73 100644 --- a/include/drm/spsc_queue.h +++ b/include/drm/spsc_queue.h @@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n preempt_disable(); + atomic_inc(&queue->job_count); + smp_mb__after_atomic(); + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); WRITE_ONCE(*tail, node); - atomic_inc(&queue->job_count); /* * In case of first element verify new node will be visible to the consumer diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4d5ee84c468ba..f826bb59556af 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1110,13 +1110,13 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); -#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86) struct acpi_s2idle_dev_ops { struct list_head list_node; void (*prepare)(void); void (*check)(void); void (*restore)(void); }; +#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86) int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); int acpi_get_lps0_constraint(struct acpi_device *adev); @@ -1125,6 +1125,13 @@ static inline int acpi_get_lps0_constraint(struct device *dev) { return ACPI_STATE_UNKNOWN; } +static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) +{ + return -ENODEV; +} +static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg) +{ +} #endif /* CONFIG_SUSPEND && CONFIG_X86 */ void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 255701e1251b4..f652a5028b590 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -46,12 +46,12 @@ int sdei_unregister_ghes(struct ghes *ghes); /* For use by arch code when CPU hotplug notifiers are not appropriate. 
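[The spsc_queue_push() hunk above moves the job_count increment before the tail exchange and adds smp_mb__after_atomic(), so a consumer that can already see the newly linked node is guaranteed to also see a non-zero count. The publish side of that ordering pattern in isolation, with made-up names (count, slot, node):

#include <linux/atomic.h>

/*
 * Sketch of the ordering fix: make the counter update globally visible
 * before the node is published, never after.
 */
static void publish_node(atomic_t *count, long *slot, long node)
{
	atomic_inc(count);
	smp_mb__after_atomic();	/* count before node, matching the patch */
	WRITE_ONCE(*slot, node);
}
]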
*/ int sdei_mask_local_cpu(void); int sdei_unmask_local_cpu(void); -void __init sdei_init(void); +void __init acpi_sdei_init(void); void sdei_handler_abort(void); #else static inline int sdei_mask_local_cpu(void) { return 0; } static inline int sdei_unmask_local_cpu(void) { return 0; } -static inline void sdei_init(void) { } +static inline void acpi_sdei_init(void) { } static inline void sdei_handler_abort(void) { } #endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 9b02961d65ee6..45f2f278b50a8 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -249,6 +249,12 @@ static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb) ATM_SKB(skb)->atm_options = vcc->atm_options; } +static inline void atm_return_tx(struct atm_vcc *vcc, struct sk_buff *skb) +{ + WARN_ON_ONCE(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, + &sk_atm(vcc)->sk_wmem_alloc)); +} + static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) { atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc); diff --git a/include/linux/bio.h b/include/linux/bio.h index 9e98fb87e7ef7..1289b8e487801 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -294,7 +294,7 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio, fi->folio = page_folio(bvec->bv_page); fi->offset = bvec->bv_offset + - PAGE_SIZE * (bvec->bv_page - &fi->folio->page); + PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page); fi->_seg_count = bvec->bv_len; fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count); fi->_next = folio_next(fi->folio); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 50eeb5b86ed70..fb33458f2fc77 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -349,7 +349,7 @@ struct bpf_func_state { #define MAX_CALL_FRAMES 8 -/* instruction history flags, used in bpf_jmp_history_entry.flags field */ +/* instruction history flags, used in bpf_insn_hist_entry.flags field */ enum { /* instruction references stack slot through PTR_TO_STACK register; * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8) @@ -361,18 +361,22 @@ enum { INSN_F_SPI_MASK = 0x3f, /* 6 bits */ INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */ - INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */ + INSN_F_STACK_ACCESS = BIT(9), + + INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */ + INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */ + /* total 12 bits are used now. */ }; static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES); static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8); -struct bpf_jmp_history_entry { +struct bpf_insn_hist_entry { u32 idx; /* insn idx can't be bigger than 1 million */ - u32 prev_idx : 22; - /* special flags, e.g., whether insn is doing register stack spill/load */ - u32 flags : 10; + u32 prev_idx : 20; + /* special INSN_F_xxx flags */ + u32 flags : 12; /* additional registers that need precision tracking when this * jump is backtracked, vector of six 10-bit records */ @@ -458,13 +462,14 @@ struct bpf_verifier_state { * See get_loop_entry() for more information. */ struct bpf_verifier_state *loop_entry; - /* jmp history recorded from first to last. - * backtracking is using it to go from last to first. - * For most states jmp_history_cnt is [0-3]. + /* Sub-range of env->insn_hist[] corresponding to this state's + * instruction history. + * Backtracking is using it to go from last to first. 
+ * For most states instruction history is short, 0-3 instructions. * For loops can go up to ~40. */ - struct bpf_jmp_history_entry *jmp_history; - u32 jmp_history_cnt; + u32 insn_hist_start; + u32 insn_hist_end; u32 dfs_depth; u32 callback_unroll_depth; u32 may_goto_depth; @@ -748,7 +753,9 @@ struct bpf_verifier_env { int cur_stack; } cfg; struct backtrack_state bt; - struct bpf_jmp_history_entry *cur_hist_ent; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h index 5178b72bc9209..eaa7a3f544507 100644 --- a/include/linux/bus/stm32_firewall_device.h +++ b/include/linux/bus/stm32_firewall_device.h @@ -114,27 +114,30 @@ void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 su #else /* CONFIG_STM32_FIREWALL */ -int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, - unsigned int nb_firewall) +static inline int stm32_firewall_get_firewall(struct device_node *np, + struct stm32_firewall *firewall, + unsigned int nb_firewall) { return -ENODEV; } -int stm32_firewall_grant_access(struct stm32_firewall *firewall) +static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall) { return -ENODEV; } -void stm32_firewall_release_access(struct stm32_firewall *firewall) +static inline void stm32_firewall_release_access(struct stm32_firewall *firewall) { } -int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, + u32 subsystem_id) { return -ENODEV; } -void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, + u32 subsystem_id) { } diff --git a/include/linux/bvec.h b/include/linux/bvec.h index f41c7f0ef91ed..a8333b82e766d 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -57,9 +57,12 @@ static inline void bvec_set_page(struct bio_vec *bv, struct page *page, * @offset: offset into the folio */ static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio, - unsigned int len, unsigned int offset) + size_t len, size_t offset) { - bvec_set_page(bv, &folio->page, len, offset); + unsigned long nr = offset / PAGE_SIZE; + + WARN_ON_ONCE(len > UINT_MAX); + bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE); } /** diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 77e6e195d1d68..76e41805b92de 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -28,6 +28,7 @@ struct coredump_params { int vma_count; size_t vma_data_size; struct core_vma_metadata *vma_meta; + struct pid *pid; }; extern unsigned int core_file_note_size_limit; diff --git a/include/linux/coresight.h b/include/linux/coresight.h index f106b10251118..59f99b7da43f5 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -683,7 +683,7 @@ coresight_find_output_type(struct coresight_platform_data *pdata, union coresight_dev_subtype subtype); int coresight_init_driver(const char *drv, struct amba_driver *amba_drv, - struct platform_driver *pdev_drv); + struct platform_driver *pdev_drv, struct module *owner); void coresight_remove_driver(struct amba_driver *amba_drv, struct 
platform_driver *pdev_drv); diff --git a/include/linux/cpu.h b/include/linux/cpu.h index cc668a054d096..4342b56949095 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -79,6 +79,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index bff956f7b2b98..3d53a60145911 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -57,6 +57,7 @@ struct qstr { }; #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } +#define QSTR(n) (struct qstr)QSTR_INIT(n, strlen(n)) extern const struct qstr empty_name; extern const struct qstr slash_name; diff --git a/include/linux/export.h b/include/linux/export.h index 1e04dbc675c2f..b40ae79b767da 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -24,11 +24,17 @@ .long sym #endif -#define ___EXPORT_SYMBOL(sym, license, ns) \ +/* + * LLVM integrated assembler can merge adjacent string literals (like + * C and GNU-as) passed to '.ascii', but not to '.asciz' and chokes on: + * + * .asciz "MODULE_" "kvm" ; + */ +#define ___EXPORT_SYMBOL(sym, license, ns...) \ .section ".export_symbol","a" ASM_NL \ __export_symbol_##sym: ASM_NL \ .asciz license ASM_NL \ - .asciz ns ASM_NL \ + .ascii ns "\0" ASM_NL \ __EXPORT_SYMBOL_REF(sym) ASM_NL \ .previous @@ -70,4 +76,6 @@ #define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns)) #define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns)) +#define EXPORT_SYMBOL_GPL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods) + #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c24f8bc01045d..5206d63b33860 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -78,6 +78,7 @@ enum stop_cp_reason { STOP_CP_REASON_UPDATE_INODE, STOP_CP_REASON_FLUSH_FAIL, STOP_CP_REASON_NO_SEGMENT, + STOP_CP_REASON_CORRUPTED_FREE_BITMAP, STOP_CP_REASON_MAX, }; diff --git a/include/linux/fs.h b/include/linux/fs.h index b98f128c9afa7..a6de8d93838d1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3407,6 +3407,8 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping, extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); +struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name, + const struct inode *context_inode); extern int simple_nosetlease(struct file *, int, struct file_lease **, void **); extern const struct dentry_operations simple_dentry_operations; diff --git a/include/linux/hid.h b/include/linux/hid.h index 018de72505b07..017d31f1d27b8 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -736,8 +736,9 @@ struct hid_descriptor { __le16 bcdHID; __u8 bCountryCode; __u8 bNumDescriptors; + struct hid_class_descriptor rpt_desc; - struct hid_class_descriptor desc[1]; + struct hid_class_descriptor opt_descs[]; } __attribute__ ((packed)); #define HID_DEVICE(b, g, ven, prod) \ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 12f7a7b9c06e9..3897f4492e1f4 100644 ---
a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -272,6 +272,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma, bool is_hugetlb_entry_migration(pte_t pte); bool is_hugetlb_entry_hwpoisoned(pte_t pte); void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); +void hugetlb_split(struct vm_area_struct *vma, unsigned long addr); #else /* !CONFIG_HUGETLB_PAGE */ @@ -465,6 +466,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } +static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {} + #endif /* !CONFIG_HUGETLB_PAGE */ #ifndef pgd_write diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 777f6aa8efa7b..7ecdde54e1edd 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -111,6 +111,8 @@ /* bits unique to S1G beacon */ #define IEEE80211_S1G_BCN_NEXT_TBTT 0x100 +#define IEEE80211_S1G_BCN_CSSID 0x200 +#define IEEE80211_S1G_BCN_ANO 0x400 /* see 802.11ah-2016 9.9 NDP CMAC frames */ #define IEEE80211_S1G_1MHZ_NDP_BITS 25 @@ -153,9 +155,6 @@ #define IEEE80211_ANO_NETTYPE_WILD 15 -/* bits unique to S1G beacon */ -#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100 - /* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */ #define IEEE80211_CTL_EXT_POLL 0x2000 #define IEEE80211_CTL_EXT_SPR 0x3000 @@ -628,17 +627,41 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc) } /** - * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon + * ieee80211_s1g_has_next_tbtt - check if IEEE80211_S1G_BCN_NEXT_TBTT * @fc: frame control bytes in little-endian byteorder - * Return: whether or not the frame is an S1G short beacon, - * i.e. it is an S1G beacon with 'next TBTT' flag set + * Return: whether or not the frame contains the variable-length + * next TBTT field */ -static inline bool ieee80211_is_s1g_short_beacon(__le16 fc) +static inline bool ieee80211_s1g_has_next_tbtt(__le16 fc) { return ieee80211_is_s1g_beacon(fc) && (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT)); } +/** + * ieee80211_s1g_has_ano - check if IEEE80211_S1G_BCN_ANO + * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame contains the variable-length + * ANO field + */ +static inline bool ieee80211_s1g_has_ano(__le16 fc) +{ + return ieee80211_is_s1g_beacon(fc) && + (fc & cpu_to_le16(IEEE80211_S1G_BCN_ANO)); +} + +/** + * ieee80211_s1g_has_cssid - check if IEEE80211_S1G_BCN_CSSID + * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame contains the variable-length + * compressed SSID field + */ +static inline bool ieee80211_s1g_has_cssid(__le16 fc) +{ + return ieee80211_is_s1g_beacon(fc) && + (fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID)); +} + /** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder @@ -1245,16 +1268,40 @@ struct ieee80211_ext { u8 change_seq; u8 variable[0]; } __packed s1g_beacon; - struct { - u8 sa[ETH_ALEN]; - __le32 timestamp; - u8 change_seq; - u8 next_tbtt[3]; - u8 variable[0]; - } __packed s1g_short_beacon; } u; } __packed __aligned(2); +/** + * ieee80211_s1g_optional_len - determine length of optional S1G beacon fields + * @fc: frame control bytes in little-endian byteorder + * Return: total length in bytes of the optional fixed-length fields + * + * S1G beacons may contain up to three optional fixed-length fields that + * precede the variable-length elements. 
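Taken together, the three flag helpers above gate optional fixed-length fields whose sizes the kernel-doc spells out next (3-byte Next TBTT, 4-byte Compressed SSID, 1-byte ANO). A minimal userspace sketch of the same computation, with the flag values copied from the defines above:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define S1G_BCN_NEXT_TBTT	0x100	/* 3-byte Next TBTT field present */
#define S1G_BCN_CSSID		0x200	/* 4-byte Compressed SSID field present */
#define S1G_BCN_ANO		0x400	/* 1-byte ANO field present */

static size_t s1g_optional_len(uint16_t fc)
{
	size_t len = 0;

	if (fc & S1G_BCN_NEXT_TBTT)
		len += 3;
	if (fc & S1G_BCN_CSSID)
		len += 4;
	if (fc & S1G_BCN_ANO)
		len += 1;
	return len;
}

int main(void)
{
	/* Next TBTT + Compressed SSID present: 3 + 4 = 7 bytes */
	printf("%zu\n", s1g_optional_len(S1G_BCN_NEXT_TBTT | S1G_BCN_CSSID));
	return 0;
}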
Whether these fields are present + * is indicated by flags in the frame control field. + * + * From IEEE 802.11-2024 section 9.3.4.3: + * - Next TBTT field may be 0 or 3 bytes + * - Short SSID field may be 0 or 4 bytes + * - Access Network Options (ANO) field may be 0 or 1 byte + */ +static inline size_t +ieee80211_s1g_optional_len(__le16 fc) +{ + size_t len = 0; + + if (ieee80211_s1g_has_next_tbtt(fc)) + len += 3; + + if (ieee80211_s1g_has_cssid(fc)) + len += 4; + + if (ieee80211_s1g_has_ano(fc)) + len += 1; + + return len; +} + #define IEEE80211_TWT_CONTROL_NDP BIT(0) #define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1) #define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3) @@ -4804,6 +4851,39 @@ static inline bool ieee80211_is_ftm(struct sk_buff *skb) return false; } +/** + * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon + * @fc: frame control bytes in little-endian byteorder + * @variable: pointer to the beacon frame elements + * @variable_len: length of the frame elements + * Return: whether or not the frame is an S1G short beacon. As per + * IEEE80211-2024 11.1.3.10.1, the S1G beacon compatibility element shall + * always be present as the first element in beacon frames generated at a + * TBTT (Target Beacon Transmission Time), so any frame not containing + * this element must have been generated at a TSBTT (Target Short Beacon + * Transmission Time) that is not a TBTT. Additionally, short beacons are + * prohibited from containing the S1G beacon compatibility element as per + * IEEE80211-2024 9.3.4.3 Table 9-76, so if we have an S1G beacon with + * either no elements or the first element is not the beacon compatibility + * element, we have a short beacon. + */ +static inline bool ieee80211_is_s1g_short_beacon(__le16 fc, const u8 *variable, + size_t variable_len) +{ + if (!ieee80211_is_s1g_beacon(fc)) + return false; + + /* + * If the frame does not contain at least 1 element (this is perfectly + * valid in a short beacon) and is an S1G beacon, we have a short + * beacon. + */ + if (variable_len < 2) + return true; + + return variable[0] != WLAN_EID_S1G_BCN_COMPAT; +} + struct element { u8 id; u8 datalen; diff --git a/include/linux/libata.h b/include/linux/libata.h index 79974a99265fc..2d3bfec568ebe 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1366,7 +1366,7 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, const struct ata_acpi_gtm *gtm); -int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); +int ata_acpi_cbl_pata_type(struct ata_port *ap); #else static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) { @@ -1391,10 +1391,9 @@ static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, return 0; } -static inline int ata_acpi_cbl_80wire(struct ata_port *ap, - const struct ata_acpi_gtm *gtm) +static inline int ata_acpi_cbl_pata_type(struct ata_port *ap) { - return 0; + return ATA_CBL_PATA40; } #endif diff --git a/include/linux/math.h b/include/linux/math.h index f5f18dc3616b0..0198c92cbe3ef 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -34,6 +34,18 @@ */ #define round_down(x, y) ((x) & ~__round_mask(x, y)) +/** + * DIV_ROUND_UP_POW2 - divide and round up + * @n: numerator + * @d: denominator (must be a power of 2) + * + * Divides @n by @d and rounds up to the next multiple of @d (which must be a power + * of 2).
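As the next sentence of this kernel-doc notes, the point of the power-of-two variant is overflow avoidance: __KERNEL_DIV_ROUND_UP() computes ((n) + (d) - 1) / (d), and that addition can wrap when @n is close to the type maximum, while the power-of-two form divides first. A userspace sketch of the difference (the DIV_ROUND_UP_POW2 body is copied from the hunk that follows):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_CLASSIC(n, d)	(((n) + (d) - 1) / (d))	/* __KERNEL_DIV_ROUND_UP form */
#define DIV_ROUND_UP_POW2(n, d)		((n) / (d) + !!((n) & ((d) - 1)))

int main(void)
{
	uint32_t n = UINT32_MAX - 2;

	/* classic form wraps: (n + 7) overflows to 4, giving 0 */
	printf("classic: %u\n", DIV_ROUND_UP_CLASSIC(n, 8u));
	/* power-of-2 form divides first: 536870911 + 1 = 536870912 */
	printf("pow2:    %u\n", DIV_ROUND_UP_POW2(n, 8u));
	return 0;
}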
Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP(). + * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP(). + */ +#define DIV_ROUND_UP_POW2(n, d) \ + ((n) / (d) + !!((n) & ((d) - 1))) + #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ diff --git a/include/linux/mdio.h b/include/linux/mdio.h index efeca5bd7600b..84b0805918372 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -45,10 +45,7 @@ struct mdio_device { unsigned int reset_deassert_delay; }; -static inline struct mdio_device *to_mdio_device(const struct device *dev) -{ - return container_of(dev, struct mdio_device, dev); -} +#define to_mdio_device(__dev) container_of_const(__dev, struct mdio_device, dev) /* struct mdio_driver_common: Common to all MDIO drivers */ struct mdio_driver_common { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d4b2c09cd5fec..da9749739abde 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -395,6 +395,7 @@ struct mlx5_core_rsc_common { enum mlx5_res_type res; refcount_t refcount; struct completion free; + bool invalid; }; struct mlx5_uars_page { diff --git a/include/linux/mm.h b/include/linux/mm.h index 8617adc6becd1..deeb535f920c8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2592,6 +2592,11 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) return percpu_counter_read_positive(&mm->rss_stat[member]); } +static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member) +{ + return percpu_counter_sum_positive(&mm->rss_stat[member]); +} + void mm_trace_rss_stat(struct mm_struct *mm, int member); static inline void add_mm_counter(struct mm_struct *mm, int member, long value) @@ -4243,4 +4248,62 @@ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) } #endif /* CONFIG_MEM_ALLOC_PROFILING */ +/* + * DMA mapping IDs for page_pool + * + * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and + * stashes it in the upper bits of page->pp_magic. We always want to be able to + * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP + * pages can have arbitrary kernel pointers stored in the same field as pp_magic + * (since it overlaps with page->lru.next), so we must ensure that we cannot + * mistake a valid kernel pointer for any of the values we write into this + * field. + * + * On architectures that set POISON_POINTER_DELTA, this is already ensured, + * since this value becomes part of PP_SIGNATURE; meaning we can just use the + * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the + * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is + * 0, we make sure that we leave the two topmost bits empty, as that guarantees + * we won't mistake a valid kernel pointer for a value we set, regardless of the + * VMSPLIT setting. + * + * Altogether, this means that the number of bits available is constrained by + * the size of an unsigned long (at the upper end, subtracting two bits per the + * above), and the definition of PP_SIGNATURE (with or without + * POISON_POINTER_DELTA).
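Concretely, on a 64-bit build with POISON_POINTER_DELTA == 0 and PP_SIGNATURE == 0x40 (a worked example under those assumptions, not kernel code), the signature sits in bit 6, bits 0-1 stay reserved for the compound-head and pfmemalloc tags, and the DMA index occupies bits 7-38; the actual definitions follow below:

#include <stdint.h>
#include <stdio.h>

#define PP_SIGNATURE	0x40UL
#define INDEX_SHIFT	7	/* 1 + __fls(PP_SIGNATURE) for this configuration */
#define INDEX_BITS	32	/* MIN(32, 64 - INDEX_SHIFT - 2) for this configuration */
#define INDEX_MASK	(((1UL << INDEX_BITS) - 1) << INDEX_SHIFT)
#define MAGIC_MASK	(~(INDEX_MASK | 0x3UL))

int main(void)
{
	uint64_t dma_id = 0x1234;
	uint64_t pp_magic = PP_SIGNATURE | (dma_id << INDEX_SHIFT);

	/* still recognizable as a page_pool page... */
	printf("is_pp:  %d\n", (pp_magic & MAGIC_MASK) == PP_SIGNATURE);
	/* ...and the DMA index round-trips through the upper bits */
	printf("dma_id: %#llx\n",
	       (unsigned long long)((pp_magic & INDEX_MASK) >> INDEX_SHIFT));
	return 0;
}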
+ */ +#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA)) +#if POISON_POINTER_DELTA > 0 +/* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA + * index to not overlap with that if set + */ +#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT) +#else +/* Always leave out the topmost two; see above. */ +#define PP_DMA_INDEX_BITS MIN(32, BITS_PER_LONG - PP_DMA_INDEX_SHIFT - 2) +#endif + +#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \ + PP_DMA_INDEX_SHIFT) + +/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is + * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for + * the head page of compound page and bit 1 for pfmemalloc page, as well as the + * bits used for the DMA index. page_is_pfmemalloc() is checked in + * __page_pool_put_page() to avoid recycling the pfmemalloc page. + */ +#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL) + +#ifdef CONFIG_PAGE_POOL +static inline bool page_pool_page_is_pp(struct page *page) +{ + return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE; +} +#else +static inline bool page_pool_page_is_pp(struct page *page) +{ + return false; +} +#endif + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index eb67d3d5ff5b2..2e455b20c37c2 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -295,6 +295,7 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */ #define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */ #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */ +#define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */ bool written_flag; /* Indicates eMMC has been written since power on */ bool reenable_cmdq; /* Re-enable Command Queue */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 81ab18658d72d..2cff5cafbaa78 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -211,6 +211,15 @@ struct nfs_server { char *fscache_uniq; /* Uniquifier (or NULL) */ #endif + /* The following #defines numerically match the NFSv4 equivalents */ +#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1) +#define NFS_FH_VOLATILE_ANY (0x2) +#define NFS_FH_VOL_MIGRATION (0x4) +#define NFS_FH_VOL_RENAME (0x8) +#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME) + u32 fh_expire_type; /* V4 bitmask representing file + handle volatility type for + this filesystem */ u32 pnfs_blksize; /* layout_blksize attr */ #if IS_ENABLED(CONFIG_NFS_V4) u32 attr_bitmask[3];/* V4 bitmask representing the set @@ -234,9 +243,6 @@ struct nfs_server { u32 acl_bitmask; /* V4 bitmask representing the ACEs that are supported on this filesystem */ - u32 fh_expire_type; /* V4 bitmask representing file - handle volatility type for - this filesystem */ struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ struct rpc_wait_queue roc_rpcwaitq; void *pnfs_ld_data; /* per mount point data */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 1c101f6fad2f3..84d4f0657b7a8 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1954,7 +1954,7 @@ enum { NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, - NVME_SC_ONCS_NOT_SUPPORTED = 0x183, + NVME_SC_CMD_SIZE_LIM_EXCEEDED = 0x183, /* * I/O Command Set Specific - Fabrics commands: diff --git 
a/include/linux/overflow.h b/include/linux/overflow.h index 0c7e3dcfe8670..89e9d60498835 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -389,24 +389,37 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) struct_size((type *)NULL, member, count) /** - * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family. - * Enables caller macro to pass (different) initializer. + * __DEFINE_FLEX() - helper macro for DEFINE_FLEX() family. + * Enables caller macro to pass arbitrary trailing expressions * * @type: structure type name, including "struct" keyword. * @name: Name for a variable to define. * @member: Name of the array member. * @count: Number of elements in the array; must be compile-time const. - * @initializer: initializer expression (could be empty for no init). + * @trailer: Trailing expressions for attributes and/or initializers. */ -#define _DEFINE_FLEX(type, name, member, count, initializer...) \ +#define __DEFINE_FLEX(type, name, member, count, trailer...) \ _Static_assert(__builtin_constant_p(count), \ "onstack flex array members require compile-time const count"); \ union { \ u8 bytes[struct_size_t(type, member, count)]; \ type obj; \ - } name##_u initializer; \ + } name##_u trailer; \ type *name = (type *)&name##_u +/** + * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family. + * Enables caller macro to pass (different) initializer. + * + * @type: structure type name, including "struct" keyword. + * @name: Name for a variable to define. + * @member: Name of the array member. + * @count: Number of elements in the array; must be compile-time const. + * @initializer: Initializer expression (e.g., pass `= { }` at minimum). + */ +#define _DEFINE_FLEX(type, name, member, count, initializer...) \ + __DEFINE_FLEX(type, name, member, count, = { .obj initializer }) + /** * DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing * flexible array member, when it does not have a __counted_by annotation. @@ -421,7 +434,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) * Use __struct_size(@name) to get compile-time size of it afterwards. */ #define DEFINE_RAW_FLEX(type, name, member, count) \ - _DEFINE_FLEX(type, name, member, count, = {}) + __DEFINE_FLEX(type, name, member, count, = { }) /** * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing @@ -438,6 +451,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) * Use __struct_size(@NAME) to get compile-time size of it afterwards. 
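For reference, a usage sketch of this macro family (hypothetical struct and member names, assuming normal kernel header context; the DEFINE_FLEX definition itself follows right after this kernel-doc):

/* hypothetical example type: `len` counts the `data[]` elements */
struct pkt {
	u16 len;
	u8 data[];
};

static void demo(void)
{
	/* 16-element on-stack instance; the union above provides the storage
	 * and the initializer sets f->len = 16, everything else zeroed */
	DEFINE_FLEX(struct pkt, f, data, len, 16);

	f->data[0] = 0xaa;	/* safe: within struct_size(f, data, 16) */
}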
*/ #define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \ - _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .obj.COUNTER = COUNT, }) + _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, }) #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 18a3aeb62ae4e..cd6f8f4bc4540 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -114,6 +114,8 @@ struct pci_epf_driver { * @phys_addr: physical address that should be mapped to the BAR * @addr: virtual address corresponding to the @phys_addr * @size: the size of the address space present in BAR + * @aligned_size: the size actually allocated to accommodate the iATU alignment + * requirement * @barno: BAR number * @flags: flags that are set for the BAR */ @@ -121,6 +123,7 @@ struct pci_epf_bar { dma_addr_t phys_addr; void *addr; size_t size; + size_t aligned_size; enum pci_barno barno; int flags; }; diff --git a/include/linux/phy.h b/include/linux/phy.h index 945264f457d8a..dfc7b97f9648d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -792,10 +792,7 @@ struct phy_device { #define PHY_F_NO_IRQ 0x80000000 #define PHY_F_RXC_ALWAYS_ON 0x40000000 -static inline struct phy_device *to_phy_device(const struct device *dev) -{ - return container_of(to_mdio_device(dev), struct phy_device, mdio); -} +#define to_phy_device(__dev) container_of_const(to_mdio_device(__dev), struct phy_device, mdio) /** * struct phy_tdr_config - Configuration of a TDR raw test diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index cf4b11be37097..c6716f474ba45 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -251,6 +251,7 @@ struct generic_pm_domain_data { unsigned int default_pstate; unsigned int rpm_pstate; bool hw_mode; + bool rpm_always_on; void *data; }; @@ -283,6 +284,7 @@ ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev); void dev_pm_genpd_synced_poweroff(struct device *dev); int dev_pm_genpd_set_hwmode(struct device *dev, bool enable); bool dev_pm_genpd_get_hwmode(struct device *dev); +int dev_pm_genpd_rpm_always_on(struct device *dev, bool on); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -366,6 +368,11 @@ static inline bool dev_pm_genpd_get_hwmode(struct device *dev) return false; } +static inline int dev_pm_genpd_rpm_always_on(struct device *dev, bool on) +{ + return -EOPNOTSUPP; +} + #define simple_qos_governor (*(struct dev_power_governor *)(NULL)) #define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL)) #endif diff --git a/include/linux/poison.h b/include/linux/poison.h index 331a9a996fa87..8ca2235f78d5d 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -70,6 +70,10 @@ #define KEY_DESTROY 0xbd /********** net/core/page_pool.c **********/ +/* + * page_pool uses additional free bits within this value to store data, see the + * definition of PP_DMA_INDEX_MASK in mm.h + */ #define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA) /********** net/core/skbuff.c **********/ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 903ddfea85850..613a8209bed27 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -594,6 +594,7 @@ struct sev_data_snp_addr { * @imi_en: launch flow is launching an IMI (Incoming Migration Image) for the * purpose of guest-assisted migration. 
* @rsvd: reserved + * @desired_tsc_khz: hypervisor desired mean TSC freq in kHz of the guest * @gosvw: guest OS-visible workarounds, as defined by hypervisor */ struct sev_data_snp_launch_start { @@ -603,6 +604,7 @@ struct sev_data_snp_launch_start { u32 ma_en:1; /* In */ u32 imi_en:1; /* In */ u32 rsvd:30; + u32 desired_tsc_khz; /* In */ u8 gosvw[16]; /* In */ } __packed; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 63dd8cf3c3c2b..d3561c4a080e2 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -548,6 +548,12 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t, + raw_spin_lock_bh(_T->lock), + raw_spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, raw_spin_lock_irqsave(_T->lock, _T->flags), raw_spin_unlock_irqrestore(_T->lock, _T->flags), @@ -569,6 +575,13 @@ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try, spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t, + spin_lock_bh(_T->lock), + spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try, + spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, spin_lock_irqsave(_T->lock, _T->flags), spin_unlock_irqrestore(_T->lock, _T->flags), diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 6a5e08b937b31..5f56fa8780131 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -336,7 +336,7 @@ struct tcp_sock { } rcv_rtt_est; /* Receiver queue space */ struct { - u32 space; + int space; u32 seq; u64 time; } rcvq_space; diff --git a/include/linux/usb.h b/include/linux/usb.h index 672d8fc2abdb0..e76e3515a1da0 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -612,6 +612,7 @@ struct usb3_lpm_parameters { * FIXME -- complete doc * @authenticated: Crypto authentication passed * @tunnel_mode: Connection native or tunneled over USB4 + * @usb4_link: device link to the USB4 host interface * @lpm_capable: device supports LPM * @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM @@ -722,6 +723,7 @@ struct usb_device { unsigned reset_resume:1; unsigned port_is_suspended:1; enum usb_link_tunnel_mode tunnel_mode; + struct device_link *usb4_link; int slot_id; struct usb2_lpm_parameters l1_params; diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h index f2da264d9c140..acb0ad03bdacb 100644 --- a/include/linux/usb/typec_dp.h +++ b/include/linux/usb/typec_dp.h @@ -57,6 +57,7 @@ enum { DP_PIN_ASSIGN_D, DP_PIN_ASSIGN_E, DP_PIN_ASSIGN_F, /* Not supported after v1.0b */ + DP_PIN_ASSIGN_MAX, }; /* DisplayPort alt mode specific commands */ diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 0387d64e2c66c..36fb3edfa403d 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -140,6 +140,7 @@ struct virtio_vsock_sock { u32 last_fwd_cnt; u32 rx_bytes; u32 buf_alloc; + u32 buf_used; struct sk_buff_head rx_queue; u32 msg_count; }; diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 9e85424c83435..70302c92d329f 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -242,8 +242,8 @@ int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, int 
vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); -#ifdef CONFIG_BPF_SYSCALL extern struct proto vsock_proto; +#ifdef CONFIG_BPF_SYSCALL int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void __init vsock_bpf_build_proto(void); #else diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 4f3b537476e10..730aa0245aef9 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -338,6 +339,7 @@ struct adv_monitor { struct hci_dev { struct list_head list; + struct srcu_struct srcu; struct mutex lock; struct ida unset_handle_ida; @@ -538,6 +540,7 @@ struct hci_dev { struct hci_conn_hash conn_hash; struct list_head mesh_pending; + struct mutex mgmt_pending_lock; struct list_head mgmt_pending; struct list_head reject_list; struct list_head accept_list; @@ -2379,7 +2382,6 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance); -void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, bdaddr_t *bdaddr, u8 addr_type); diff --git a/include/net/checksum.h b/include/net/checksum.h index 1338cb92c8e72..28b101f26636e 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -158,7 +158,7 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, const __be32 *from, const __be32 *to, bool pseudohdr); void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, - __wsum diff, bool pseudohdr); + __wsum diff, bool pseudohdr, bool ipv6); static __always_inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 248bfb26e2af9..6d52b5584d2fb 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -363,15 +363,6 @@ struct ipcm6_cookie { struct ipv6_txoptions *opt; }; -static inline void ipcm6_init(struct ipcm6_cookie *ipc6) -{ - *ipc6 = (struct ipcm6_cookie) { - .hlimit = -1, - .tclass = -1, - .dontfrag = -1, - }; -} - static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, const struct sock *sk) { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index fee854892bec5..8e70941602064 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5311,22 +5311,6 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, struct ieee80211_tx_rate *dest, int max_rates); -/** - * ieee80211_sta_set_expected_throughput - set the expected tpt for a station - * - * Call this function to notify mac80211 about a change in expected throughput - * to a station. A driver for a device that does rate control in firmware can - * call this function when the expected throughput estimate towards a station - * changes. The information is used to tune the CoDel AQM applied to traffic - * going towards that station (which can otherwise be too aggressive and cause - * slow stations to starve). - * - * @pubsta: the station to set throughput for. - * @thr: the current expected throughput in kbps. 
- */ -void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, - u32 thr); - /** * ieee80211_tx_rate_update - transmit rate update callback * diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index b63d53bb9dd6d..1a6fca0131653 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -369,7 +369,7 @@ static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb) static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto) { - if (!pskb_may_pull(skb, PPPOE_SES_HLEN)) + if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN)) return false; *inner_proto = __nf_flow_pppoe_proto(skb); diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index 6e202ed5e63f3..7370fba844efc 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h @@ -2,6 +2,7 @@ #ifndef _NFT_FIB_H_ #define _NFT_FIB_H_ +#include #include struct nft_fib { @@ -39,6 +40,14 @@ static inline bool nft_fib_can_skip(const struct nft_pktinfo *pkt) return nft_fib_is_loopback(pkt->skb, indev); } +static inline int nft_fib_l3mdev_master_ifindex_rcu(const struct nft_pktinfo *pkt, + const struct net_device *iif) +{ + const struct net_device *dev = iif ? iif : pkt->skb->dev; + + return l3mdev_master_ifindex_rcu(dev); +} + int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset); int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]); diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index c022c410abe39..f53e2c90b6866 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA @@ -33,6 +34,9 @@ #define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \ PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM) +/* Index limit to stay within PP_DMA_INDEX_BITS for DMA indices */ +#define PP_DMA_INDEX_LIMIT XA_LIMIT(1, BIT(PP_DMA_INDEX_BITS) - 1) + /* * Fast allocation side cache array/stack * @@ -216,6 +220,8 @@ struct page_pool { void *mp_priv; + struct xarray dma_mapped; + #ifdef CONFIG_PAGE_POOL_STATS /* recycle stats are per-cpu to avoid locking */ struct page_pool_recycle_stats __percpu *recycle_stats; diff --git a/include/net/sock.h b/include/net/sock.h index fa9b9dadbe170..b7270b6b9e9cc 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2942,8 +2942,11 @@ int sock_ioctl_inout(struct sock *sk, unsigned int cmd, int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); static inline bool sk_is_readable(struct sock *sk) { - if (sk->sk_prot->sock_is_readable) - return sk->sk_prot->sock_is_readable(sk); + const struct proto *prot = READ_ONCE(sk->sk_prot); + + if (prot->sock_is_readable) + return prot->sock_is_readable(sk); + return false; } #endif /* _SOCK_H */ diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index b098ceadbe74b..9a70048adbc06 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -223,7 +223,7 @@ struct hdac_driver { struct device_driver driver; int type; const struct hda_device_id *id_table; - int (*match)(struct hdac_device *dev, struct hdac_driver *drv); + int (*match)(struct hdac_device *dev, const struct hdac_driver *drv); void (*unsol_event)(struct hdac_device *dev, unsigned int event); /* fields used by ext bus APIs */ @@ -235,7 +235,7 @@ struct hdac_driver { #define 
drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver) const struct hda_device_id * -hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv); +hdac_get_device_id(struct hdac_device *hdev, const struct hdac_driver *drv); /* * Bus verb operators diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 60d3b86a4660f..6293ab852c142 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -10,6 +10,7 @@ #include #include #include +#include struct snd_soc_acpi_package_context { char *name; /* package name */ @@ -189,6 +190,15 @@ struct snd_soc_acpi_link_adr { * is not constant since this field may be updated at run-time * @sof_tplg_filename: Sound Open Firmware topology file name, if enabled * @tplg_quirk_mask: quirks to select different topology files dynamically + * @get_function_tplg_files: This is an optional callback; if specified then, instead of + * the single sof_tplg_filename, the callback will return the list of function topology + * files to be loaded. + * Return value: The number of files or a negative errno. 0 means that the single topology + * file should be used and no function topology split can be used on the machine. + * @card: pointer to the card + * @mach: pointer to the machine driver + * @prefix: the prefix of the topology file name. Typically, it is the path. + * @tplg_files: pointer to the array of topology file names. */ /* Descriptor for SST ASoC machine driver */ struct snd_soc_acpi_mach { @@ -207,6 +217,9 @@ struct snd_soc_acpi_mach { struct snd_soc_acpi_mach_params mach_params; const char *sof_tplg_filename; const u32 tplg_quirk_mask; + int (*get_function_tplg_files)(struct snd_soc_card *card, + const struct snd_soc_acpi_mach *mach, + const char *prefix, const char ***tplg_files); }; #define SND_SOC_ACPI_MAX_CODECS 3 diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index 57df3843e650c..198a0c644bea1 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -113,7 +113,7 @@ TRACE_EVENT(erofs_read_folio, __entry->raw) ); -TRACE_EVENT(erofs_readpages, +TRACE_EVENT(erofs_readahead, TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage, bool raw), @@ -211,24 +211,6 @@ TRACE_EVENT(erofs_map_blocks_exit, show_mflags(__entry->mflags), __entry->ret) ); -TRACE_EVENT(erofs_destroy_inode, - TP_PROTO(struct inode *inode), - - TP_ARGS(inode), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_I(inode)->nid; - ), - - TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry)) -); - #endif /* _TRACE_EROFS_H */ /* This part must be outside protection */ diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index 13001da141c33..4b261eb705bc0 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -261,7 +261,7 @@ struct drm_ivpu_bo_info { /* drm_ivpu_submit engines */ #define DRM_IVPU_ENGINE_COMPUTE 0 -#define DRM_IVPU_ENGINE_COPY 1 +#define DRM_IVPU_ENGINE_COPY 1 /* Deprecated */ /** * struct drm_ivpu_submit - Submit commands to the VPU @@ -292,10 +292,6 @@ struct drm_ivpu_submit { * %DRM_IVPU_ENGINE_COMPUTE: * * Performs Deep Learning Neural Compute Inference Operations - * - * %DRM_IVPU_ENGINE_COPY: - * - * Performs memory copy operations to/from system memory allocated for VPU */ __u32 engine; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 552fd633f8200..5a5cdb4539358
100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2035,6 +2035,7 @@ union bpf_attr { * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates * the checksum is to be computed against a pseudo-header. + * Flag **BPF_F_IPV6** should be set for IPv6 packets. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more @@ -6049,6 +6050,7 @@ enum { BPF_F_PSEUDO_HDR = (1ULL << 4), BPF_F_MARK_MANGLED_0 = (1ULL << 5), BPF_F_MARK_ENFORCE = (1ULL << 6), + BPF_F_IPV6 = (1ULL << 7), }; /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h index ed07181d4eff9..e05280e415228 100644 --- a/include/uapi/linux/vm_sockets.h +++ b/include/uapi/linux/vm_sockets.h @@ -17,6 +17,10 @@ #ifndef _UAPI_VM_SOCKETS_H #define _UAPI_VM_SOCKETS_H +#ifndef __KERNEL__ +#include /* for struct sockaddr and sa_family_t */ +#endif + #include #include diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index ecdbe473a49f7..c6c624eb9866d 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -146,18 +146,26 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) { struct io_sq_data *sq = ctx->sq_data; + struct task_struct *tsk; + rcu_read_lock(); + tsk = rcu_dereference(sq->thread); /* * sq->thread might be NULL if we raced with the sqpoll * thread termination. */ - if (sq->thread) { + if (tsk) { + get_task_struct(tsk); + rcu_read_unlock(); + getrusage(tsk, RUSAGE_SELF, &sq_usage); + put_task_struct(tsk); sq_pid = sq->task_pid; sq_cpu = sq->sq_cpu; - getrusage(sq->thread, RUSAGE_SELF, &sq_usage); sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000 + sq_usage.ru_stime.tv_usec); sq_work_time = sq->work_time; + } else { + rcu_read_unlock(); } } diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index a2d577b099308..8f555c1d7185c 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -1204,8 +1204,10 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) atomic_set(&wq->worker_refs, 1); init_completion(&wq->worker_done); ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node); - if (ret) + if (ret) { + put_task_struct(wq->task); goto err; + } return wq; err: diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index bd3b3f7a6f6ca..52ada466bf98f 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1681,7 +1681,7 @@ static __cold void io_drain_req(struct io_kiocb *req) spin_unlock(&ctx->completion_lock); io_prep_async_link(req); - de = kmalloc(sizeof(*de), GFP_KERNEL); + de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT); if (!de) { ret = -ENOMEM; io_req_defer_failed(req, ret); @@ -2916,7 +2916,7 @@ static __cold void io_ring_exit_work(struct work_struct *work) struct task_struct *tsk; io_sq_thread_park(sqd); - tsk = sqd->thread; + tsk = sqpoll_task_locked(sqd); if (tsk && tsk->io_uring && tsk->io_uring->io_wq) io_wq_cancel_cb(tsk->io_uring->io_wq, io_cancel_ctx_cb, ctx, true); @@ -3153,7 +3153,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) s64 inflight; DEFINE_WAIT(wait); - WARN_ON_ONCE(sqd && sqd->thread != current); + WARN_ON_ONCE(sqd && sqpoll_task_locked(sqd) != current); if (!current->io_uring) return; diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 7a8c3a004800e..9bd27deeee6fa 100644 --- a/io_uring/kbuf.c +++ 
b/io_uring/kbuf.c @@ -262,8 +262,12 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg, /* truncate end piece, if needed, for non partial buffers */ if (len > arg->max_len) { len = arg->max_len; - if (!(bl->flags & IOBL_INC)) + if (!(bl->flags & IOBL_INC)) { + arg->partial_map = 1; + if (iov != arg->iovs) + break; buf->len = len; + } } iov->iov_base = u64_to_user_ptr(buf->addr); @@ -728,7 +732,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) io_destroy_bl(ctx, bl); } - free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL); + free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); if (!bl) return -ENOMEM; diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index 36aadfe5ac002..2586a292dfb91 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -61,6 +61,7 @@ struct buf_sel_arg { size_t max_len; unsigned short nr_iovs; unsigned short mode; + unsigned short partial_map; }; void __user *io_buffer_select(struct io_kiocb *req, size_t *len, diff --git a/io_uring/net.c b/io_uring/net.c index 384915d931b72..0116cfaec8488 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -76,12 +76,18 @@ struct io_sr_msg { /* initialised and used only by !msg send variants */ u16 addr_len; u16 buf_group; + unsigned short retry_flags; void __user *addr; void __user *msg_control; /* used only for send zerocopy */ struct io_kiocb *notif; }; +enum sr_retry_flags { + IO_SR_MSG_RETRY = 1, + IO_SR_MSG_PARTIAL_MAP = 2, +}; + /* * Number of times we'll try and do receives if there's more data. If we * exceed this limit, then add us to the back of the queue and retry from @@ -203,6 +209,7 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req, req->flags &= ~REQ_F_BL_EMPTY; sr->done_io = 0; + sr->retry_flags = 0; sr->len = 0; /* get from the provided buffer */ req->buf_index = sr->buf_group; } @@ -409,6 +416,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); sr->done_io = 0; + sr->retry_flags = 0; if (req->opcode == IORING_OP_SEND) { if (READ_ONCE(sqe->__pad3[0])) @@ -780,6 +788,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); sr->done_io = 0; + sr->retry_flags = 0; if (unlikely(sqe->file_index || sqe->addr2)) return -EINVAL; @@ -828,6 +837,9 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_recvmsg_prep_setup(req); } +/* bits to clear in old and inherit in new cflags on bundle retry */ +#define CQE_F_MASK (IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE) + /* * Finishes io_recv and io_recvmsg. 
* @@ -845,11 +857,27 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, cflags |= IORING_CQE_F_SOCK_NONEMPTY; if (sr->flags & IORING_RECVSEND_BUNDLE) { - cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), + size_t this_ret = *ret - sr->done_io; + + cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret), issue_flags); + if (sr->retry_flags & IO_SR_MSG_RETRY) + cflags = req->cqe.flags | (cflags & CQE_F_MASK); /* bundle with no more immediate buffers, we're done */ if (req->flags & REQ_F_BL_EMPTY) goto finish; + /* + * If more is available AND it was a full transfer, retry and + * append to this one + */ + if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 && + !iov_iter_count(&kmsg->msg.msg_iter)) { + req->cqe.flags = cflags & ~CQE_F_MASK; + sr->len = kmsg->msg.msg_inq; + sr->done_io += this_ret; + sr->retry_flags |= IO_SR_MSG_RETRY; + return false; + } } else { cflags |= io_put_kbuf(req, *ret, issue_flags); } @@ -1088,13 +1116,21 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg arg.mode |= KBUF_MODE_FREE; } - if (kmsg->msg.msg_inq > 0) + if (kmsg->msg.msg_inq > 1) arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq); ret = io_buffers_peek(req, &arg); if (unlikely(ret < 0)) return ret; + if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { + kmsg->free_iov_nr = ret; + kmsg->free_iov = arg.iovs; + req->flags |= REQ_F_NEED_CLEANUP; + } + if (arg.partial_map) + sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP; + /* special case 1 vec, can be a fast path */ if (ret == 1) { sr->buf = arg.iovs[0].iov_base; @@ -1103,11 +1139,6 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg } iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret, arg.out_len); - if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { - kmsg->free_iov_nr = ret; - kmsg->free_iov = arg.iovs; - req->flags |= REQ_F_NEED_CLEANUP; - } } else { void __user *buf; @@ -1228,6 +1259,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_kiocb *notif; zc->done_io = 0; + zc->retry_flags = 0; req->flags |= REQ_F_POLL_NO_LAZY; if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))) diff --git a/io_uring/opdef.c b/io_uring/opdef.c index a2be3bbca5ffa..5dc1cba158a06 100644 --- a/io_uring/opdef.c +++ b/io_uring/opdef.c @@ -214,6 +214,7 @@ const struct io_issue_def io_issue_defs[] = { }, [IORING_OP_FALLOCATE] = { .needs_file = 1, + .hash_reg_file = 1, .prep = io_fallocate_prep, .issue = io_fallocate, }, diff --git a/io_uring/register.c b/io_uring/register.c index eca26d4884d9a..a325b493ae121 100644 --- a/io_uring/register.c +++ b/io_uring/register.c @@ -268,6 +268,8 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (ctx->flags & IORING_SETUP_SQPOLL) { sqd = ctx->sq_data; if (sqd) { + struct task_struct *tsk; + /* * Observe the correct sqd->lock -> ctx->uring_lock * ordering. 
Fine to drop uring_lock here, we hold @@ -277,8 +279,9 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, mutex_unlock(&ctx->uring_lock); mutex_lock(&sqd->lock); mutex_lock(&ctx->uring_lock); - if (sqd->thread) - tctx = sqd->thread->io_uring; + tsk = sqpoll_task_locked(sqd); + if (tsk) + tctx = tsk->io_uring; } } else { tctx = current->io_uring; diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index a67bae350416b..1687e35e21c93 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -119,8 +119,11 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo if (imu != &dummy_ubuf) { if (!refcount_dec_and_test(&imu->refs)) return; - for (i = 0; i < imu->nr_bvecs; i++) - unpin_user_page(imu->bvec[i].bv_page); + for (i = 0; i < imu->nr_bvecs; i++) { + struct folio *folio = page_folio(imu->bvec[i].bv_page); + + unpin_user_folio(folio, 1); + } if (imu->acct_pages) io_unaccount_mem(ctx, imu->acct_pages); kvfree(imu); @@ -915,6 +918,7 @@ static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages, return false; data->folio_shift = folio_shift(folio); + data->first_folio_page_idx = folio_page_idx(folio, page_array[0]); /* * Check if pages are contiguous inside a folio, and all folios have * the same page count except for the head and tail. @@ -983,10 +987,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, goto done; ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage); - if (ret) { - unpin_user_pages(pages, nr_pages); + if (ret) goto done; - } size = iov->iov_len; /* store original address for later verification */ @@ -997,7 +999,9 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, if (coalesced) imu->folio_shift = data.folio_shift; refcount_set(&imu->refs, 1); - off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1); + off = (unsigned long)iov->iov_base & ~PAGE_MASK; + if (coalesced) + off += data.first_folio_page_idx << PAGE_SHIFT; *pimu = imu; ret = 0; @@ -1010,8 +1014,13 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, size -= vec_len; } done: - if (ret) + if (ret) { kvfree(imu); + if (pages) { + for (i = 0; i < nr_pages; i++) + unpin_user_folio(page_folio(pages[i]), 1); + } + } kvfree(pages); return ret; } diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h index 8ed5880362102..459cf4c6e8564 100644 --- a/io_uring/rsrc.h +++ b/io_uring/rsrc.h @@ -56,6 +56,7 @@ struct io_imu_folio_data { /* For non-head/tail folios, has to be fully included */ unsigned int nr_pages_mid; unsigned int folio_shift; + unsigned long first_folio_page_idx; }; void io_rsrc_node_ref_zero(struct io_rsrc_node *node); diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 430922c541681..2faa3058b2d0e 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -30,7 +30,7 @@ enum { void io_sq_thread_unpark(struct io_sq_data *sqd) __releases(&sqd->lock) { - WARN_ON_ONCE(sqd->thread == current); + WARN_ON_ONCE(sqpoll_task_locked(sqd) == current); /* * Do the dance but not conditional clear_bit() because it'd race with @@ -45,24 +45,32 @@ void io_sq_thread_unpark(struct io_sq_data *sqd) void io_sq_thread_park(struct io_sq_data *sqd) __acquires(&sqd->lock) { - WARN_ON_ONCE(data_race(sqd->thread) == current); + struct task_struct *tsk; atomic_inc(&sqd->park_pending); set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); mutex_lock(&sqd->lock); - if (sqd->thread) - wake_up_process(sqd->thread); + + tsk = sqpoll_task_locked(sqd); + if (tsk) { + WARN_ON_ONCE(tsk == 
current); + wake_up_process(tsk); + } } void io_sq_thread_stop(struct io_sq_data *sqd) { - WARN_ON_ONCE(sqd->thread == current); + struct task_struct *tsk; + WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); mutex_lock(&sqd->lock); - if (sqd->thread) - wake_up_process(sqd->thread); + tsk = sqpoll_task_locked(sqd); + if (tsk) { + WARN_ON_ONCE(tsk == current); + wake_up_process(tsk); + } mutex_unlock(&sqd->lock); wait_for_completion(&sqd->exited); } @@ -277,7 +285,8 @@ static int io_sq_thread(void *data) /* offload context creation failed, just exit */ if (!current->io_uring) { mutex_lock(&sqd->lock); - sqd->thread = NULL; + rcu_assign_pointer(sqd->thread, NULL); + put_task_struct(current); mutex_unlock(&sqd->lock); goto err_out; } @@ -386,7 +395,8 @@ static int io_sq_thread(void *data) io_sq_tw(&retry_list, UINT_MAX); io_uring_cancel_generic(true, sqd); - sqd->thread = NULL; + rcu_assign_pointer(sqd->thread, NULL); + put_task_struct(current); list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags); io_run_task_work(); @@ -416,7 +426,6 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx) __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { - struct task_struct *task_to_put = NULL; int ret; /* Retain compatibility with failing for an invalid attach attempt */ @@ -496,8 +505,11 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, goto err_sqpoll; } - sqd->thread = tsk; - task_to_put = get_task_struct(tsk); + mutex_lock(&sqd->lock); + rcu_assign_pointer(sqd->thread, tsk); + mutex_unlock(&sqd->lock); + + get_task_struct(tsk); ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) @@ -507,16 +519,11 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, ret = -EINVAL; goto err; } - - if (task_to_put) - put_task_struct(task_to_put); return 0; err_sqpoll: complete(&ctx->sq_data->exited); err: io_sq_thread_finish(ctx); - if (task_to_put) - put_task_struct(task_to_put); return ret; } @@ -527,10 +534,13 @@ __cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, int ret = -EINVAL; if (sqd) { + struct task_struct *tsk; + io_sq_thread_park(sqd); /* Don't set affinity for a dying thread */ - if (sqd->thread) - ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask); + tsk = sqpoll_task_locked(sqd); + if (tsk) + ret = io_wq_cpu_affinity(tsk->io_uring, mask); io_sq_thread_unpark(sqd); } diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index 4171666b1cf4c..b83dcdec9765f 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -8,7 +8,7 @@ struct io_sq_data { /* ctx's that are using this sqd */ struct list_head ctx_list; - struct task_struct *thread; + struct task_struct __rcu *thread; struct wait_queue_head wait; unsigned sq_thread_idle; @@ -29,3 +29,9 @@ void io_sq_thread_unpark(struct io_sq_data *sqd); void io_put_sq_data(struct io_sq_data *sqd); void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); + +static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd) +{ + return rcu_dereference_protected(sqd->thread, + lockdep_is_held(&sqd->lock)); +} diff --git a/ipc/shm.c b/ipc/shm.c index 99564c8700840..492fcc6999857 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -431,8 +431,11 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data) void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); - if 
(shm_ids(ns).in_use) + if (shm_ids(ns).in_use) { + rcu_read_lock(); idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); + rcu_read_unlock(); + } up_write(&shm_ids(ns).rwsem); } diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c index 3dabdd137d102..2d6e1c98d8adc 100644 --- a/kernel/bpf/bpf_lru_list.c +++ b/kernel/bpf/bpf_lru_list.c @@ -337,12 +337,12 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru, list) { __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l), BPF_LRU_LOCAL_LIST_T_FREE); - if (++nfree == LOCAL_FREE_TARGET) + if (++nfree == lru->target_free) break; } - if (nfree < LOCAL_FREE_TARGET) - __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, + if (nfree < lru->target_free) + __bpf_lru_list_shrink(lru, l, lru->target_free - nfree, local_free_list(loc_l), BPF_LRU_LOCAL_LIST_T_FREE); @@ -577,6 +577,9 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); buf += elem_size; } + + lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2, + 1, LOCAL_FREE_TARGET); } static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h index cbd8d3720c2bb..fe2661a58ea94 100644 --- a/kernel/bpf/bpf_lru_list.h +++ b/kernel/bpf/bpf_lru_list.h @@ -58,6 +58,7 @@ struct bpf_lru { del_from_htab_func del_from_htab; void *del_arg; unsigned int hash_offset; + unsigned int target_free; unsigned int nr_scans; bool percpu; }; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 477947456371a..2285b27ce68c7 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -577,7 +577,7 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, if (model->ret_size > 0) flags |= BPF_TRAMP_F_RET_FENTRY_RET; - size = arch_bpf_trampoline_size(model, flags, tlinks, NULL); + size = arch_bpf_trampoline_size(model, flags, tlinks, stub_func); if (size <= 0) return size ? : -EFAULT; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2c54c148a94f3..f83bd019db141 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6684,10 +6684,10 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, /* Is this a func with potential NULL args? */ if (strcmp(tname, raw_tp_null_args[i].func)) continue; - if (raw_tp_null_args[i].mask & (0x1 << (arg * 4))) + if (raw_tp_null_args[i].mask & (0x1ULL << (arg * 4))) info->reg_type |= PTR_MAYBE_NULL; /* Is the current arg IS_ERR? */ - if (raw_tp_null_args[i].mask & (0x2 << (arg * 4))) + if (raw_tp_null_args[i].mask & (0x2ULL << (arg * 4))) ptr_err_raw_tp = true; break; } diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index a60a6a2ce0d7f..68a327158989b 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2303,8 +2303,8 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx, return 0; } -bool bpf_prog_map_compatible(struct bpf_map *map, - const struct bpf_prog *fp) +static bool __bpf_prog_map_compatible(struct bpf_map *map, + const struct bpf_prog *fp) { enum bpf_prog_type prog_type = resolve_prog_type(fp); bool ret; @@ -2313,14 +2313,6 @@ bool bpf_prog_map_compatible(struct bpf_map *map, if (fp->kprobe_override) return false; - /* XDP programs inserted into maps are not guaranteed to run on - * a particular netdev (and can run outside driver context entirely - * in the case of devmap and cpumap). Until device checks - * are implemented, prohibit adding dev-bound programs to program maps. 
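The sizing formula added to bpf_common_lru_populate() above is easy to sanity-check in isolation (userspace sketch; LOCAL_FREE_TARGET is 128 in bpf_lru_list.h):

#include <stdio.h>

#define LOCAL_FREE_TARGET 128

/* half of each CPU's share of the map, clamped to [1, LOCAL_FREE_TARGET] */
static unsigned int target_free(unsigned int nr_elems, unsigned int nr_cpus)
{
	unsigned int t = (nr_elems / nr_cpus) / 2;

	if (t < 1)
		t = 1;
	if (t > LOCAL_FREE_TARGET)
		t = LOCAL_FREE_TARGET;
	return t;
}

int main(void)
{
	printf("%u\n", target_free(1024, 4));	/* 128: large per-CPU share, full batch */
	printf("%u\n", target_free(64, 64));	/* 1: tiny per-CPU share, minimal batch */
	return 0;
}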
- */ - if (bpf_prog_is_dev_bound(aux)) - return false; - spin_lock(&map->owner.lock); if (!map->owner.type) { /* There's no owner yet where we could check for @@ -2354,6 +2346,19 @@ bool bpf_prog_map_compatible(struct bpf_map *map, return ret; } +bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp) +{ + /* XDP programs inserted into maps are not guaranteed to run on + * a particular netdev (and can run outside driver context entirely + * in the case of devmap and cpumap). Until device checks + * are implemented, prohibit adding dev-bound programs to program maps. + */ + if (bpf_prog_is_dev_bound(fp->aux)) + return false; + + return __bpf_prog_map_compatible(map, fp); +} + static int bpf_check_tail_call(const struct bpf_prog *fp) { struct bpf_prog_aux *aux = fp->aux; @@ -2366,7 +2371,7 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) if (!map_type_contains_progs(map)) continue; - if (!bpf_prog_map_compatible(map, fp)) { + if (!__bpf_prog_map_compatible(map, fp)) { ret = -EINVAL; goto out; } @@ -2414,7 +2419,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) /* In case of BPF to BPF calls, verifier did all the prep * work with regards to JITing, etc. */ - bool jit_needed = false; + bool jit_needed = fp->jit_requested; if (fp->bpf_func) goto finalize; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index a05aeb3458964..9173d107758d4 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -129,7 +129,8 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = { BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) { - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 39a3d750f2ff9..f01477cecf393 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1376,13 +1376,6 @@ static void free_func_state(struct bpf_func_state *state) kfree(state); } -static void clear_jmp_history(struct bpf_verifier_state *state) -{ - kfree(state->jmp_history); - state->jmp_history = NULL; - state->jmp_history_cnt = 0; -} - static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { @@ -1392,7 +1385,6 @@ static void free_verifier_state(struct bpf_verifier_state *state, free_func_state(state->frame[i]); state->frame[i] = NULL; } - clear_jmp_history(state); if (free_self) kfree(state); } @@ -1418,13 +1410,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, struct bpf_func_state *dst; int i, err; - dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, - src->jmp_history_cnt, sizeof(*dst_state->jmp_history), - GFP_USER); - if (!dst_state->jmp_history) - return -ENOMEM; - dst_state->jmp_history_cnt = src->jmp_history_cnt; - /* if dst has more stack frames then src frame, free them, this is also * necessary in case of exceptional exits using bpf_throw. 
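
The target_free initialisation in the bpf_lru_list.c hunk earlier budgets half of each CPU's share of the map, clamped between one element and LOCAL_FREE_TARGET. A minimal userspace sketch of that arithmetic, assuming LOCAL_FREE_TARGET is 128 and using made-up map sizes and CPU counts:

#include <stdio.h>

/* Assumed value of LOCAL_FREE_TARGET at the time of the patch. */
#define LOCAL_FREE_TARGET 128

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
    unsigned int ncpus = 8;
    unsigned int sizes[] = { 4, 64, 4096, 1u << 20 };
    unsigned int i;

    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        unsigned int nr_elems = sizes[i];
        unsigned int target =
            clamp_uint((nr_elems / ncpus) / 2, 1, LOCAL_FREE_TARGET);

        printf("%7u elems on %u CPUs -> batch of %u\n",
               nr_elems, ncpus, target);
    }
    return 0;
}

Tiny maps hit the floor of one element; large maps hit the 128 cap, which bounds how long the global list lock is held per refill.
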
*/ @@ -1443,6 +1428,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->parent = src->parent; dst_state->first_insn_idx = src->first_insn_idx; dst_state->last_insn_idx = src->last_insn_idx; + dst_state->insn_hist_start = src->insn_hist_start; + dst_state->insn_hist_end = src->insn_hist_end; dst_state->dfs_depth = src->dfs_depth; dst_state->callback_unroll_depth = src->callback_unroll_depth; dst_state->used_as_loop_entry = src->used_as_loop_entry; @@ -2496,9 +2483,14 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, * The caller state doesn't matter. * This is async callback. It starts in a fresh stack. * Initialize it similar to do_check_common(). + * But we do need to make sure to not clobber insn_hist, so we keep + * chaining insn_hist_start/insn_hist_end indices as for a normal + * child state. */ elem->st.branches = 1; elem->st.in_sleepable = is_sleepable; + elem->st.insn_hist_start = env->cur_state->insn_hist_end; + elem->st.insn_hist_end = elem->st.insn_hist_start; frame = kzalloc(sizeof(*frame), GFP_KERNEL); if (!frame) goto err; @@ -3513,11 +3505,10 @@ static void linked_regs_unpack(u64 val, struct linked_regs *s) } /* for any branch, call, exit record the history of jmps in the given state */ -static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, - int insn_flags, u64 linked_regs) +static int push_insn_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, + int insn_flags, u64 linked_regs) { - u32 cnt = cur->jmp_history_cnt; - struct bpf_jmp_history_entry *p; + struct bpf_insn_hist_entry *p; size_t alloc_size; /* combine instruction flags if we already recorded this instruction */ @@ -3537,29 +3528,32 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st return 0; } - cnt++; - alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p))); - p = krealloc(cur->jmp_history, alloc_size, GFP_USER); - if (!p) - return -ENOMEM; - cur->jmp_history = p; + if (cur->insn_hist_end + 1 > env->insn_hist_cap) { + alloc_size = size_mul(cur->insn_hist_end + 1, sizeof(*p)); + p = kvrealloc(env->insn_hist, alloc_size, GFP_USER); + if (!p) + return -ENOMEM; + env->insn_hist = p; + env->insn_hist_cap = alloc_size / sizeof(*p); + } - p = &cur->jmp_history[cnt - 1]; + p = &env->insn_hist[cur->insn_hist_end]; p->idx = env->insn_idx; p->prev_idx = env->prev_insn_idx; p->flags = insn_flags; p->linked_regs = linked_regs; - cur->jmp_history_cnt = cnt; + + cur->insn_hist_end++; env->cur_hist_ent = p; return 0; } -static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st, - u32 hist_end, int insn_idx) +static struct bpf_insn_hist_entry *get_insn_hist_entry(struct bpf_verifier_env *env, + u32 hist_start, u32 hist_end, int insn_idx) { - if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx) - return &st->jmp_history[hist_end - 1]; + if (hist_end > hist_start && env->insn_hist[hist_end - 1].idx == insn_idx) + return &env->insn_hist[hist_end - 1]; return NULL; } @@ -3576,25 +3570,26 @@ static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_stat * history entry recording a jump from last instruction of parent state and * first instruction of given state. 
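
The verifier hunks above replace each state's privately owned jmp_history array with a single env-owned insn_hist buffer that states reference through the half-open range [insn_hist_start, insn_hist_end), so forking a state chains indices instead of copying entries. A simplified userspace model of that layout (names are illustrative; the real code grows the buffer with kvrealloc and records flags and linked_regs per entry):

#include <stdio.h>
#include <stdlib.h>

/* One global, growable history; states only store their index range. */
struct hist_entry { int idx; int prev_idx; };

struct env {
    struct hist_entry *hist;
    size_t cap;
};

struct state { size_t hist_start, hist_end; };

static void push_hist(struct env *env, struct state *st, int idx, int prev)
{
    if (st->hist_end + 1 > env->cap) {
        size_t ncap = env->cap ? env->cap * 2 : 8;
        struct hist_entry *p = realloc(env->hist, ncap * sizeof(*p));

        if (!p)
            exit(1);
        env->hist = p;
        env->cap = ncap;
    }
    env->hist[st->hist_end++] = (struct hist_entry){ idx, prev };
}

int main(void)
{
    struct env env = { 0 };
    struct state parent = { 0, 0 };
    struct state child;

    push_hist(&env, &parent, 5, 4);
    push_hist(&env, &parent, 9, 5);

    /* A forked state chains on: it starts where the parent ended. */
    child.hist_start = child.hist_end = parent.hist_end;
    push_hist(&env, &child, 12, 9);

    printf("parent owns [%zu,%zu), child owns [%zu,%zu)\n",
           parent.hist_start, parent.hist_end,
           child.hist_start, child.hist_end);
    free(env.hist);
    return 0;
}
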
*/ -static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, - u32 *history) +static int get_prev_insn_idx(const struct bpf_verifier_env *env, + struct bpf_verifier_state *st, + int insn_idx, u32 hist_start, u32 *hist_endp) { - u32 cnt = *history; + u32 hist_end = *hist_endp; + u32 cnt = hist_end - hist_start; - if (i == st->first_insn_idx) { + if (insn_idx == st->first_insn_idx) { if (cnt == 0) return -ENOENT; - if (cnt == 1 && st->jmp_history[0].idx == i) + if (cnt == 1 && env->insn_hist[hist_start].idx == insn_idx) return -ENOENT; } - if (cnt && st->jmp_history[cnt - 1].idx == i) { - i = st->jmp_history[cnt - 1].prev_idx; - (*history)--; + if (cnt && env->insn_hist[hist_end - 1].idx == insn_idx) { + (*hist_endp)--; + return env->insn_hist[hist_end - 1].prev_idx; } else { - i--; + return insn_idx - 1; } - return i; } static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) @@ -3766,7 +3761,7 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) /* If any register R in hist->linked_regs is marked as precise in bt, * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs. */ -static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist) +static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_insn_hist_entry *hist) { struct linked_regs linked_regs; bool some_precise = false; @@ -3811,7 +3806,7 @@ static bool calls_callback(struct bpf_verifier_env *env, int insn_idx); * - *was* processed previously during backtracking. */ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, - struct bpf_jmp_history_entry *hist, struct backtrack_state *bt) + struct bpf_insn_hist_entry *hist, struct backtrack_state *bt) { const struct bpf_insn_cbs cbs = { .cb_call = disasm_kfunc_name, @@ -4071,8 +4066,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, * before it would be equally necessary to * propagate it to dreg. */ - bt_set_reg(bt, dreg); - bt_set_reg(bt, sreg); + if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK)) + bt_set_reg(bt, sreg); + if (!hist || !(hist->flags & INSN_F_DST_REG_STACK)) + bt_set_reg(bt, dreg); } else if (BPF_SRC(insn->code) == BPF_K) { /* dreg K * Only dreg still needs precision before @@ -4230,7 +4227,7 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_ * SCALARS, as well as any other registers and slots that contribute to * a tracked state of given registers/stack slots, depending on specific BPF * assembly instructions (see backtrack_insns() for exact instruction handling - * logic). This backtracking relies on recorded jmp_history and is able to + * logic). This backtracking relies on recorded insn_hist and is able to * traverse entire chain of parent states. This process ends only when all the * necessary registers/slots and their transitive dependencies are marked as * precise. 
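
The backtrack_insn() change above consults INSN_F_SRC_REG_STACK/INSN_F_DST_REG_STACK so that a conditional-jump operand already known to be a stack pointer is not needlessly marked precise. A toy model of that filtering, with the precision set reduced to a plain bitmask rather than the verifier's backtrack_state:

#include <stdio.h>
#include <stdint.h>

#define F_SRC_STACK 0x1u
#define F_DST_STACK 0x2u

/*
 * A conditional jump normally forces both operands to become precise;
 * operands recorded as stack pointers can be skipped.
 */
static uint32_t backtrack_jmp(uint32_t precise, int sreg, int dreg,
                              unsigned int flags)
{
    if (!(flags & F_SRC_STACK))
        precise |= 1u << sreg;
    if (!(flags & F_DST_STACK))
        precise |= 1u << dreg;
    return precise;
}

int main(void)
{
    printf("no flags:      %#x\n", backtrack_jmp(0, 1, 2, 0));
    printf("src on stack:  %#x\n", backtrack_jmp(0, 1, 2, F_SRC_STACK));
    printf("both on stack: %#x\n",
           backtrack_jmp(0, 1, 2, F_SRC_STACK | F_DST_STACK));
    return 0;
}
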
@@ -4347,8 +4344,9 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) for (;;) { DECLARE_BITMAP(mask, 64); - u32 history = st->jmp_history_cnt; - struct bpf_jmp_history_entry *hist; + u32 hist_start = st->insn_hist_start; + u32 hist_end = st->insn_hist_end; + struct bpf_insn_hist_entry *hist; if (env->log.level & BPF_LOG_LEVEL2) { verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n", @@ -4387,7 +4385,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) err = 0; skip_first = false; } else { - hist = get_jmp_hist_entry(st, history, i); + hist = get_insn_hist_entry(env, hist_start, hist_end, i); err = backtrack_insn(env, i, subseq_idx, hist, bt); } if (err == -ENOTSUPP) { @@ -4404,7 +4402,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) */ return 0; subseq_idx = i; - i = get_prev_insn_idx(st, i, &history); + i = get_prev_insn_idx(env, st, i, hist_start, &hist_end); if (i == -ENOENT) break; if (i >= env->prog->len) { @@ -4771,7 +4769,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, } if (insn_flags) - return push_jmp_history(env, env->cur_state, insn_flags, 0); + return push_insn_history(env, env->cur_state, insn_flags, 0); return 0; } @@ -5078,7 +5076,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, insn_flags = 0; /* we are not restoring spilled register */ } if (insn_flags) - return push_jmp_history(env, env->cur_state, insn_flags, 0); + return push_insn_history(env, env->cur_state, insn_flags, 0); return 0; } @@ -15419,6 +15417,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_reg_state *eq_branch_regs; struct linked_regs linked_regs = {}; u8 opcode = BPF_OP(insn->code); + int insn_flags = 0; bool is_jmp32; int pred = -1; int err; @@ -15478,6 +15477,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, insn->src_reg); return -EACCES; } + + if (src_reg->type == PTR_TO_STACK) + insn_flags |= INSN_F_SRC_REG_STACK; } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); @@ -15489,6 +15491,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, __mark_reg_known(src_reg, insn->imm); } + if (dst_reg->type == PTR_TO_STACK) + insn_flags |= INSN_F_DST_REG_STACK; + if (insn_flags) { + err = push_insn_history(env, this_branch, insn_flags, 0); + if (err) + return err; + } + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32); if (pred >= 0) { @@ -15542,7 +15552,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, if (dst_reg->type == SCALAR_VALUE && dst_reg->id) collect_linked_regs(this_branch, dst_reg->id, &linked_regs); if (linked_regs.cnt > 1) { - err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs)); + err = push_insn_history(env, this_branch, 0, linked_regs_pack(&linked_regs)); if (err) return err; } @@ -17984,7 +17994,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) || /* Avoid accumulating infinitely long jmp history */ - cur->jmp_history_cnt > 40; + cur->insn_hist_end - cur->insn_hist_start > 40; /* bpf progs typically have pruning point every 4 instructions * http://vger.kernel.org/bpfconf2019.html#session-1 @@ -18182,7 +18192,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) * the current state. 
*/ if (is_jmp_point(env, env->insn_idx)) - err = err ? : push_jmp_history(env, cur, 0, 0); + err = err ? : push_insn_history(env, cur, 0, 0); err = err ? : propagate_precision(env, &sl->state); if (err) return err; @@ -18281,8 +18291,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) cur->parent = new; cur->first_insn_idx = insn_idx; + cur->insn_hist_start = cur->insn_hist_end; cur->dfs_depth = new->dfs_depth + 1; - clear_jmp_history(cur); new_sl->next = *explored_state(env, insn_idx); *explored_state(env, insn_idx) = new_sl; /* connect new state to parentage chain. Current frame needs all @@ -18450,7 +18460,7 @@ static int do_check(struct bpf_verifier_env *env) } if (is_jmp_point(env, env->insn_idx)) { - err = push_jmp_history(env, state, 0, 0); + err = push_insn_history(env, state, 0, 0); if (err) return err; } @@ -22716,6 +22726,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 if (!is_priv) mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); + kvfree(env->insn_hist); err_free_env: kvfree(env); return ret; diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c index 074653f964c1d..01c02d116e8e1 100644 --- a/kernel/cgroup/legacy_freezer.c +++ b/kernel/cgroup/legacy_freezer.c @@ -188,13 +188,12 @@ static void freezer_attach(struct cgroup_taskset *tset) if (!(freezer->state & CGROUP_FREEZING)) { __thaw_task(task); } else { - freeze_task(task); - /* clear FROZEN and propagate upwards */ while (freezer && (freezer->state & CGROUP_FROZEN)) { freezer->state &= ~CGROUP_FROZEN; freezer = parent_freezer(freezer); } + freeze_task(task); } } diff --git a/kernel/events/core.c b/kernel/events/core.c index 285a4548450bd..dd745485b0f46 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -206,6 +206,19 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, __perf_ctx_unlock(&cpuctx->ctx); } +typedef struct { + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; +} class_perf_ctx_lock_t; + +static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T) +{ perf_ctx_unlock(_T->cpuctx, _T->ctx); } + +static inline class_perf_ctx_lock_t +class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; } + #define TASK_TOMBSTONE ((void *)-1L) static bool is_kernel_event(struct perf_event *event) @@ -892,13 +905,19 @@ static void perf_cgroup_switch(struct task_struct *task) if (READ_ONCE(cpuctx->cgrp) == NULL) return; - WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); - cgrp = perf_cgroup_from_task(task, NULL); if (READ_ONCE(cpuctx->cgrp) == cgrp) return; - perf_ctx_lock(cpuctx, cpuctx->task_ctx); + guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx); + /* + * Re-check, could've raced vs perf_remove_from_context(). 
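
The class_perf_ctx_lock_t boilerplate above is what lets perf_cgroup_switch() use guard(perf_ctx_lock)(...), so the unlock runs automatically on every exit path. A rough userspace analogue of the underlying idea, built on GCC/Clang's cleanup attribute and pthreads; the GUARD macro here is a hypothetical stand-in, not the kernel's guard() implementation:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/* Hypothetical stand-in for the kernel's guard() machinery. */
#define GUARD(m)                                                \
    pthread_mutex_t *_guard                                     \
        __attribute__((cleanup(unlock_cleanup))) = (m);         \
    pthread_mutex_lock(_guard)

static int critical(int bail_early)
{
    GUARD(&lock);
    if (bail_early)
        return -1;    /* the unlock still runs on this path */
    return 0;
}

int main(void)
{
    critical(1);
    critical(0);
    puts("both calls released the lock");
    return 0;
}
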
+ */ + if (READ_ONCE(cpuctx->cgrp) == NULL) + return; + + WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); + perf_ctx_disable(&cpuctx->ctx, true); ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP); @@ -916,7 +935,6 @@ static void perf_cgroup_switch(struct task_struct *task) ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP); perf_ctx_enable(&cpuctx->ctx, true); - perf_ctx_unlock(cpuctx, cpuctx->task_ctx); } static int perf_cgroup_ensure_storage(struct perf_event *event, @@ -2111,8 +2129,9 @@ perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) } static void put_event(struct perf_event *event); -static void event_sched_out(struct perf_event *event, - struct perf_event_context *ctx); +static void __event_disable(struct perf_event *event, + struct perf_event_context *ctx, + enum perf_event_state state); static void perf_put_aux_event(struct perf_event *event) { @@ -2145,8 +2164,7 @@ static void perf_put_aux_event(struct perf_event *event) * state so that we don't try to schedule it again. Note * that perf_event_enable() will clear the ERROR status. */ - event_sched_out(iter, ctx); - perf_event_set_state(event, PERF_EVENT_STATE_ERROR); + __event_disable(iter, ctx, PERF_EVENT_STATE_ERROR); } } @@ -2204,18 +2222,6 @@ static inline struct list_head *get_event_list(struct perf_event *event) &event->pmu_ctx->flexible_active; } -/* - * Events that have PERF_EV_CAP_SIBLING require being part of a group and - * cannot exist on their own, schedule them out and move them into the ERROR - * state. Also see _perf_event_enable(), it will not be able to recover - * this ERROR state. - */ -static inline void perf_remove_sibling_event(struct perf_event *event) -{ - event_sched_out(event, event->ctx); - perf_event_set_state(event, PERF_EVENT_STATE_ERROR); -} - static void perf_group_detach(struct perf_event *event) { struct perf_event *leader = event->group_leader; @@ -2251,8 +2257,15 @@ static void perf_group_detach(struct perf_event *event) */ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { + /* + * Events that have PERF_EV_CAP_SIBLING require being part of + * a group and cannot exist on their own, schedule them out + * and move them into the ERROR state. Also see + * _perf_event_enable(), it will not be able to recover this + * ERROR state. + */ if (sibling->event_caps & PERF_EV_CAP_SIBLING) - perf_remove_sibling_event(sibling); + __event_disable(sibling, ctx, PERF_EVENT_STATE_ERROR); sibling->group_leader = sibling; list_del_init(&sibling->sibling_list); @@ -2512,6 +2525,15 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla event_function_call(event, __perf_remove_from_context, (void *)flags); } +static void __event_disable(struct perf_event *event, + struct perf_event_context *ctx, + enum perf_event_state state) +{ + event_sched_out(event, ctx); + perf_cgroup_event_disable(event, ctx); + perf_event_set_state(event, state); +} + /* * Cross CPU call to disable a performance event */ @@ -2526,13 +2548,18 @@ static void __perf_event_disable(struct perf_event *event, perf_pmu_disable(event->pmu_ctx->pmu); ctx_time_update_event(ctx, event); + /* + * When disabling a group leader, the whole group becomes ineligible + * to run, so schedule out the full group. 
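
perf_cgroup_switch() above now re-reads cpuctx->cgrp after taking the lock, because the optimistic unlocked read can race with perf_remove_from_context(). The shape of that check, lock, re-check pattern reduced to a userspace sketch (the kernel version additionally relies on READ_ONCE() and the perf ctx-lock semantics):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared;    /* may be torn down concurrently */

static void update(void)
{
    if (!shared)        /* optimistic, unlocked fast path */
        return;

    pthread_mutex_lock(&lock);
    if (!shared) {      /* re-check: may have raced with teardown */
        pthread_mutex_unlock(&lock);
        return;
    }
    (*shared)++;        /* only the locked read is acted upon */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    int v = 0;

    shared = &v;
    update();
    shared = NULL;
    update();           /* skipped on the fast path */
    printf("v=%d\n", v);
    return 0;
}
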
+ */ if (event == event->group_leader) group_sched_out(event, ctx); - else - event_sched_out(event, ctx); - perf_event_set_state(event, PERF_EVENT_STATE_OFF); - perf_cgroup_event_disable(event, ctx); + /* + * But only mark the leader OFF; the siblings will remain + * INACTIVE. + */ + __event_disable(event, ctx, PERF_EVENT_STATE_OFF); perf_pmu_enable(event->pmu_ctx->pmu); } @@ -6031,6 +6058,9 @@ static int perf_event_set_output(struct perf_event *event, static int perf_event_set_filter(struct perf_event *event, void __user *arg); static int perf_copy_attr(struct perf_event_attr __user *uattr, struct perf_event_attr *attr); +static int __perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie); static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) { @@ -6099,7 +6129,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon if (IS_ERR(prog)) return PTR_ERR(prog); - err = perf_event_set_bpf_prog(event, prog, 0); + err = __perf_event_set_bpf_prog(event, prog, 0); if (err) { bpf_prog_put(prog); return err; @@ -7094,6 +7124,10 @@ perf_sample_ustack_size(u16 stack_size, u16 header_size, if (!regs) return 0; + /* No mm, no stack, no dump. */ + if (!current->mm) + return 0; + /* * Check if we fit in with the requested stack size into the: * - TASK_SIZE @@ -7805,6 +7839,9 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs) const u32 max_stack = event->attr.sample_max_stack; struct perf_callchain_entry *callchain; + if (!current->mm) + user = false; + if (!kernel && !user) return &__empty_callchain; @@ -9715,14 +9752,14 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle) hwc->interrupts = 1; } else { hwc->interrupts++; - if (unlikely(throttle && - hwc->interrupts > max_samples_per_tick)) { - __this_cpu_inc(perf_throttled_count); - tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); - hwc->interrupts = MAX_INTERRUPTS; - perf_log_throttle(event, 0); - ret = 1; - } + } + + if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) { + __this_cpu_inc(perf_throttled_count); + tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); + hwc->interrupts = MAX_INTERRUPTS; + perf_log_throttle(event, 0); + ret = 1; } if (event->attr.freq) { @@ -10700,7 +10737,7 @@ static int perf_uprobe_event_init(struct perf_event *event) if (event->attr.type != perf_uprobe.type) return -ENOENT; - if (!perfmon_capable()) + if (!capable(CAP_SYS_ADMIN)) return -EACCES; /* @@ -10756,8 +10793,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event) return false; } -int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, - u64 bpf_cookie) +static int __perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) { bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp; @@ -10795,6 +10833,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, return perf_event_attach_bpf_prog(event, prog, bpf_cookie); } +int perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) +{ + struct perf_event_context *ctx; + int ret; + + ctx = perf_event_ctx_lock(event); + ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie); + perf_event_ctx_unlock(event, ctx); + + return ret; +} + void perf_event_free_bpf_prog(struct perf_event *event) { if (!perf_event_is_tracing(event)) { @@ -10814,7 +10866,15 @@ static void perf_event_free_filter(struct perf_event *event) { } 
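
perf_event_set_bpf_prog() above becomes a thin wrapper that takes the event context lock around __perf_event_set_bpf_prog(), while the ioctl path, which already holds that lock, calls the double-underscore variant directly. The same locked/unlocked pairing in miniature, as a hedged userspace sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static int attached_prog;

/* Double-underscore variant: caller must already hold ctx_lock. */
static int __set_prog(int prog)
{
    attached_prog = prog;
    return 0;
}

/* Public entry point: takes and drops the lock itself. */
static int set_prog(int prog)
{
    int ret;

    pthread_mutex_lock(&ctx_lock);
    ret = __set_prog(prog);
    pthread_mutex_unlock(&ctx_lock);
    return ret;
}

static void ioctl_path(void)
{
    pthread_mutex_lock(&ctx_lock);
    /* Already under the lock, so call the __ variant directly. */
    __set_prog(42);
    pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
    set_prog(1);
    ioctl_path();
    printf("attached_prog=%d\n", attached_prog);
    return 0;
}
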
-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, +static int __perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) +{ + return -ENOENT; +} + +int perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, u64 bpf_cookie) { return -ENOENT; diff --git a/kernel/exit.c b/kernel/exit.c index 56b8bd9487b4b..d465b36bcc869 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -923,6 +923,15 @@ void __noreturn do_exit(long code) tsk->exit_code = code; taskstats_exit(tsk, group_dead); + /* + * Since sampling can touch ->mm, make sure to stop everything before we + * tear it down. + * + * Also flushes inherited counters to the parent - before the parent + * gets woken up by child-exit notifications. + */ + perf_event_exit_task(tsk); + exit_mm(); if (group_dead) @@ -939,14 +948,6 @@ void __noreturn do_exit(long code) exit_task_work(tsk); exit_thread(tsk); - /* - * Flush inherited counters to the parent - before the parent - * gets woken up by child-exit notifications. - * - * because of cgroup mode, must be called before cgroup_exit() - */ - perf_event_exit_task(tsk); - sched_autogroup_exit_task(tsk); cgroup_exit(tsk); diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c index 1a3d483548e2f..ae4c9cbd1b4b9 100644 --- a/kernel/irq/irq_sim.c +++ b/kernel/irq/irq_sim.c @@ -202,7 +202,7 @@ struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode, void *data) { struct irq_sim_work_ctx *work_ctx __free(kfree) = - kmalloc(sizeof(*work_ctx), GFP_KERNEL); + kzalloc(sizeof(*work_ctx), GFP_KERNEL); if (!work_ctx) return ERR_PTR(-ENOMEM); diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 4e1778071d704..1c9fe741fe6d5 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -233,6 +233,10 @@ static int em_compute_costs(struct device *dev, struct em_perf_state *table, unsigned long prev_cost = ULONG_MAX; int i, ret; + /* This is needed only for CPUs and EAS skip other devices */ + if (!_is_cpu_device(dev)) + return 0; + /* Compute the cost of each performance state. 
*/ for (i = nr_states - 1; i >= 0; i--) { unsigned long power_res, cost; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index d8bad1eeedd3e..85008ead2ac91 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -89,6 +89,11 @@ void hibernate_release(void) atomic_inc(&hibernate_atomic); } +bool hibernation_in_progress(void) +{ + return !atomic_read(&hibernate_atomic); +} + bool hibernation_available(void) { return nohibernate == 0 && diff --git a/kernel/power/main.c b/kernel/power/main.c index 6254814d48171..0622e7dacf172 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -613,7 +613,8 @@ bool pm_debug_messages_on __read_mostly; bool pm_debug_messages_should_print(void) { - return pm_debug_messages_on && pm_suspend_target_state != PM_SUSPEND_ON; + return pm_debug_messages_on && (hibernation_in_progress() || + pm_suspend_target_state != PM_SUSPEND_ON); } EXPORT_SYMBOL_GPL(pm_debug_messages_should_print); diff --git a/kernel/power/power.h b/kernel/power/power.h index de0e6b1077f23..6d1ec7b23e844 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -71,10 +71,14 @@ extern void enable_restore_image_protection(void); static inline void enable_restore_image_protection(void) {} #endif /* CONFIG_STRICT_KERNEL_RWX */ +extern bool hibernation_in_progress(void); + #else /* !CONFIG_HIBERNATION */ static inline void hibernate_reserved_size_init(void) {} static inline void hibernate_image_size_init(void) {} + +static inline bool hibernation_in_progress(void) { return false; } #endif /* !CONFIG_HIBERNATION */ #define power_attr(_name) \ diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 52571dcad768b..4e941999a53ba 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -49,6 +49,9 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active) len += sysfs_emit_at(buf, len, "%s ", wl->name); } + if (len > 0) + --len; + len += sysfs_emit_at(buf, len, "\n"); mutex_unlock(&wakelocks_lock); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 4ed8632195217..552464dcffe27 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -802,6 +802,10 @@ static int rcu_watching_snap_save(struct rcu_data *rdp) return 0; } +#ifndef arch_irq_stat_cpu +#define arch_irq_stat_cpu(cpu) 0 +#endif + /* * Returns positive if the specified CPU has passed through a quiescent state * by virtue of being in or having passed through an dynticks idle state since @@ -937,9 +941,9 @@ static int rcu_watching_snap_recheck(struct rcu_data *rdp) rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu); rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu); rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); - rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu); - rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu); - rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu); + rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu); + rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu); + rsrp->nr_csw = nr_context_switches_cpu(cpu); rsrp->jiffies = jiffies; rsrp->gp_seq = rdp->gp_seq; } @@ -3072,6 +3076,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) /* Misaligned rcu_head! */ WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); + /* Avoid NULL dereference if callback is NULL. */ + if (WARN_ON_ONCE(!func)) + return; + if (debug_rcu_head_queue(head)) { /* * Probable double call_rcu(), so leak the callback. 
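
The #ifndef arch_irq_stat_cpu fallback in the rcu/tree.c hunk above is the usual pattern for optional per-architecture hooks: architectures that define the macro contribute their extra count, everyone else silently gets a zero-cost default. A compilable illustration of the pattern outside the kernel:

#include <stdio.h>

/*
 * An architecture with extra IRQ statistics would define the macro
 * before this point; everyone else gets a zero-cost default.
 */
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif

int main(void)
{
    unsigned long generic_irqs = 1234;

    printf("total irqs on cpu0: %lu\n",
           generic_irqs + arch_irq_stat_cpu(0));
    return 0;
}
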
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index a9a811d9d7a37..1bba2225e7448 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -168,7 +168,7 @@ struct rcu_snap_record { u64 cputime_irq; /* Accumulated cputime of hard irqs */ u64 cputime_softirq;/* Accumulated cputime of soft irqs */ u64 cputime_system; /* Accumulated cputime of kernel tasks */ - unsigned long nr_hardirqs; /* Accumulated number of hard irqs */ + u64 nr_hardirqs; /* Accumulated number of hard irqs */ unsigned int nr_softirqs; /* Accumulated number of soft irqs */ unsigned long long nr_csw; /* Accumulated number of task switches */ unsigned long jiffies; /* Track jiffies value */ diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 4432db6d0b99b..4d524a2212a8d 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -457,8 +457,8 @@ static void print_cpu_stat_info(int cpu) rsr.cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); pr_err("\t hardirqs softirqs csw/system\n"); - pr_err("\t number: %8ld %10d %12lld\n", - kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs, + pr_err("\t number: %8lld %10d %12lld\n", + kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu) - rsrp->nr_hardirqs, kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs, nr_context_switches_cpu(cpu) - rsrp->nr_csw); pr_err("\tcputime: %8lld %10lld %12lld ==> %d(ms)\n", diff --git a/kernel/rseq.c b/kernel/rseq.c index 9de6e35fe6791..23894ba8250cf 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -149,6 +149,29 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t) return 0; } +/* + * Get the user-space pointer value stored in the 'rseq_cs' field. + */ +static int rseq_get_rseq_cs_ptr_val(struct rseq __user *rseq, u64 *rseq_cs) +{ + if (!rseq_cs) + return -EFAULT; + +#ifdef CONFIG_64BIT + if (get_user(*rseq_cs, &rseq->rseq_cs)) + return -EFAULT; +#else + if (copy_from_user(rseq_cs, &rseq->rseq_cs, sizeof(*rseq_cs))) + return -EFAULT; +#endif + + return 0; +} + +/* + * If the rseq_cs field of 'struct rseq' contains a valid pointer to + * user-space, copy 'struct rseq_cs' from user-space and validate its fields. + */ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) { struct rseq_cs __user *urseq_cs; @@ -157,17 +180,16 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) u32 sig; int ret; -#ifdef CONFIG_64BIT - if (get_user(ptr, &t->rseq->rseq_cs)) - return -EFAULT; -#else - if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr))) - return -EFAULT; -#endif + ret = rseq_get_rseq_cs_ptr_val(t->rseq, &ptr); + if (ret) + return ret; + + /* If the rseq_cs pointer is NULL, return a cleared struct rseq_cs. */ if (!ptr) { memset(rseq_cs, 0, sizeof(*rseq_cs)); return 0; } + /* Check that the pointer value fits in the user-space process space. */ if (ptr >= TASK_SIZE) return -EINVAL; urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr; @@ -243,7 +265,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags) return !!event_mask; } -static int clear_rseq_cs(struct task_struct *t) +static int clear_rseq_cs(struct rseq __user *rseq) { /* * The rseq_cs field is set to NULL on preemption or signal @@ -254,9 +276,9 @@ static int clear_rseq_cs(struct task_struct *t) * Set rseq_cs to NULL. 
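
rseq_get_rseq_cs_ptr_val() above reads a 64-bit user field with get_user() under CONFIG_64BIT and falls back to copy_from_user() on 32-bit, where a u64 cannot be fetched in a single access. A userspace analogue of dispatching on word size; the single-load-versus-memcpy split is illustrative, not the uaccess implementation:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * On an LP64 target a u64 field can be fetched with one load; a
 * 32-bit target has to copy it piecewise.
 */
static int read_u64_field(const uint64_t *field, uint64_t *out)
{
#if UINTPTR_MAX == 0xffffffffffffffffULL
    *out = *field;                    /* single naturally-aligned load */
#else
    memcpy(out, field, sizeof(*out)); /* two 32-bit accesses */
#endif
    return 0;
}

int main(void)
{
    uint64_t rseq_cs = 0x1000, v;

    read_u64_field(&rseq_cs, &v);
    printf("rseq_cs=%#llx\n", (unsigned long long)v);
    return 0;
}
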
*/ #ifdef CONFIG_64BIT - return put_user(0UL, &t->rseq->rseq_cs); + return put_user(0UL, &rseq->rseq_cs); #else - if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs))) + if (clear_user(&rseq->rseq_cs, sizeof(rseq->rseq_cs))) return -EFAULT; return 0; #endif @@ -288,11 +310,11 @@ static int rseq_ip_fixup(struct pt_regs *regs) * Clear the rseq_cs pointer and return. */ if (!in_rseq_cs(ip, &rseq_cs)) - return clear_rseq_cs(t); + return clear_rseq_cs(t->rseq); ret = rseq_need_restart(t, rseq_cs.flags); if (ret <= 0) return ret; - ret = clear_rseq_cs(t); + ret = clear_rseq_cs(t->rseq); if (ret) return ret; trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset, @@ -366,6 +388,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig) { int ret; + u64 rseq_cs; if (flags & RSEQ_FLAG_UNREGISTER) { if (flags & ~RSEQ_FLAG_UNREGISTER) @@ -420,6 +443,19 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, return -EINVAL; if (!access_ok(rseq, rseq_len)) return -EFAULT; + + /* + * If the rseq_cs pointer is non-NULL on registration, clear it to + * avoid a potential segfault on return to user-space. The proper thing + * to do would have been to fail the registration but this would break + * older libcs that reuse the rseq area for new threads without + * clearing the fields. + */ + if (rseq_get_rseq_cs_ptr_val(rseq, &rseq_cs)) + return -EFAULT; + if (rseq_cs && clear_rseq_cs(rseq)) + return -EFAULT; + current->rseq = rseq; current->rseq_len = rseq_len; current->rseq_sig = sig; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8ba19aa660f66..5bbe6ddb1888e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1349,7 +1349,7 @@ bool sched_can_stop_tick(struct rq *rq) if (scx_enabled() && !scx_can_stop_tick(rq)) return false; - if (rq->cfs.h_nr_running > 1) + if (rq->cfs.h_nr_queued > 1) return false; /* @@ -2275,6 +2275,12 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state * just go back and repeat. */ rq = task_rq_lock(p, &rf); + /* + * If task is sched_delayed, force dequeue it, to avoid always + * hitting the tick timeout in the queued case + */ + if (p->se.sched_delayed) + dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); trace_sched_wait_task(p); running = task_on_cpu(rq, p); queued = task_on_rq_queued(p); @@ -3931,6 +3937,11 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) if (task_on_scx(p)) return false; +#ifdef CONFIG_SMP + if (p->sched_class == &stop_sched_class) + return false; +#endif + /* * Do not complicate things with the async wake_list while the CPU is * in hotplug state. @@ -6020,7 +6031,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * opportunity to pull in more work from other CPUs. 
*/ if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && - rq->nr_running == rq->cfs.h_nr_running)) { + rq->nr_running == rq->cfs.h_nr_queued)) { p = pick_next_task_fair(rq, prev, rf); if (unlikely(p == RETRY_TASK)) @@ -6567,12 +6578,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * Otherwise marks the task's __state as RUNNING */ static bool try_to_block_task(struct rq *rq, struct task_struct *p, - unsigned long task_state) + unsigned long *task_state_p) { + unsigned long task_state = *task_state_p; int flags = DEQUEUE_NOCLOCK; if (signal_pending_state(task_state, p)) { WRITE_ONCE(p->__state, TASK_RUNNING); + *task_state_p = TASK_RUNNING; return false; } @@ -6706,7 +6719,7 @@ static void __sched notrace __schedule(int sched_mode) goto picked; } } else if (!preempt && prev_state) { - try_to_block_task(rq, prev, prev_state); + try_to_block_task(rq, prev, &prev_state); switch_count = &prev->nvcsw; } @@ -8544,7 +8557,7 @@ void __init sched_init(void) init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL); #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_EXT_GROUP_SCHED - root_task_group.scx_weight = CGROUP_WEIGHT_DFL; + scx_tg_init(&root_task_group); #endif /* CONFIG_EXT_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED root_task_group.rt_se = (struct sched_rt_entity **)ptr; @@ -8984,7 +8997,7 @@ struct task_group *sched_create_group(struct task_group *parent) if (!alloc_rt_sched_group(tg, parent)) goto err; - scx_group_set_weight(tg, CGROUP_WEIGHT_DFL); + scx_tg_init(tg); alloc_uclamp_sched_group(tg, parent); return tg; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 5e7ae404c8d2a..0a47e5155897c 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1485,7 +1485,9 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 if (dl_entity_is_special(dl_se)) return; - scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec); + scaled_delta_exec = delta_exec; + if (!dl_server(dl_se)) + scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec); dl_se->runtime -= scaled_delta_exec; @@ -1592,7 +1594,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 */ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p) { - s64 delta_exec, scaled_delta_exec; + s64 delta_exec; if (!rq->fair_server.dl_defer) return; @@ -1605,9 +1607,7 @@ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p) if (delta_exec < 0) return; - scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec); - - rq->fair_server.runtime -= scaled_delta_exec; + rq->fair_server.runtime -= delta_exec; if (rq->fair_server.runtime < 0) { rq->fair_server.dl_defer_running = 0; diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 2437b05297771..e3cacaab175b1 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -379,7 +379,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu return -EINVAL; } - if (rq->cfs.h_nr_running) { + if (rq->cfs.h_nr_queued) { update_rq_clock(rq); dl_server_stop(&rq->fair_server); } @@ -392,7 +392,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n", cpu_of(rq)); - if (rq->cfs.h_nr_running) + if (rq->cfs.h_nr_queued) dl_server_start(&rq->fair_server); } @@ -844,7 +844,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) spread = 
right_vruntime - left_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread)); SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); - SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running); + SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable); + SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued); SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed); SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running", cfs_rq->idle_nr_running); diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index ace5262642f9e..c801dd20c63d9 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3918,6 +3918,11 @@ static void scx_cgroup_warn_missing_idle(struct task_group *tg) cgroup_warned_missing_idle = true; } +void scx_tg_init(struct task_group *tg) +{ + tg->scx_weight = CGROUP_WEIGHT_DFL; +} + int scx_tg_online(struct task_group *tg) { int ret = 0; @@ -4053,12 +4058,12 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight) { percpu_down_read(&scx_cgroup_rwsem); - if (scx_cgroup_enabled && tg->scx_weight != weight) { - if (SCX_HAS_OP(cgroup_set_weight)) - SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight, - tg_cgrp(tg), weight); - tg->scx_weight = weight; - } + if (scx_cgroup_enabled && SCX_HAS_OP(cgroup_set_weight) && + tg->scx_weight != weight) + SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight, + tg_cgrp(tg), weight); + + tg->scx_weight = weight; percpu_up_read(&scx_cgroup_rwsem); } diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 1079b56b0f7ae..67032c30c754c 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -70,6 +70,7 @@ static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {} #ifdef CONFIG_CGROUP_SCHED #ifdef CONFIG_EXT_GROUP_SCHED +void scx_tg_init(struct task_group *tg); int scx_tg_online(struct task_group *tg); void scx_tg_offline(struct task_group *tg); int scx_cgroup_can_attach(struct cgroup_taskset *tset); @@ -79,6 +80,7 @@ void scx_cgroup_cancel_attach(struct cgroup_taskset *tset); void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight); void scx_group_set_idle(struct task_group *tg, bool idle); #else /* CONFIG_EXT_GROUP_SCHED */ +static inline void scx_tg_init(struct task_group *tg) {} static inline int scx_tg_online(struct task_group *tg) { return 0; } static inline void scx_tg_offline(struct task_group *tg) {} static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b34fd021f78e0..d894ccc8cb8f9 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2147,7 +2147,7 @@ static void update_numa_stats(struct task_numa_env *env, ns->load += cpu_load(rq); ns->runnable += cpu_runnable(rq); ns->util += cpu_util_cfs(cpu); - ns->nr_running += rq->cfs.h_nr_running; + ns->nr_running += rq->cfs.h_nr_queued; ns->compute_capacity += capacity_of(cpu); if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) { @@ -5427,7 +5427,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * When enqueuing a sched_entity, we must: * - Update loads to have both entity and cfs_rq synced with now. * - For group_entity, update its runnable_weight to reflect the new - * h_nr_running of its group cfs_rq. + * h_nr_queued of its group cfs_rq. 
* - For group_entity, update its weight to reflect the new share of * its group cfs_rq * - Add its new weight to cfs_rq->load.weight @@ -5511,6 +5511,7 @@ static void set_delayed(struct sched_entity *se) for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); + cfs_rq->h_nr_runnable--; cfs_rq->h_nr_delayed++; if (cfs_rq_throttled(cfs_rq)) break; @@ -5533,6 +5534,7 @@ static void clear_delayed(struct sched_entity *se) for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); + cfs_rq->h_nr_runnable++; cfs_rq->h_nr_delayed--; if (cfs_rq_throttled(cfs_rq)) break; @@ -5583,7 +5585,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * When dequeuing a sched_entity, we must: * - Update loads to have both entity and cfs_rq synced with now. * - For group_entity, update its runnable_weight to reflect the new - * h_nr_running of its group cfs_rq. + * h_nr_queued of its group cfs_rq. * - Subtract its previous weight from cfs_rq->load.weight. * - For group entity, update its weight to reflect the new share * of its group cfs_rq. @@ -5985,8 +5987,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; - long task_delta, idle_task_delta, delayed_delta, dequeue = 1; - long rq_h_nr_running = rq->cfs.h_nr_running; + long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1; + long rq_h_nr_queued = rq->cfs.h_nr_queued; raw_spin_lock(&cfs_b->lock); /* This will start the period timer if necessary */ @@ -6016,7 +6018,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); rcu_read_unlock(); - task_delta = cfs_rq->h_nr_running; + queued_delta = cfs_rq->h_nr_queued; + runnable_delta = cfs_rq->h_nr_runnable; idle_task_delta = cfs_rq->idle_h_nr_running; delayed_delta = cfs_rq->h_nr_delayed; for_each_sched_entity(se) { @@ -6038,9 +6041,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) dequeue_entity(qcfs_rq, se, flags); if (cfs_rq_is_idle(group_cfs_rq(se))) - idle_task_delta = cfs_rq->h_nr_running; + idle_task_delta = cfs_rq->h_nr_queued; - qcfs_rq->h_nr_running -= task_delta; + qcfs_rq->h_nr_queued -= queued_delta; + qcfs_rq->h_nr_runnable -= runnable_delta; qcfs_rq->idle_h_nr_running -= idle_task_delta; qcfs_rq->h_nr_delayed -= delayed_delta; @@ -6061,18 +6065,19 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) se_update_runnable(se); if (cfs_rq_is_idle(group_cfs_rq(se))) - idle_task_delta = cfs_rq->h_nr_running; + idle_task_delta = cfs_rq->h_nr_queued; - qcfs_rq->h_nr_running -= task_delta; + qcfs_rq->h_nr_queued -= queued_delta; + qcfs_rq->h_nr_runnable -= runnable_delta; qcfs_rq->idle_h_nr_running -= idle_task_delta; qcfs_rq->h_nr_delayed -= delayed_delta; } /* At this point se is NULL and we are at root level*/ - sub_nr_running(rq, task_delta); + sub_nr_running(rq, queued_delta); /* Stop the fair server if throttling resulted in no runnable tasks */ - if (rq_h_nr_running && !rq->cfs.h_nr_running) + if (rq_h_nr_queued && !rq->cfs.h_nr_queued) dl_server_stop(&rq->fair_server); done: /* @@ -6091,8 +6096,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; - long task_delta, idle_task_delta, delayed_delta; - long rq_h_nr_running = rq->cfs.h_nr_running; + long queued_delta, runnable_delta, idle_task_delta, delayed_delta; + long 
rq_h_nr_queued = rq->cfs.h_nr_queued; se = cfs_rq->tg->se[cpu_of(rq)]; @@ -6125,7 +6130,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) goto unthrottle_throttle; } - task_delta = cfs_rq->h_nr_running; + queued_delta = cfs_rq->h_nr_queued; + runnable_delta = cfs_rq->h_nr_runnable; idle_task_delta = cfs_rq->idle_h_nr_running; delayed_delta = cfs_rq->h_nr_delayed; for_each_sched_entity(se) { @@ -6141,9 +6147,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); if (cfs_rq_is_idle(group_cfs_rq(se))) - idle_task_delta = cfs_rq->h_nr_running; + idle_task_delta = cfs_rq->h_nr_queued; - qcfs_rq->h_nr_running += task_delta; + qcfs_rq->h_nr_queued += queued_delta; + qcfs_rq->h_nr_runnable += runnable_delta; qcfs_rq->idle_h_nr_running += idle_task_delta; qcfs_rq->h_nr_delayed += delayed_delta; @@ -6159,9 +6166,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) se_update_runnable(se); if (cfs_rq_is_idle(group_cfs_rq(se))) - idle_task_delta = cfs_rq->h_nr_running; + idle_task_delta = cfs_rq->h_nr_queued; - qcfs_rq->h_nr_running += task_delta; + qcfs_rq->h_nr_queued += queued_delta; + qcfs_rq->h_nr_runnable += runnable_delta; qcfs_rq->idle_h_nr_running += idle_task_delta; qcfs_rq->h_nr_delayed += delayed_delta; @@ -6171,11 +6179,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) } /* Start the fair server if un-throttling resulted in new runnable tasks */ - if (!rq_h_nr_running && rq->cfs.h_nr_running) + if (!rq_h_nr_queued && rq->cfs.h_nr_queued) dl_server_start(&rq->fair_server); /* At this point se is NULL and we are at root level*/ - add_nr_running(rq, task_delta); + add_nr_running(rq, queued_delta); unthrottle_throttle: assert_list_leaf_cfs_rq(rq); @@ -6890,7 +6898,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) SCHED_WARN_ON(task_rq(p) != rq); - if (rq->cfs.h_nr_running > 1) { + if (rq->cfs.h_nr_queued > 1) { u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; u64 slice = se->slice; s64 delta = slice - ran; @@ -7033,7 +7041,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) int idle_h_nr_running = task_has_idle_policy(p); int h_nr_delayed = 0; int task_new = !(flags & ENQUEUE_WAKEUP); - int rq_h_nr_running = rq->cfs.h_nr_running; + int rq_h_nr_queued = rq->cfs.h_nr_queued; u64 slice = 0; /* @@ -7081,7 +7089,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) enqueue_entity(cfs_rq, se, flags); slice = cfs_rq_min_slice(cfs_rq); - cfs_rq->h_nr_running++; + if (!h_nr_delayed) + cfs_rq->h_nr_runnable++; + cfs_rq->h_nr_queued++; cfs_rq->idle_h_nr_running += idle_h_nr_running; cfs_rq->h_nr_delayed += h_nr_delayed; @@ -7107,7 +7117,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) min_vruntime_cb_propagate(&se->run_node, NULL); slice = cfs_rq_min_slice(cfs_rq); - cfs_rq->h_nr_running++; + if (!h_nr_delayed) + cfs_rq->h_nr_runnable++; + cfs_rq->h_nr_queued++; cfs_rq->idle_h_nr_running += idle_h_nr_running; cfs_rq->h_nr_delayed += h_nr_delayed; @@ -7119,7 +7131,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) goto enqueue_throttle; } - if (!rq_h_nr_running && rq->cfs.h_nr_running) { + if (!rq_h_nr_queued && rq->cfs.h_nr_queued) { /* Account for idle runtime */ if (!rq->nr_running) dl_server_update_idle_time(rq, rq->curr); @@ -7166,19 +7178,19 @@ static void set_next_buddy(struct sched_entity *se); static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) { bool was_sched_idle = sched_idle_rq(rq); - int rq_h_nr_running = 
rq->cfs.h_nr_running; + int rq_h_nr_queued = rq->cfs.h_nr_queued; bool task_sleep = flags & DEQUEUE_SLEEP; bool task_delayed = flags & DEQUEUE_DELAYED; struct task_struct *p = NULL; int idle_h_nr_running = 0; - int h_nr_running = 0; + int h_nr_queued = 0; int h_nr_delayed = 0; struct cfs_rq *cfs_rq; u64 slice = 0; if (entity_is_task(se)) { p = task_of(se); - h_nr_running = 1; + h_nr_queued = 1; idle_h_nr_running = task_has_idle_policy(p); if (!task_sleep && !task_delayed) h_nr_delayed = !!se->sched_delayed; @@ -7195,12 +7207,14 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) break; } - cfs_rq->h_nr_running -= h_nr_running; + if (!h_nr_delayed) + cfs_rq->h_nr_runnable -= h_nr_queued; + cfs_rq->h_nr_queued -= h_nr_queued; cfs_rq->idle_h_nr_running -= idle_h_nr_running; cfs_rq->h_nr_delayed -= h_nr_delayed; if (cfs_rq_is_idle(cfs_rq)) - idle_h_nr_running = h_nr_running; + idle_h_nr_running = h_nr_queued; /* end evaluation on encountering a throttled cfs_rq */ if (cfs_rq_throttled(cfs_rq)) @@ -7236,21 +7250,23 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) min_vruntime_cb_propagate(&se->run_node, NULL); slice = cfs_rq_min_slice(cfs_rq); - cfs_rq->h_nr_running -= h_nr_running; + if (!h_nr_delayed) + cfs_rq->h_nr_runnable -= h_nr_queued; + cfs_rq->h_nr_queued -= h_nr_queued; cfs_rq->idle_h_nr_running -= idle_h_nr_running; cfs_rq->h_nr_delayed -= h_nr_delayed; if (cfs_rq_is_idle(cfs_rq)) - idle_h_nr_running = h_nr_running; + idle_h_nr_running = h_nr_queued; /* end evaluation on encountering a throttled cfs_rq */ if (cfs_rq_throttled(cfs_rq)) return 0; } - sub_nr_running(rq, h_nr_running); + sub_nr_running(rq, h_nr_queued); - if (rq_h_nr_running && !rq->cfs.h_nr_running) + if (rq_h_nr_queued && !rq->cfs.h_nr_queued) dl_server_stop(&rq->fair_server); /* balance early to pull high priority tasks */ @@ -7297,6 +7313,11 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) return true; } +static inline unsigned int cfs_h_nr_delayed(struct rq *rq) +{ + return (rq->cfs.h_nr_queued - rq->cfs.h_nr_runnable); +} + #ifdef CONFIG_SMP /* Working cpumask for: sched_balance_rq(), sched_balance_newidle(). */ @@ -7458,8 +7479,12 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync) if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) return available_idle_cpu(prev_cpu) ? 
prev_cpu : this_cpu; - if (sync && cpu_rq(this_cpu)->nr_running == 1) - return this_cpu; + if (sync) { + struct rq *rq = cpu_rq(this_cpu); + + if ((rq->nr_running - cfs_h_nr_delayed(rq)) == 1) + return this_cpu; + } if (available_idle_cpu(prev_cpu)) return prev_cpu; @@ -10394,7 +10419,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) * When there is more than 1 task, the group_overloaded case already * takes care of cpu with reduced capacity */ - if (rq->cfs.h_nr_running != 1) + if (rq->cfs.h_nr_queued != 1) return false; return check_cpu_capacity(rq, sd); @@ -10429,7 +10454,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, sgs->group_load += load; sgs->group_util += cpu_util_cfs(i); sgs->group_runnable += cpu_runnable(rq); - sgs->sum_h_nr_running += rq->cfs.h_nr_running; + sgs->sum_h_nr_running += rq->cfs.h_nr_queued; nr_running = rq->nr_running; sgs->sum_nr_running += nr_running; @@ -10744,7 +10769,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, sgs->group_util += cpu_util_without(i, p); sgs->group_runnable += cpu_runnable_without(rq, p); local = task_running_on_cpu(i, p); - sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; + sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local; nr_running = rq->nr_running - local; sgs->sum_nr_running += nr_running; @@ -11526,7 +11551,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env, if (rt > env->fbq_type) continue; - nr_running = rq->cfs.h_nr_running; + nr_running = rq->cfs.h_nr_queued; if (!nr_running) continue; @@ -11685,7 +11710,7 @@ static int need_active_balance(struct lb_env *env) * available on dst_cpu. */ if (env->idle && - (env->src_rq->cfs.h_nr_running == 1)) { + (env->src_rq->cfs.h_nr_queued == 1)) { if ((check_cpu_capacity(env->src_rq, sd)) && (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) return 1; @@ -12428,7 +12453,7 @@ static void nohz_balancer_kick(struct rq *rq) * If there's a runnable CFS task and the current CPU has reduced * capacity, kick the ILB to see if there's a better CPU to run on: */ - if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { + if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) { flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; goto unlock; } @@ -12926,11 +12951,11 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) * have been enqueued in the meantime. Since we're not going idle, * pretend we pulled a task. */ - if (this_rq->cfs.h_nr_running && !pulled_task) + if (this_rq->cfs.h_nr_queued && !pulled_task) pulled_task = 1; /* Is there a task of a high priority class? 
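
The fair.c changes above split the old h_nr_running into h_nr_queued (everything on the queue) and h_nr_runnable (excluding delayed-dequeue entities), with cfs_h_nr_delayed() derived as their difference; both counters are maintained up the task-group hierarchy. A toy model of that bookkeeping, using plain structs and none of the scheduler machinery:

#include <stdio.h>

/*
 * Every queued entity counts toward h_nr_queued; delayed-dequeue
 * entities are excluded from h_nr_runnable. The delayed count is
 * the difference, as in cfs_h_nr_delayed().
 */
struct cfs_rq_model {
    struct cfs_rq_model *parent;
    int h_nr_queued;
    int h_nr_runnable;
};

static void enqueue(struct cfs_rq_model *rq, int delayed)
{
    for (; rq; rq = rq->parent) {
        rq->h_nr_queued++;
        if (!delayed)
            rq->h_nr_runnable++;
    }
}

int main(void)
{
    struct cfs_rq_model root = { 0 };
    struct cfs_rq_model group = { .parent = &root };

    enqueue(&group, 0);    /* runnable task */
    enqueue(&group, 1);    /* delayed-dequeue task */
    printf("root: queued=%d runnable=%d delayed=%d\n",
           root.h_nr_queued, root.h_nr_runnable,
           root.h_nr_queued - root.h_nr_runnable);
    return 0;
}
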
*/ - if (this_rq->nr_running != this_rq->cfs.h_nr_running) + if (this_rq->nr_running != this_rq->cfs.h_nr_queued) pulled_task = -1; out: @@ -13617,7 +13642,7 @@ int sched_group_set_idle(struct task_group *tg, long idle) parent_cfs_rq->idle_nr_running--; } - idle_task_delta = grp_cfs_rq->h_nr_running - + idle_task_delta = grp_cfs_rq->h_nr_queued - grp_cfs_rq->idle_h_nr_running; if (!cfs_rq_is_idle(grp_cfs_rq)) idle_task_delta *= -1; diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index 171a802420a10..8189a35e53fe1 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -275,7 +275,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load) * * group: [ see update_cfs_group() ] * se_weight() = tg->weight * grq->load_avg / tg->load_avg - * se_runnable() = grq->h_nr_running + * se_runnable() = grq->h_nr_queued * * runnable_sum = se_runnable() * runnable = grq->runnable_sum * runnable_avg = runnable_sum @@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) { if (___update_load_sum(now, &cfs_rq->avg, scale_load_down(cfs_rq->load.weight), - cfs_rq->h_nr_running - cfs_rq->h_nr_delayed, + cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed, cfs_rq->curr != NULL)) { ___update_load_avg(&cfs_rq->avg, 1); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 8c30262a02632..2077b11a211af 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -651,7 +651,8 @@ struct balance_callback { struct cfs_rq { struct load_weight load; unsigned int nr_running; - unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ + unsigned int h_nr_queued; /* SCHED_{NORMAL,BATCH,IDLE} */ + unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */ unsigned int idle_nr_running; /* SCHED_IDLE */ unsigned int idle_h_nr_running; /* SCHED_IDLE */ unsigned int h_nr_delayed; @@ -907,7 +908,7 @@ static inline void se_update_runnable(struct sched_entity *se) if (!entity_is_task(se)) { struct cfs_rq *cfs_rq = se->my_q; - se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed; + se->runnable_weight = cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed; } } diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index da821ce258ea7..d758e66ad59e4 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -82,18 +82,15 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done) } static void __cpu_stop_queue_work(struct cpu_stopper *stopper, - struct cpu_stop_work *work, - struct wake_q_head *wakeq) + struct cpu_stop_work *work) { list_add_tail(&work->list, &stopper->works); - wake_q_add(wakeq, stopper->thread); } /* queue @work to @stopper. 
if offline, @work is completed immediately */ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - DEFINE_WAKE_Q(wakeq); unsigned long flags; bool enabled; @@ -101,12 +98,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) raw_spin_lock_irqsave(&stopper->lock, flags); enabled = stopper->enabled; if (enabled) - __cpu_stop_queue_work(stopper, work, &wakeq); + __cpu_stop_queue_work(stopper, work); else if (work->done) cpu_stop_signal_done(work->done); raw_spin_unlock_irqrestore(&stopper->lock, flags); - wake_up_q(&wakeq); + if (enabled) + wake_up_process(stopper->thread); preempt_enable(); return enabled; @@ -263,7 +261,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, { struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1); struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); - DEFINE_WAKE_Q(wakeq); int err; retry: @@ -299,8 +296,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, } err = 0; - __cpu_stop_queue_work(stopper1, work1, &wakeq); - __cpu_stop_queue_work(stopper2, work2, &wakeq); + __cpu_stop_queue_work(stopper1, work1); + __cpu_stop_queue_work(stopper2, work2); unlock: raw_spin_unlock(&stopper2->lock); @@ -315,7 +312,10 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, goto retry; } - wake_up_q(&wakeq); + if (!err) { + wake_up_process(stopper1->thread); + wake_up_process(stopper2->thread); + } preempt_enable(); return err; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 58fb7280cabbe..ae862ad9642cb 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -302,7 +302,7 @@ static void clocksource_verify_choose_cpus(void) { int cpu, i, n = verify_n_cpus; - if (n < 0) { + if (n < 0 || n >= num_online_cpus()) { /* Check all of the CPUs. */ cpumask_copy(&cpus_chosen, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &cpus_chosen); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 6bcee47040592..d44641108ba81 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1400,6 +1400,15 @@ void run_posix_cpu_timers(void) lockdep_assert_irqs_disabled(); + /* + * Ensure that release_task(tsk) can't happen while + * handle_posix_cpu_timers() is running. Otherwise, a concurrent + * posix_cpu_timer_del() may fail to lock_task_sighand(tsk) and + * miss timer->it.cpu.firing != 0. + */ + if (tsk->exit_state) + return; + /* * If the actual expiry is deferred to task work context and the * work is already scheduled there is no point to do anything here. 
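
The stop_machine.c rework above queues work under stopper->lock but calls wake_up_process() only after the lock is released, instead of batching wakeups through a wake_q. The classic publish-under-lock, wake-after-unlock shape, shown with pthreads; a condition variable stands in for the kthread wakeup:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool queued;

static void *stopper(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!queued)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    puts("stopper: saw queued work");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, stopper, NULL);

    pthread_mutex_lock(&lock);
    queued = true;              /* publish the work under the lock */
    pthread_mutex_unlock(&lock);

    pthread_cond_signal(&cond); /* wake only after unlocking */

    pthread_join(t, NULL);
    return 0;
}

Because the predicate is set under the lock and re-checked by the waiter, signalling after the unlock cannot lose a wakeup.
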
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index e5c063fc8ef97..3ec7df7dbeec4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1828,7 +1828,7 @@ static struct pt_regs *get_bpf_raw_tp_regs(void) struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); - if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { + if (nest_level > ARRAY_SIZE(tp_regs->regs)) { this_cpu_dec(bpf_raw_tp_nest_level); return ERR_PTR(-EBUSY); } @@ -2932,6 +2932,9 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (sizeof(u64) != sizeof(void *)) return -EOPNOTSUPP; + if (attr->link_create.flags) + return -EINVAL; + if (!is_kprobe_multi(prog)) return -EINVAL; @@ -3346,7 +3349,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr } if (pid) { + rcu_read_lock(); task = get_pid_task(find_vpid(pid), PIDTYPE_TGID); + rcu_read_unlock(); if (!task) { err = -ESRCH; goto error_path_put; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e67d67f7b9065..ad7db84b04090 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -7295,9 +7295,10 @@ void ftrace_release_mod(struct module *mod) mutex_lock(&ftrace_lock); - if (ftrace_disabled) - goto out_unlock; - + /* + * To avoid the UAF problem after the module is unloaded, the + * 'mod_map' resource needs to be released unconditionally. + */ list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { if (mod_map->mod == mod) { list_del_rcu(&mod_map->list); @@ -7306,6 +7307,9 @@ void ftrace_release_mod(struct module *mod) } } + if (ftrace_disabled) + goto out_unlock; + /* * Each module has its own ftrace_pages, remove * them from the list. diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index baa5547e977a0..6ab740d3185bc 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2796,6 +2796,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, if (nr_pages < 2) nr_pages = 2; + /* + * Keep CPUs from coming online while resizing to synchronize + * with new per CPU buffers being created. + */ + guard(cpus_read_lock)(); + /* prevent another thread from changing buffer sizes */ mutex_lock(&buffer->mutex); atomic_inc(&buffer->resizing); @@ -2840,7 +2846,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, cond_resched(); } - cpus_read_lock(); /* * Fire off all the required work handlers * We can't schedule on offline CPUs, but it's not necessary @@ -2880,7 +2885,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, cpu_buffer->nr_pages_to_update = 0; } - cpus_read_unlock(); } else { cpu_buffer = buffer->buffers[cpu_id]; @@ -2908,8 +2912,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, goto out_err; } - cpus_read_lock(); - /* Can't run something on an offline CPU. 
*/ if (!cpu_online(cpu_id)) rb_update_pages(cpu_buffer); @@ -2928,7 +2930,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, } cpu_buffer->nr_pages_to_update = 0; - cpus_read_unlock(); } out: @@ -6754,7 +6755,7 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order) old_size = buffer->subbuf_size; /* prevent another thread from changing buffer sizes */ - mutex_lock(&buffer->mutex); + guard(mutex)(&buffer->mutex); atomic_inc(&buffer->record_disabled); /* Make sure all commits have finished */ @@ -6859,7 +6860,6 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order) } atomic_dec(&buffer->record_disabled); - mutex_unlock(&buffer->mutex); return 0; @@ -6868,7 +6868,6 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order) buffer->subbuf_size = old_size; atomic_dec(&buffer->record_disabled); - mutex_unlock(&buffer->mutex); for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; @@ -7274,8 +7273,8 @@ int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu) /* Check if any events were dropped */ missed_events = cpu_buffer->lost_events; - if (cpu_buffer->reader_page != cpu_buffer->commit_page) { - if (missed_events) { + if (missed_events) { + if (cpu_buffer->reader_page != cpu_buffer->commit_page) { struct buffer_data_page *bpage = reader->page; unsigned int commit; /* @@ -7296,13 +7295,23 @@ int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu) local_add(RB_MISSED_STORED, &bpage->commit); } local_add(RB_MISSED_EVENTS, &bpage->commit); + } else if (!WARN_ONCE(cpu_buffer->reader_page == cpu_buffer->tail_page, + "Reader on commit with %ld missed events", + missed_events)) { + /* + * There shouldn't be any missed events if the tail_page + * is on the reader page. But if the tail page is not on the + * reader page and the commit_page is, that would mean that + * there's a commit_overrun (an interrupt preempted an + * addition of an event and then filled the buffer + * with new events). In this case it's not an + * error, but it should still be reported. + * + * TODO: Add missed events to the page for user space to know. + */ + pr_info("Ring buffer [%d] commit overrun lost %ld events at timestamp:%lld\n", + cpu, missed_events, cpu_buffer->reader_page->page->time_stamp); } - } else { - /* - * There really shouldn't be any missed events if the commit - * is on the reader page. 
- */ - WARN_ON_ONCE(missed_events); } cpu_buffer->lost_events = 0; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f063fba61a97e..fee97e86adff2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6701,7 +6701,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), min((size_t)trace_seq_used(&iter->seq), - PAGE_SIZE)); + (size_t)PAGE_SIZE)); if (ret < 0) { __free_page(spd.pages[i]); break; } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 82da3ac140242..57e1af1d3e6d4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1731,6 +1731,9 @@ extern int event_enable_register_trigger(char *glob, extern void event_enable_unregister_trigger(char *glob, struct event_trigger_data *test, struct trace_event_file *file); +extern struct event_trigger_data * +trigger_data_alloc(struct event_command *cmd_ops, char *cmd, char *param, + void *private_data); extern void trigger_data_free(struct event_trigger_data *data); extern int event_trigger_init(struct event_trigger_data *data); extern int trace_event_trigger_enable_disable(struct trace_event_file *file, @@ -1757,11 +1760,6 @@ extern bool event_trigger_check_remove(const char *glob); extern bool event_trigger_empty_param(const char *param); extern int event_trigger_separate_filter(char *param_and_filter, char **param, char **filter, bool param_required); -extern struct event_trigger_data * -event_trigger_alloc(struct event_command *cmd_ops, - char *cmd, - char *param, - void *private_data); extern int event_trigger_parse_num(char *trigger, struct event_trigger_data *trigger_data); extern int event_trigger_set_filter(struct event_command *cmd_ops, diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 4ebafc655223a..3379e14d38e9b 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -5249,17 +5249,94 @@ hist_trigger_actions(struct hist_trigger_data *hist_data, } } +/* + * The hist_pad structure is used to save information to create + * a histogram from the histogram trigger. It's too big to store + * on the stack, so when the histogram trigger is initialized + * a percpu array of 4 hist_pad structures is allocated. + * This will cover every context from normal, softirq, irq and NMI + * in the very unlikely event that a trigger happens at each of + * these contexts and interrupts a currently active trigger.
+ */ +struct hist_pad { + unsigned long entries[HIST_STACKTRACE_DEPTH]; + u64 var_ref_vals[TRACING_MAP_VARS_MAX]; + char compound_key[HIST_KEY_SIZE_MAX]; +}; + +static struct hist_pad __percpu *hist_pads; +static DEFINE_PER_CPU(int, hist_pad_cnt); +static refcount_t hist_pad_ref; + +/* One hist_pad for every context (normal, softirq, irq, NMI) */ +#define MAX_HIST_CNT 4 + +static int alloc_hist_pad(void) +{ + lockdep_assert_held(&event_mutex); + + if (refcount_read(&hist_pad_ref)) { + refcount_inc(&hist_pad_ref); + return 0; + } + + hist_pads = __alloc_percpu(sizeof(struct hist_pad) * MAX_HIST_CNT, + __alignof__(struct hist_pad)); + if (!hist_pads) + return -ENOMEM; + + refcount_set(&hist_pad_ref, 1); + return 0; +} + +static void free_hist_pad(void) +{ + lockdep_assert_held(&event_mutex); + + if (!refcount_dec_and_test(&hist_pad_ref)) + return; + + free_percpu(hist_pads); + hist_pads = NULL; +} + +static struct hist_pad *get_hist_pad(void) +{ + struct hist_pad *hist_pad; + int cnt; + + if (WARN_ON_ONCE(!hist_pads)) + return NULL; + + preempt_disable(); + + hist_pad = per_cpu_ptr(hist_pads, smp_processor_id()); + + if (this_cpu_read(hist_pad_cnt) == MAX_HIST_CNT) { + preempt_enable(); + return NULL; + } + + cnt = this_cpu_inc_return(hist_pad_cnt) - 1; + + return &hist_pad[cnt]; +} + +static void put_hist_pad(void) +{ + this_cpu_dec(hist_pad_cnt); + preempt_enable(); +} + static void event_hist_trigger(struct event_trigger_data *data, struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe) { struct hist_trigger_data *hist_data = data->private_data; bool use_compound_key = (hist_data->n_keys > 1); - unsigned long entries[HIST_STACKTRACE_DEPTH]; - u64 var_ref_vals[TRACING_MAP_VARS_MAX]; - char compound_key[HIST_KEY_SIZE_MAX]; struct tracing_map_elt *elt = NULL; struct hist_field *key_field; + struct hist_pad *hist_pad; u64 field_contents; void *key = NULL; unsigned int i; @@ -5267,12 +5344,18 @@ static void event_hist_trigger(struct event_trigger_data *data, if (unlikely(!rbe)) return; - memset(compound_key, 0, hist_data->key_size); + hist_pad = get_hist_pad(); + if (!hist_pad) + return; + + memset(hist_pad->compound_key, 0, hist_data->key_size); for_each_hist_key_field(i, hist_data) { key_field = hist_data->fields[i]; if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { + unsigned long *entries = hist_pad->entries; + memset(entries, 0, HIST_STACKTRACE_SIZE); if (key_field->field) { unsigned long *stack, n_entries; @@ -5296,26 +5379,31 @@ static void event_hist_trigger(struct event_trigger_data *data, } if (use_compound_key) - add_to_key(compound_key, key, key_field, rec); + add_to_key(hist_pad->compound_key, key, key_field, rec); } if (use_compound_key) - key = compound_key; + key = hist_pad->compound_key; if (hist_data->n_var_refs && - !resolve_var_refs(hist_data, key, var_ref_vals, false)) - return; + !resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, false)) + goto out; elt = tracing_map_insert(hist_data->map, key); if (!elt) - return; + goto out; - hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, var_ref_vals); + hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, hist_pad->var_ref_vals); - if (resolve_var_refs(hist_data, key, var_ref_vals, true)) - hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals); + if (resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, true)) { + hist_trigger_actions(hist_data, elt, buffer, rec, rbe, + key, hist_pad->var_ref_vals); + } hist_poll_wakeup(); + + out: + put_hist_pad(); } static void 
hist_trigger_stacktrace_print(struct seq_file *m, @@ -6160,6 +6248,9 @@ static int event_hist_trigger_init(struct event_trigger_data *data) { struct hist_trigger_data *hist_data = data->private_data; + if (alloc_hist_pad() < 0) + return -ENOMEM; + if (!data->ref && hist_data->attrs->name) save_named_trigger(hist_data->attrs->name, data); @@ -6204,6 +6295,7 @@ static void event_hist_trigger_free(struct event_trigger_data *data) destroy_hist_data(hist_data); } + free_hist_pad(); } static struct event_trigger_ops event_hist_trigger_ops = { @@ -6219,9 +6311,7 @@ static int event_hist_trigger_named_init(struct event_trigger_data *data) save_named_trigger(data->named_data->name, data); - event_hist_trigger_init(data->named_data); - - return 0; + return event_hist_trigger_init(data->named_data); } static void event_hist_trigger_named_free(struct event_trigger_data *data) @@ -6708,7 +6798,7 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops, return PTR_ERR(hist_data); } - trigger_data = event_trigger_alloc(cmd_ops, cmd, param, hist_data); + trigger_data = trigger_data_alloc(cmd_ops, cmd, param, hist_data); if (!trigger_data) { ret = -ENOMEM; goto out_free; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 27e21488d5741..d5dbda9b0e4b0 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -825,7 +825,7 @@ int event_trigger_separate_filter(char *param_and_filter, char **param, } /** - * event_trigger_alloc - allocate and init event_trigger_data for a trigger + * trigger_data_alloc - allocate and init event_trigger_data for a trigger * @cmd_ops: The event_command operations for the trigger * @cmd: The cmd string * @param: The param string @@ -836,14 +836,14 @@ int event_trigger_separate_filter(char *param_and_filter, char **param, * trigger_ops to assign to the event_trigger_data. @private_data can * also be passed in and associated with the event_trigger_data. * - * Use event_trigger_free() to free an event_trigger_data object. + * Use trigger_data_free() to free an event_trigger_data object. 
* * Return: The trigger_data object on success, NULL otherwise */ -struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops, - char *cmd, - char *param, - void *private_data) +struct event_trigger_data *trigger_data_alloc(struct event_command *cmd_ops, + char *cmd, + char *param, + void *private_data) { struct event_trigger_data *trigger_data; struct event_trigger_ops *trigger_ops; @@ -1010,13 +1010,13 @@ event_trigger_parse(struct event_command *cmd_ops, return ret; ret = -ENOMEM; - trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file); + trigger_data = trigger_data_alloc(cmd_ops, cmd, param, file); if (!trigger_data) goto out; if (remove) { event_trigger_unregister(cmd_ops, file, glob+1, trigger_data); - kfree(trigger_data); + trigger_data_free(trigger_data); ret = 0; goto out; } @@ -1043,7 +1043,7 @@ event_trigger_parse(struct event_command *cmd_ops, out_free: event_trigger_reset_filter(cmd_ops, trigger_data); - kfree(trigger_data); + trigger_data_free(trigger_data); goto out; } @@ -1814,7 +1814,7 @@ int event_enable_trigger_parse(struct event_command *cmd_ops, enable_data->enable = enable; enable_data->file = event_enable_file; - trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data); + trigger_data = trigger_data_alloc(cmd_ops, cmd, param, enable_data); if (!trigger_data) { kfree(enable_data); goto out; } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 4dc72540c3b0f..8fbb4385e8149 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -47,6 +47,7 @@ int __read_mostly watchdog_user_enabled = 1; static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT; static int __read_mostly watchdog_softlockup_user_enabled = 1; int __read_mostly watchdog_thresh = 10; +static int __read_mostly watchdog_thresh_next; static int __read_mostly watchdog_hardlockup_available; struct cpumask watchdog_cpumask __read_mostly; @@ -863,12 +864,20 @@ int lockup_detector_offline_cpu(unsigned int cpu) return 0; } -static void __lockup_detector_reconfigure(void) +static void __lockup_detector_reconfigure(bool thresh_changed) { cpus_read_lock(); watchdog_hardlockup_stop(); softlockup_stop_all(); + /* + * To prevent watchdog_timer_fn from using the old interval and + * the new watchdog_thresh at the same time, which could lead to + * false softlockup reports, it is necessary to update the + * watchdog_thresh after the softlockup is completed.
+ */ + if (thresh_changed) + watchdog_thresh = READ_ONCE(watchdog_thresh_next); set_sample_period(); lockup_detector_update_enable(); if (watchdog_enabled && watchdog_thresh) @@ -881,7 +890,7 @@ static void __lockup_detector_reconfigure(void) void lockup_detector_reconfigure(void) { mutex_lock(&watchdog_mutex); - __lockup_detector_reconfigure(); + __lockup_detector_reconfigure(false); mutex_unlock(&watchdog_mutex); } @@ -901,27 +910,29 @@ static __init void lockup_detector_setup(void) return; mutex_lock(&watchdog_mutex); - __lockup_detector_reconfigure(); + __lockup_detector_reconfigure(false); softlockup_initialized = true; mutex_unlock(&watchdog_mutex); } #else /* CONFIG_SOFTLOCKUP_DETECTOR */ -static void __lockup_detector_reconfigure(void) +static void __lockup_detector_reconfigure(bool thresh_changed) { cpus_read_lock(); watchdog_hardlockup_stop(); + if (thresh_changed) + watchdog_thresh = READ_ONCE(watchdog_thresh_next); lockup_detector_update_enable(); watchdog_hardlockup_start(); cpus_read_unlock(); } void lockup_detector_reconfigure(void) { - __lockup_detector_reconfigure(); + __lockup_detector_reconfigure(false); } static inline void lockup_detector_setup(void) { - __lockup_detector_reconfigure(); + __lockup_detector_reconfigure(false); } #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */ @@ -939,11 +950,11 @@ void lockup_detector_soft_poweroff(void) #ifdef CONFIG_SYSCTL /* Propagate any changes to the watchdog infrastructure */ -static void proc_watchdog_update(void) +static void proc_watchdog_update(bool thresh_changed) { /* Remove impossible cpus to keep sysctl output clean. */ cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask); - __lockup_detector_reconfigure(); + __lockup_detector_reconfigure(thresh_changed); } /* @@ -976,7 +987,7 @@ static int proc_watchdog_common(int which, const struct ctl_table *table, int wr old = READ_ONCE(*param); err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (!err && old != READ_ONCE(*param)) - proc_watchdog_update(); + proc_watchdog_update(false); } mutex_unlock(&watchdog_mutex); return err; @@ -1027,11 +1038,13 @@ static int proc_watchdog_thresh(const struct ctl_table *table, int write, mutex_lock(&watchdog_mutex); - old = READ_ONCE(watchdog_thresh); + watchdog_thresh_next = READ_ONCE(watchdog_thresh); + + old = watchdog_thresh_next; err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - if (!err && write && old != READ_ONCE(watchdog_thresh)) - proc_watchdog_update(); + if (!err && write && old != READ_ONCE(watchdog_thresh_next)) + proc_watchdog_update(true); mutex_unlock(&watchdog_mutex); return err; @@ -1052,7 +1065,7 @@ static int proc_watchdog_cpumask(const struct ctl_table *table, int write, err = proc_do_large_bitmap(table, write, buffer, lenp, ppos); if (!err && write) - proc_watchdog_update(); + proc_watchdog_update(false); mutex_unlock(&watchdog_mutex); return err; @@ -1072,7 +1085,7 @@ static struct ctl_table watchdog_sysctls[] = { }, { .procname = "watchdog_thresh", - .data = &watchdog_thresh, + .data = &watchdog_thresh_next, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_watchdog_thresh, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a9d64e08dffc7..3c87eb98609c0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7731,7 +7731,8 @@ void __init workqueue_init_early(void) restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask); - + cpumask_andnot(wq_isolated_cpumask, cpu_possible_mask, + 
housekeeping_cpumask(HK_TYPE_DOMAIN)); pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs(); diff --git a/lib/Kconfig b/lib/Kconfig index b38849af6f130..b893c9288c140 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -767,6 +767,7 @@ config GENERIC_LIB_DEVMEM_IS_ALLOWED config PLDMFW bool + select CRC32 default n config ASN1_ENCODER diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e48375fe5a50c..b1d7c427bbe3d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2807,6 +2807,15 @@ config FORTIFY_KUNIT_TEST by the str*() and mem*() family of functions. For testing runtime traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests. +config LONGEST_SYM_KUNIT_TEST + tristate "Test the longest symbol possible" if !KUNIT_ALL_TESTS + depends on KUNIT && KPROBES + default KUNIT_ALL_TESTS + help + Tests the longest symbol possible + + If unsure, say N. + config HW_BREAKPOINT_KUNIT_TEST bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS depends on HAVE_HW_BREAKPOINT diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 37655f58b8554..4e4dc430614a4 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -118,6 +118,8 @@ config UBSAN_UNREACHABLE config UBSAN_SIGNED_WRAP bool "Perform checking for signed arithmetic wrap-around" + # This is very experimental so drop the next line if you really want it + depends on BROKEN depends on !COMPILE_TEST # The no_sanitize attribute was introduced in GCC with version 8. depends on !CC_IS_GCC || GCC_VERSION >= 80000 diff --git a/lib/Makefile b/lib/Makefile index 773adf88af416..fc878e716825d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -389,6 +389,8 @@ CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o +obj-$(CONFIG_LONGEST_SYM_KUNIT_TEST) += longest_symbol_kunit.o +CFLAGS_longest_symbol_kunit.o += $(call cc-disable-warning, missing-prototypes) obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 81e5f9a70f220..e76c40bf29d06 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -113,6 +113,9 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sl struct codetag_bytes n; unsigned int i, nr = 0; + if (IS_ERR_OR_NULL(alloc_tag_cttype)) + return 0; + if (can_sleep) codetag_lock_module_list(alloc_tag_cttype, true); else if (!codetag_trylock_module_list(alloc_tag_cttype)) diff --git a/lib/group_cpus.c b/lib/group_cpus.c index ee272c4cefcc1..18d43a406114b 100644 --- a/lib/group_cpus.c +++ b/lib/group_cpus.c @@ -352,6 +352,9 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps) int ret = -ENOMEM; struct cpumask *masks = NULL; + if (numgrps == 0) + return NULL; + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) return NULL; @@ -426,8 +429,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps) #else /* CONFIG_SMP */ struct cpumask *group_cpus_evenly(unsigned int numgrps) { - struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); + struct cpumask *masks; + if (numgrps == 0) + return NULL; + + masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); if (!masks) return NULL; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index bdb37d572e97c..8ede6be556a96 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -820,7 +820,7 @@ static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, size_t size = 
i->count; do { - size_t len = bvec->bv_len; + size_t len = bvec->bv_len - skip; if (len > size) len = size; diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c index 92b2cccd5e763..484fd85251b41 100644 --- a/lib/kunit/static_stub.c +++ b/lib/kunit/static_stub.c @@ -96,7 +96,7 @@ void __kunit_activate_static_stub(struct kunit *test, /* If the replacement address is NULL, deactivate the stub. */ if (!replacement_addr) { - kunit_deactivate_static_stub(test, replacement_addr); + kunit_deactivate_static_stub(test, real_fn_addr); return; } diff --git a/lib/longest_symbol_kunit.c b/lib/longest_symbol_kunit.c new file mode 100644 index 0000000000000..e3c28ff1807f0 --- /dev/null +++ b/lib/longest_symbol_kunit.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test the longest symbol length. Execute with: + * ./tools/testing/kunit/kunit.py run longest-symbol + * --arch=x86_64 --kconfig_add CONFIG_KPROBES=y --kconfig_add CONFIG_MODULES=y + * --kconfig_add CONFIG_RETPOLINE=n --kconfig_add CONFIG_CFI_CLANG=n + * --kconfig_add CONFIG_MITIGATION_RETPOLINE=n + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <kunit/test.h> +#include <linux/stringify.h> +#include <linux/kprobes.h> +#include <linux/kallsyms.h> + +#define DI(name) s##name##name +#define DDI(name) DI(n##name##name) +#define DDDI(name) DDI(n##name##name) +#define DDDDI(name) DDDI(n##name##name) +#define DDDDDI(name) DDDDI(n##name##name) + +/* Generate a symbol whose name length is 511 */ +#define LONGEST_SYM_NAME DDDDDI(g1h2i3j4k5l6m7n) + +#define RETURN_LONGEST_SYM 0xAAAAA + +noinline int LONGEST_SYM_NAME(void); +noinline int LONGEST_SYM_NAME(void) +{ + return RETURN_LONGEST_SYM; +} + +_Static_assert(sizeof(__stringify(LONGEST_SYM_NAME)) == KSYM_NAME_LEN, +"Incorrect symbol length found. Expected KSYM_NAME_LEN: " +__stringify(KSYM_NAME_LEN) ", but found: " +__stringify(sizeof(LONGEST_SYM_NAME))); + +static void test_longest_symbol(struct kunit *test) +{ + KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, LONGEST_SYM_NAME()); +}; + +static void test_longest_symbol_kallsyms(struct kunit *test) +{ + unsigned long (*kallsyms_lookup_name)(const char *name); + static int (*longest_sym)(void); + + struct kprobe kp = { + .symbol_name = "kallsyms_lookup_name", + }; + + if (register_kprobe(&kp) < 0) { + pr_info("%s: kprobe not registered", __func__); + KUNIT_FAIL(test, "test_longest_symbol kallsyms: kprobe not registered\n"); + return; + } + + kunit_warn(test, "test_longest_symbol kallsyms: kprobe registered\n"); + kallsyms_lookup_name = (unsigned long (*)(const char *name))kp.addr; + unregister_kprobe(&kp); + + longest_sym = + (void *) kallsyms_lookup_name(__stringify(LONGEST_SYM_NAME)); + KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, longest_sym()); +}; + +static struct kunit_case longest_symbol_test_cases[] = { + KUNIT_CASE(test_longest_symbol), + KUNIT_CASE(test_longest_symbol_kallsyms), + {} +}; + +static struct kunit_suite longest_symbol_test_suite = { + .name = "longest-symbol", + .test_cases = longest_symbol_test_cases, +}; +kunit_test_suite(longest_symbol_test_suite); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Test the longest symbol length"); +MODULE_AUTHOR("Sergio González Collado"); diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 8d73ccf66f3aa..59f83ece20240 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5335,6 +5335,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, struct maple_enode *start; if (mte_is_leaf(enode)) { + mte_set_node_dead(enode); node->type = mte_node_type(enode); goto free_leaf; } @@ -5542,8 +5543,9 @@ int
mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) mas_wr_store_type(&wr_mas); request = mas_prealloc_calc(mas, entry); if (!request) - return ret; + goto set_flag; + mas->mas_flags &= ~MA_STATE_PREALLOC; mas_node_count_gfp(mas, request, gfp); if (mas_is_err(mas)) { mas_set_alloc_req(mas, 0); @@ -5553,6 +5555,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) return ret; } +set_flag: mas->mas_flags |= MA_STATE_PREALLOC; return ret; } diff --git a/lib/test_objagg.c b/lib/test_objagg.c index d34df4306b874..222b39fc2629e 100644 --- a/lib/test_objagg.c +++ b/lib/test_objagg.c @@ -899,8 +899,10 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints, int err; stats = objagg_hints_stats_get(objagg_hints); - if (IS_ERR(stats)) + if (IS_ERR(stats)) { + *errmsg = "objagg_hints_stats_get() failed."; return PTR_ERR(stats); + } err = __check_expect_stats(stats, expect_stats, errmsg); objagg_stats_put(stats); return err; diff --git a/lib/usercopy_kunit.c b/lib/usercopy_kunit.c index 77fa00a13df77..80f8abe10968c 100644 --- a/lib/usercopy_kunit.c +++ b/lib/usercopy_kunit.c @@ -27,6 +27,7 @@ !defined(CONFIG_MICROBLAZE) && \ !defined(CONFIG_NIOS2) && \ !defined(CONFIG_PPC32) && \ + !defined(CONFIG_SPARC32) && \ !defined(CONFIG_SUPERH)) # define TEST_U64 #endif diff --git a/localversion-rt b/localversion-rt index 22746d6390a42..05c35cb580779 100644 --- a/localversion-rt +++ b/localversion-rt @@ -1 +1 @@ --rt9 +-rt11 diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index b095457380b56..d9e01648db70e 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -423,6 +423,7 @@ static ssize_t memcg_path_store(struct kobject *kobj, return -ENOMEM; strscpy(path, buf, count + 1); + kfree(filter->memcg_path); filter->memcg_path = path; return count; } diff --git a/mm/gup.c b/mm/gup.c index 90866b827b60f..e323843cc5dd8 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2326,13 +2326,13 @@ static void pofs_unpin(struct pages_or_folios *pofs) /* * Returns the number of collected folios. Return value is always >= 0. 
*/ -static void collect_longterm_unpinnable_folios( +static unsigned long collect_longterm_unpinnable_folios( struct list_head *movable_folio_list, struct pages_or_folios *pofs) { + unsigned long i, collected = 0; struct folio *prev_folio = NULL; bool drain_allow = true; - unsigned long i; for (i = 0; i < pofs->nr_entries; i++) { struct folio *folio = pofs_get_folio(pofs, i); @@ -2344,6 +2344,8 @@ static void collect_longterm_unpinnable_folios( if (folio_is_longterm_pinnable(folio)) continue; + collected++; + if (folio_is_device_coherent(folio)) continue; @@ -2365,6 +2367,8 @@ static void collect_longterm_unpinnable_folios( NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); } + + return collected; } /* @@ -2441,9 +2445,11 @@ static long check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs) { LIST_HEAD(movable_folio_list); + unsigned long collected; - collect_longterm_unpinnable_folios(&movable_folio_list, pofs); - if (list_empty(&movable_folio_list)) + collected = collect_longterm_unpinnable_folios(&movable_folio_list, + pofs); + if (!collected) return 0; return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ad646fe6688a4..9c6a4e855481a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -87,7 +87,7 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma); static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma); static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma); static void hugetlb_unshare_pmds(struct vm_area_struct *vma, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, bool take_locks); static struct resv_map *vma_resv_map(struct vm_area_struct *vma); static void hugetlb_free_folio(struct folio *folio) @@ -5071,26 +5071,40 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) { if (addr & ~(huge_page_mask(hstate_vma(vma)))) return -EINVAL; + return 0; +} +void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) +{ /* * PMD sharing is only possible for PUD_SIZE-aligned address ranges * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. + * This function is called in the middle of a VMA split operation, with + * MM, VMA and rmap all write-locked to prevent concurrent page table + * walks (except hardware and gup_fast()). */ + vma_assert_write_locked(vma); + i_mmap_assert_write_locked(vma->vm_file->f_mapping); + if (addr & ~PUD_MASK) { - /* - * hugetlb_vm_op_split is called right before we attempt to - * split the VMA. We will need to unshare PMDs in the old and - * new VMAs, so let's unshare before we split. - */ unsigned long floor = addr & PUD_MASK; unsigned long ceil = floor + PUD_SIZE; - if (floor >= vma->vm_start && ceil <= vma->vm_end) - hugetlb_unshare_pmds(vma, floor, ceil); + if (floor >= vma->vm_start && ceil <= vma->vm_end) { + /* + * Locking: + * Use take_locks=false here. + * The file rmap lock is already held. + * The hugetlb VMA lock can't be taken when we already + * hold the file rmap lock, and we don't need it because + * its purpose is to synchronize against concurrent page + * table walks, which are not possible thanks to the + * locks held by our caller. 
+ */ + hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false); + } } - - return 0; } static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) @@ -7252,6 +7266,13 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, return 0; pud_clear(pud); + /* + * Once our caller drops the rmap lock, some other process might be + * using this page table as a normal, non-hugetlb page table. + * Wait for pending gup_fast() in other threads to finish before letting + * that happen. + */ + tlb_remove_table_sync_one(); ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep)); mm_dec_nr_pmds(mm); return 1; @@ -7484,9 +7505,16 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re } } +/* + * If @take_locks is false, the caller must ensure that no concurrent page table + * access can happen (except for gup_fast() and hardware page walks). + * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like + * concurrent page fault handling) and the file rmap lock. + */ static void hugetlb_unshare_pmds(struct vm_area_struct *vma, unsigned long start, - unsigned long end) + unsigned long end, + bool take_locks) { struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); @@ -7510,8 +7538,12 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma, mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end); mmu_notifier_invalidate_range_start(&range); - hugetlb_vma_lock_write(vma); - i_mmap_lock_write(vma->vm_file->f_mapping); + if (take_locks) { + hugetlb_vma_lock_write(vma); + i_mmap_lock_write(vma->vm_file->f_mapping); + } else { + i_mmap_assert_write_locked(vma->vm_file->f_mapping); + } for (address = start; address < end; address += PUD_SIZE) { ptep = hugetlb_walk(vma, address, sz); if (!ptep) @@ -7521,8 +7553,10 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma, spin_unlock(ptl); } flush_hugetlb_tlb_range(vma, start, end); - i_mmap_unlock_write(vma->vm_file->f_mapping); - hugetlb_vma_unlock_write(vma); + if (take_locks) { + i_mmap_unlock_write(vma->vm_file->f_mapping); + hugetlb_vma_unlock_write(vma); + } /* * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see * Documentation/mm/mmu_notifier.rst. 
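The @take_locks parameter threaded through hugetlb_unshare_pmds() above is a common kernel locking pattern: a helper either acquires its own locks or asserts that the caller already holds them, so the same code can be reused from contexts that already hold the rmap lock. A reduced, illustrative sketch using the real i_mmap lock helpers and a hypothetical do_unshare_range() placeholder, not the actual hugetlb code:

/*
 * Illustrative sketch of the take_locks pattern: either take the file
 * rmap lock here, or assert that the caller already holds it.
 */
static void unshare_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  bool take_locks)
{
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (take_locks)
		i_mmap_lock_write(mapping);
	else
		i_mmap_assert_write_locked(mapping);

	do_unshare_range(vma, start, end);	/* hypothetical helper */

	if (take_locks)
		i_mmap_unlock_write(mapping);
}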
@@ -7537,7 +7571,8 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma, void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), - ALIGN_DOWN(vma->vm_end, PUD_SIZE)); + ALIGN_DOWN(vma->vm_end, PUD_SIZE), + /* take_locks = */ true); } #ifdef CONFIG_CMA diff --git a/mm/kasan/report.c b/mm/kasan/report.c index c7c0083203cb7..5675d6a412ef1 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -398,17 +398,8 @@ static void print_address_description(void *addr, u8 tag, } if (is_vmalloc_addr(addr)) { - struct vm_struct *va = find_vm_area(addr); - - if (va) { - pr_err("The buggy address belongs to the virtual mapping at\n" - " [%px, %px) created by:\n" - " %pS\n", - va->addr, va->addr + va->size, va->caller); - pr_err("\n"); - - page = vmalloc_to_page(addr); - } + pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr); + page = vmalloc_to_page(addr); } if (page) { diff --git a/mm/madvise.c b/mm/madvise.c index c211e8fa4e49b..2e66a08fd4f4c 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -495,6 +495,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, pte_offset_map_lock(mm, pmd, addr, &ptl); if (!start_pte) break; + flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); if (!err) nr = 0; @@ -728,6 +729,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, start_pte = pte; if (!start_pte) break; + flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); if (!err) nr = 0; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index fcd4c1439cb9c..bfb3f903bb6d5 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -543,8 +543,8 @@ static int dirty_ratio_handler(const struct ctl_table *table, int write, void *b ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_ratio != old_ratio) { - writeback_set_ratelimit(); vm_dirty_bytes = 0; + writeback_set_ratelimit(); } return ret; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 882903f42300b..752576749db9d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -872,9 +872,7 @@ static inline bool page_expected_state(struct page *page, #ifdef CONFIG_MEMCG page->memcg_data | #endif -#ifdef CONFIG_PAGE_POOL - ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) | -#endif + page_pool_page_is_pp(page) | (page->flags & check_flags))) return false; @@ -901,10 +899,8 @@ static const char *page_bad_reason(struct page *page, unsigned long flags) if (unlikely(page->memcg_data)) bad_reason = "page still charged to cgroup"; #endif -#ifdef CONFIG_PAGE_POOL - if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE)) + if (unlikely(page_pool_page_is_pp(page))) bad_reason = "page_pool leak"; -#endif return bad_reason; } diff --git a/mm/secretmem.c b/mm/secretmem.c index 399552814fd0f..4662f2510ae5f 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -195,19 +195,11 @@ static struct file *secretmem_file_create(unsigned long flags) struct file *file; struct inode *inode; const char *anon_name = "[secretmem]"; - const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name)); - int err; - inode = alloc_anon_inode(secretmem_mnt->mnt_sb); + inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL); if (IS_ERR(inode)) return ERR_CAST(inode); - err = security_inode_init_security_anon(inode, &qname, NULL); - if (err) { - file = ERR_PTR(err); - goto err_free_inode; - } - file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem", O_RDWR, &secretmem_fops); if (IS_ERR(file)) diff --git 
a/mm/userfaultfd.c b/mm/userfaultfd.c index e06e3d2709610..2646b75163d5f 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1078,8 +1078,18 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, pte_t *dst_pte, pte_t *src_pte, pte_t orig_dst_pte, pte_t orig_src_pte, spinlock_t *dst_ptl, spinlock_t *src_ptl, - struct folio *src_folio) + struct folio *src_folio, + struct swap_info_struct *si, swp_entry_t entry) { + /* + * Check if the folio still belongs to the target swap entry after + * acquiring the lock. Folio can be freed in the swap cache while + * not locked. + */ + if (src_folio && unlikely(!folio_test_swapcache(src_folio) || + entry.val != src_folio->swap.val)) + return -EAGAIN; + double_pt_lock(dst_ptl, src_ptl); if (!pte_same(ptep_get(src_pte), orig_src_pte) || @@ -1096,6 +1106,25 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, if (src_folio) { folio_move_anon_rmap(src_folio, dst_vma); src_folio->index = linear_page_index(dst_vma, dst_addr); + } else { + /* + * Check if the swap entry is cached after acquiring the src_pte + * lock. Otherwise, we might miss a newly loaded swap cache folio. + * + * Check swap_map directly to minimize overhead, READ_ONCE is sufficient. + * We are trying to catch newly added swap cache, the only possible case is + * when a folio is swapped in and out again staying in swap cache, using the + * same entry before the PTE check above. The PTL is acquired and released + * twice, each time after updating the swap_map's flag. So holding + * the PTL here ensures we see the updated value. False positive is possible, + * e.g. SWP_SYNCHRONOUS_IO swapin may set the flag without touching the + * cache, or during the tiny synchronization window between swap cache and + * swap_map, but it will be gone very quickly, worst result is retry jitters. + */ + if (READ_ONCE(si->swap_map[swp_offset(entry)]) & SWAP_HAS_CACHE) { + double_pt_unlock(dst_ptl, src_ptl); + return -EAGAIN; + } } orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte); @@ -1391,7 +1420,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, } err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte, orig_dst_pte, orig_src_pte, - dst_ptl, src_ptl, src_folio); + dst_ptl, src_ptl, src_folio, si, entry); } out: diff --git a/mm/vma.c b/mm/vma.c index 9b4517944901d..140f7017bb634 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -416,7 +416,14 @@ static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, init_vma_prep(&vp, vma); vp.insert = new; vma_prepare(&vp); + + /* + * Get rid of huge pages and shared page tables straddling the split + * boundary. + */ vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); + if (is_vm_hugetlb_page(vma)) + hugetlb_split(vma, addr); if (new_below) { vma->vm_start = addr; @@ -829,9 +836,6 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct * err = dup_anon_vma(next, vma, &anon_dup); } - if (err) - goto abort; - /* * In nearly all cases, we expand vmg->vma. There is one exception - * merge_right where we partially span the VMA. In this case we shrink @@ -839,22 +843,11 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct * */ expanded = !merge_right || merge_will_delete_vma; - if (commit_merge(vmg, adjust, - merge_will_delete_vma ? vma : NULL, - merge_will_delete_next ? 
next : NULL, - adj_start, expanded)) { - if (anon_dup) - unlink_anon_vmas(anon_dup); - - /* - * We've cleaned up any cloned anon_vma's, no VMAs have been - * modified, no harm no foul if the user requests that we not - * report this and just give up, leaving the VMAs unmerged. - */ - if (!vmg->give_up_on_oom) - vmg->state = VMA_MERGE_ERROR_NOMEM; - return NULL; - } + if (err || commit_merge(vmg, adjust, + merge_will_delete_vma ? vma : NULL, + merge_will_delete_next ? next : NULL, + adj_start, expanded)) + goto abort; res = merge_left ? prev : next; khugepaged_enter_vma(res, vmg->flags); @@ -866,6 +859,9 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct * vma_iter_set(vmg->vmi, start); vma_iter_load(vmg->vmi); + if (anon_dup) + unlink_anon_vmas(anon_dup); + /* * This means we have failed to clone anon_vma's correctly, but no * actual changes to VMAs have occurred, so no harm no foul - if the diff --git a/mm/vma_internal.h b/mm/vma_internal.h index b930ab12a5878..1dd119f266e64 100644 --- a/mm/vma_internal.h +++ b/mm/vma_internal.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/mm/vmalloc.c b/mm/vmalloc.c index cc04e501b1c53..3519c4e4f841d 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -487,6 +487,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { + int err = 0; pte_t *pte; /* @@ -500,18 +501,25 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, do { struct page *page = pages[*nr]; - if (WARN_ON(!pte_none(ptep_get(pte)))) - return -EBUSY; - if (WARN_ON(!page)) - return -ENOMEM; - if (WARN_ON(!pfn_valid(page_to_pfn(page)))) - return -EINVAL; + if (WARN_ON(!pte_none(ptep_get(pte)))) { + err = -EBUSY; + break; + } + if (WARN_ON(!page)) { + err = -ENOMEM; + break; + } + if (WARN_ON(!pfn_valid(page_to_pfn(page)))) { + err = -EINVAL; + break; + } set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); (*nr)++; } while (pte++, addr += PAGE_SIZE, addr != end); *mask |= PGTBL_PTE_MODIFIED; - return 0; + + return err; } static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, @@ -3095,7 +3103,7 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm) /* * Before removing VM_UNINITIALIZED, * we should make sure that vm has proper values. - * Pair with smp_rmb() in show_numa_info(). + * Pair with smp_rmb() in vread_iter() and vmalloc_info_show(). */ smp_wmb(); vm->flags &= ~VM_UNINITIALIZED; @@ -4938,28 +4946,29 @@ bool vmalloc_dump_obj(void *object) #endif #ifdef CONFIG_PROC_FS -static void show_numa_info(struct seq_file *m, struct vm_struct *v) -{ - if (IS_ENABLED(CONFIG_NUMA)) { - unsigned int nr, *counters = m->private; - unsigned int step = 1U << vm_area_page_order(v); - if (!counters) - return; +/* + * Print number of pages allocated on each memory node. + * + * This function can only be called if CONFIG_NUMA is enabled + * and VM_UNINITIALIZED bit in v->flags is disabled. 
+ */ +static void show_numa_info(struct seq_file *m, struct vm_struct *v, + unsigned int *counters) +{ + unsigned int nr; + unsigned int step = 1U << vm_area_page_order(v); - if (v->flags & VM_UNINITIALIZED) - return; - /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ - smp_rmb(); + if (!counters) + return; - memset(counters, 0, nr_node_ids * sizeof(unsigned int)); + memset(counters, 0, nr_node_ids * sizeof(unsigned int)); - for (nr = 0; nr < v->nr_pages; nr += step) - counters[page_to_nid(v->pages[nr])] += step; - for_each_node_state(nr, N_HIGH_MEMORY) - if (counters[nr]) - seq_printf(m, " N%u=%u", nr, counters[nr]); - } + for (nr = 0; nr < v->nr_pages; nr += step) + counters[page_to_nid(v->pages[nr])] += step; + for_each_node_state(nr, N_HIGH_MEMORY) + if (counters[nr]) + seq_printf(m, " N%u=%u", nr, counters[nr]); } static void show_purge_info(struct seq_file *m) @@ -4987,6 +4996,10 @@ static int vmalloc_info_show(struct seq_file *m, void *p) struct vmap_area *va; struct vm_struct *v; int i; + unsigned int *counters; + + if (IS_ENABLED(CONFIG_NUMA)) + counters = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); for (i = 0; i < nr_vmap_nodes; i++) { vn = &vmap_nodes[i]; @@ -5003,6 +5016,11 @@ static int vmalloc_info_show(struct seq_file *m, void *p) } v = va->vm; + if (v->flags & VM_UNINITIALIZED) + continue; + + /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ + smp_rmb(); seq_printf(m, "0x%pK-0x%pK %7ld", v->addr, v->addr + v->size, v->size); @@ -5037,7 +5055,9 @@ static int vmalloc_info_show(struct seq_file *m, void *p) if (is_vmalloc_addr(v->pages)) seq_puts(m, " vpages"); - show_numa_info(m, v); + if (IS_ENABLED(CONFIG_NUMA)) + show_numa_info(m, v, counters); + seq_putc(m, '\n'); } spin_unlock(&vn->busy.lock); @@ -5047,19 +5067,14 @@ static int vmalloc_info_show(struct seq_file *m, void *p) * As a final step, dump "unpurged" areas. 
*/ show_purge_info(m); + if (IS_ENABLED(CONFIG_NUMA)) + kfree(counters); return 0; } static int __init proc_vmalloc_init(void) { - void *priv_data = NULL; - - if (IS_ENABLED(CONFIG_NUMA)) - priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); - - proc_create_single_data("vmallocinfo", - 0400, NULL, vmalloc_info_show, priv_data); - + proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show); return 0; } module_init(proc_vmalloc_init); diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index b068651984fe3..fa7f002b14fa3 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -576,6 +576,7 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint) /* Fill in the routing entry */ rt->target = ta->sat_addr; + dev_put(rt->dev); /* Release old device */ dev_hold(devhint); rt->dev = devhint; rt->flags = r->rt_flags; diff --git a/net/atm/clip.c b/net/atm/clip.c index 42b910cb4e8ee..ebba0d6ae3248 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -45,7 +45,8 @@ #include static struct net_device *clip_devs; -static struct atm_vcc *atmarpd; +static struct atm_vcc __rcu *atmarpd; +static DEFINE_MUTEX(atmarpd_lock); static struct timer_list idle_timer; static const struct neigh_ops clip_neigh_ops; @@ -53,24 +54,35 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) { struct sock *sk; struct atmarp_ctrl *ctrl; + struct atm_vcc *vcc; struct sk_buff *skb; + int err = 0; pr_debug("(%d)\n", type); - if (!atmarpd) - return -EUNATCH; + + rcu_read_lock(); + vcc = rcu_dereference(atmarpd); + if (!vcc) { + err = -EUNATCH; + goto unlock; + } skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC); - if (!skb) - return -ENOMEM; + if (!skb) { + err = -ENOMEM; + goto unlock; + } ctrl = skb_put(skb, sizeof(struct atmarp_ctrl)); ctrl->type = type; ctrl->itf_num = itf; ctrl->ip = ip; - atm_force_charge(atmarpd, skb->truesize); + atm_force_charge(vcc, skb->truesize); - sk = sk_atm(atmarpd); + sk = sk_atm(vcc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk); - return 0; +unlock: + rcu_read_unlock(); + return err; } static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) @@ -193,12 +205,6 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) pr_debug("\n"); - if (!clip_devs) { - atm_return(vcc, skb->truesize); - kfree_skb(skb); - return; - } - if (!skb) { pr_debug("removing VCC %p\n", clip_vcc); if (clip_vcc->entry) @@ -208,6 +214,11 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) return; } atm_return(vcc, skb->truesize); + if (!clip_devs) { + kfree_skb(skb); + return; + } + skb->dev = clip_vcc->entry ? 
clip_vcc->entry->neigh->dev : clip_devs; /* clip_vcc->entry == NULL if we don't have an IP address yet */ if (!skb->dev) { @@ -418,6 +429,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) if (!vcc->push) return -EBADFD; + if (vcc->user_back) + return -EINVAL; clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); if (!clip_vcc) return -ENOMEM; @@ -608,17 +621,27 @@ static void atmarpd_close(struct atm_vcc *vcc) { pr_debug("\n"); - rtnl_lock(); - atmarpd = NULL; + mutex_lock(&atmarpd_lock); + RCU_INIT_POINTER(atmarpd, NULL); + mutex_unlock(&atmarpd_lock); + + synchronize_rcu(); skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); - rtnl_unlock(); pr_debug("(done)\n"); module_put(THIS_MODULE); } +static int atmarpd_send(struct atm_vcc *vcc, struct sk_buff *skb) +{ + atm_return_tx(vcc, skb); + dev_kfree_skb_any(skb); + return 0; +} + static const struct atmdev_ops atmarpd_dev_ops = { - .close = atmarpd_close + .close = atmarpd_close, + .send = atmarpd_send }; @@ -632,15 +655,18 @@ static struct atm_dev atmarpd_dev = { static int atm_init_atmarp(struct atm_vcc *vcc) { - rtnl_lock(); + if (vcc->push == clip_push) + return -EINVAL; + + mutex_lock(&atmarpd_lock); if (atmarpd) { - rtnl_unlock(); + mutex_unlock(&atmarpd_lock); return -EADDRINUSE; } mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ); - atmarpd = vcc; + rcu_assign_pointer(atmarpd, vcc); set_bit(ATM_VF_META, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); /* allow replies and avoid getting closed if signaling dies */ @@ -649,13 +675,14 @@ static int atm_init_atmarp(struct atm_vcc *vcc) vcc->push = NULL; vcc->pop = NULL; /* crash */ vcc->push_oam = NULL; /* crash */ - rtnl_unlock(); + mutex_unlock(&atmarpd_lock); return 0; } static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct atm_vcc *vcc = ATM_SD(sock); + struct sock *sk = sock->sk; int err = 0; switch (cmd) { @@ -676,14 +703,18 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) err = clip_create(arg); break; case ATMARPD_CTRL: + lock_sock(sk); err = atm_init_atmarp(vcc); if (!err) { sock->state = SS_CONNECTED; __module_get(THIS_MODULE); } + release_sock(sk); break; case ATMARP_MKIP: + lock_sock(sk); err = clip_mkip(vcc, arg); + release_sock(sk); break; case ATMARP_SETENTRY: err = clip_setentry(vcc, (__force __be32)arg); diff --git a/net/atm/common.c b/net/atm/common.c index 9b75699992ff9..d7f7976ea13ac 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -635,6 +635,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) skb->dev = NULL; /* for paths shared with net_device interfaces */ if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) { + atm_return_tx(vcc, skb); kfree_skb(skb); error = -EFAULT; goto out; diff --git a/net/atm/lec.c b/net/atm/lec.c index a948dd47c3f34..42e8047c65105 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -124,6 +124,7 @@ static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* Device structures */ static struct net_device *dev_lec[MAX_LEC_ITF]; +static DEFINE_MUTEX(lec_mutex); #if IS_ENABLED(CONFIG_BRIDGE) static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) @@ -685,6 +686,7 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) int bytes_left; struct atmlec_ioc ioc_data; + lockdep_assert_held(&lec_mutex); /* Lecd must be up in this case */ bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); if (bytes_left != 0) @@ -710,6 +712,7 @@ static int 
lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) static int lec_mcast_attach(struct atm_vcc *vcc, int arg) { + lockdep_assert_held(&lec_mutex); if (arg < 0 || arg >= MAX_LEC_ITF) return -EINVAL; arg = array_index_nospec(arg, MAX_LEC_ITF); @@ -725,6 +728,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) int i; struct lec_priv *priv; + lockdep_assert_held(&lec_mutex); if (arg < 0) arg = 0; if (arg >= MAX_LEC_ITF) @@ -742,6 +746,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i); if (register_netdev(dev_lec[i])) { free_netdev(dev_lec[i]); + dev_lec[i] = NULL; return -EINVAL; } @@ -904,7 +909,6 @@ static void *lec_itf_walk(struct lec_state *state, loff_t *l) v = (dev && netdev_priv(dev)) ? lec_priv_walk(state, l, netdev_priv(dev)) : NULL; if (!v && dev) { - dev_put(dev); /* Partial state reset for the next time we get called */ dev = NULL; } @@ -928,6 +932,7 @@ static void *lec_seq_start(struct seq_file *seq, loff_t *pos) { struct lec_state *state = seq->private; + mutex_lock(&lec_mutex); state->itf = 0; state->dev = NULL; state->locked = NULL; @@ -945,8 +950,9 @@ static void lec_seq_stop(struct seq_file *seq, void *v) if (state->dev) { spin_unlock_irqrestore(&state->locked->lec_arp_lock, state->flags); - dev_put(state->dev); + state->dev = NULL; } + mutex_unlock(&lec_mutex); } static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos) @@ -1003,6 +1009,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return -ENOIOCTLCMD; } + mutex_lock(&lec_mutex); switch (cmd) { case ATMLEC_CTRL: err = lecd_attach(vcc, (int)arg); @@ -1017,6 +1024,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) break; } + mutex_unlock(&lec_mutex); return err; } diff --git a/net/atm/raw.c b/net/atm/raw.c index 2b5f78a7ec3e4..1e6511ec842cb 100644 --- a/net/atm/raw.c +++ b/net/atm/raw.c @@ -36,7 +36,7 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb) pr_debug("(%d) %d -= %d\n", vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize); - WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc)); + atm_return_tx(vcc, skb); dev_kfree_skb_any(skb); sk->sk_write_space(sk); } diff --git a/net/atm/resources.c b/net/atm/resources.c index 995d29e7fb138..b19d851e1f443 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -146,11 +146,10 @@ void atm_dev_deregister(struct atm_dev *dev) */ mutex_lock(&atm_dev_mutex); list_del(&dev->dev_list); - mutex_unlock(&atm_dev_mutex); - atm_dev_release_vccs(dev); atm_unregister_sysfs(dev); atm_proc_dev_deregister(dev); + mutex_unlock(&atm_dev_mutex); atm_dev_put(dev); } diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c index 1bc51e2b05a34..3f72111ba651f 100644 --- a/net/bluetooth/eir.c +++ b/net/bluetooth/eir.c @@ -242,7 +242,7 @@ u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) return ad_len; } -u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, u8 size) { struct adv_info *adv = NULL; u8 ad_len = 0, flags = 0; @@ -286,7 +286,7 @@ u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) /* If flags would still be empty, then there is no need to * include the "Flags" AD field". 
*/ - if (flags) { + if (flags && (ad_len + eir_precalc_len(1) <= size)) { ptr[0] = 0x02; ptr[1] = EIR_FLAGS; ptr[2] = flags; @@ -316,7 +316,8 @@ u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) } /* Provide Tx Power only if we can provide a valid value for it */ - if (adv_tx_power != HCI_TX_POWER_INVALID) { + if (adv_tx_power != HCI_TX_POWER_INVALID && + (ad_len + eir_precalc_len(1) <= size)) { ptr[0] = 0x02; ptr[1] = EIR_TX_POWER; ptr[2] = (u8)adv_tx_power; @@ -366,17 +367,19 @@ u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr) void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len) { - while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, len))) { + size_t dlen; + + while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, &dlen))) { u16 value = get_unaligned_le16(eir); if (uuid == value) { if (len) - *len -= 2; + *len = dlen - 2; return &eir[2]; } - eir += *len; - eir_len -= *len; + eir += dlen; + eir_len -= dlen; } return NULL; diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h index 5c89a05e8b290..9372db83f912f 100644 --- a/net/bluetooth/eir.h +++ b/net/bluetooth/eir.h @@ -9,7 +9,7 @@ void eir_create(struct hci_dev *hdev, u8 *data); -u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, u8 size); u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ae66fa0a5fb58..c6c1232db4e28 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -2067,6 +2067,8 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, { struct hci_conn *conn; + bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid); + conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE); if (IS_ERR(conn)) return conn; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 72439764186ed..b74ada8092378 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -64,7 +64,7 @@ static DEFINE_IDA(hci_index_ida); /* Get HCI device by index. * Device is held on return. 
*/ -struct hci_dev *hci_dev_get(int index) +static struct hci_dev *__hci_dev_get(int index, int *srcu_index) { struct hci_dev *hdev = NULL, *d; @@ -77,6 +77,8 @@ struct hci_dev *hci_dev_get(int index) list_for_each_entry(d, &hci_dev_list, list) { if (d->id == index) { hdev = hci_dev_hold(d); + if (srcu_index) + *srcu_index = srcu_read_lock(&d->srcu); break; } } @@ -84,6 +86,22 @@ struct hci_dev *hci_dev_get(int index) return hdev; } +struct hci_dev *hci_dev_get(int index) +{ + return __hci_dev_get(index, NULL); +} + +static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index) +{ + return __hci_dev_get(index, srcu_index); +} + +static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index) +{ + srcu_read_unlock(&hdev->srcu, srcu_index); + hci_dev_put(hdev); +} + /* ---- Inquiry support ---- */ bool hci_discovery_active(struct hci_dev *hdev) @@ -568,9 +586,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev) int hci_dev_reset(__u16 dev) { struct hci_dev *hdev; - int err; + int err, srcu_index; - hdev = hci_dev_get(dev); + hdev = hci_dev_get_srcu(dev, &srcu_index); if (!hdev) return -ENODEV; @@ -592,7 +610,7 @@ int hci_dev_reset(__u16 dev) err = hci_dev_do_reset(hdev); done: - hci_dev_put(hdev); + hci_dev_put_srcu(hdev, srcu_index); return err; } @@ -1877,10 +1895,8 @@ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) if (monitor->handle) idr_remove(&hdev->adv_monitors_idr, monitor->handle); - if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { + if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) hdev->adv_monitors_cnt--; - mgmt_adv_monitor_removed(hdev, monitor->handle); - } kfree(monitor); } @@ -2441,6 +2457,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) if (!hdev) return NULL; + if (init_srcu_struct(&hdev->srcu)) { + kfree(hdev); + return NULL; + } + hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); @@ -2507,6 +2528,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); + mutex_init(&hdev->mgmt_pending_lock); ida_init(&hdev->unset_handle_ida); @@ -2685,6 +2707,9 @@ void hci_unregister_dev(struct hci_dev *hdev) list_del(&hdev->list); write_unlock(&hci_dev_list_lock); + synchronize_srcu(&hdev->srcu); + cleanup_srcu_struct(&hdev->srcu); + disable_work_sync(&hdev->rx_work); disable_work_sync(&hdev->cmd_work); disable_work_sync(&hdev->tx_work); @@ -3416,23 +3441,18 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) bt_dev_err(hdev, "link tx timeout"); - rcu_read_lock(); + hci_dev_lock(hdev); /* Kill stalled connections */ - list_for_each_entry_rcu(c, &h->list, list) { + list_for_each_entry(c, &h->list, list) { if (c->type == type && c->sent) { bt_dev_err(hdev, "killing stalled connection %pMR", &c->dst); - /* hci_disconnect might sleep, so, we have to release - * the RCU read lock before calling it. 
- */ - rcu_read_unlock(); hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); - rcu_read_lock(); } } - rcu_read_unlock(); + hci_dev_unlock(hdev); } static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, @@ -4071,10 +4091,13 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) return; } - err = hci_send_frame(hdev, skb); - if (err < 0) { - hci_cmd_sync_cancel_sync(hdev, -err); - return; + if (hci_skb_opcode(skb) != HCI_OP_NOP) { + err = hci_send_frame(hdev, skb); + if (err < 0) { + hci_cmd_sync_cancel_sync(hdev, -err); + return; + } + atomic_dec(&hdev->cmd_cnt); } if (hdev->req_status == HCI_REQ_PEND && @@ -4082,8 +4105,6 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) kfree_skb(hdev->req_skb); hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); } - - atomic_dec(&hdev->cmd_cnt); } static void hci_cmd_work(struct work_struct *work) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8894633403519..b7dcebc701898 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2141,40 +2141,6 @@ static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, return rp->status; } -static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_rp_le_set_ext_adv_params *rp = data; - struct hci_cp_le_set_ext_adv_params *cp; - struct adv_info *adv_instance; - - bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - - if (rp->status) - return rp->status; - - cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); - if (!cp) - return rp->status; - - hci_dev_lock(hdev); - hdev->adv_addr_type = cp->own_addr_type; - if (!cp->handle) { - /* Store in hdev for instance 0 */ - hdev->adv_tx_power = rp->tx_power; - } else { - adv_instance = hci_find_adv_instance(hdev, cp->handle); - if (adv_instance) - adv_instance->tx_power = rp->tx_power; - } - /* Update adv data as tx power is known now */ - hci_update_adv_data(hdev, cp->handle); - - hci_dev_unlock(hdev); - - return rp->status; -} - static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -4155,8 +4121,6 @@ static const struct hci_cc { HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, hci_cc_le_read_num_adv_sets, sizeof(struct hci_rp_le_read_num_supported_adv_sets)), - HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, - sizeof(struct hci_rp_le_set_ext_adv_params)), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, hci_cc_le_set_ext_adv_enable), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, @@ -6333,6 +6297,17 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, info->secondary_phy &= 0x1f; } + /* Check if PA Sync is pending and if the hci_conn SID has not + * been set update it. 
+ */ + if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_create_pa_sync(hdev); + if (conn && conn->sid == HCI_SID_INVALID) + conn->sid = info->sid; + } + if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &info->bdaddr, info->bdaddr_type, NULL, 0, @@ -6970,7 +6945,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); if (!ev->status) { + bis->state = BT_CONNECTED; set_bit(HCI_CONN_BIG_SYNC, &bis->flags); + hci_debugfs_create_conn(bis); + hci_conn_add_sysfs(bis); hci_iso_setup_path(bis); } } @@ -7136,7 +7114,8 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data, /* Only match event if command OGF is for LE */ if (hdev->req_skb && - hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 && + (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 || + hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) && hci_skb_event(hdev->req_skb) == ev->subevent) { *opcode = hci_skb_opcode(hdev->req_skb); hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, @@ -7492,8 +7471,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) goto done; } + hci_dev_lock(hdev); kfree_skb(hdev->recv_event); hdev->recv_event = skb_clone(skb, GFP_KERNEL); + hci_dev_unlock(hdev); event = hdr->evt; if (!event) { diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 6597936fbd51b..bc01135e43f3e 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -1205,9 +1205,126 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +static int +hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv, + const struct hci_cp_le_set_ext_adv_params *cp, + struct hci_rp_le_set_ext_adv_params *rp) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp), + cp, HCI_CMD_TIMEOUT); + + /* If command return a status event, skb will be set to -ENODATA */ + if (skb == ERR_PTR(-ENODATA)) + return 0; + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", + HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*rp)) { + bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u", + HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len); + kfree_skb(skb); + return -EIO; + } + + memcpy(rp, skb->data, sizeof(*rp)); + kfree_skb(skb); + + if (!rp->status) { + hdev->adv_addr_type = cp->own_addr_type; + if (!cp->handle) { + /* Store in hdev for instance 0 */ + hdev->adv_tx_power = rp->tx_power; + } else if (adv) { + adv->tx_power = rp->tx_power; + } + } + + return rp->status; +} + +static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length, + HCI_MAX_EXT_AD_LENGTH); + u8 len; + struct adv_info *adv = NULL; + int err; + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->adv_data_changed) + return 0; + } + + len = eir_create_adv_data(hdev, instance, pdu->data, + HCI_MAX_EXT_AD_LENGTH); + + pdu->length = len; + pdu->handle = adv ? 
adv->handle : instance; + pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, + struct_size(pdu, data, len), pdu, + HCI_CMD_TIMEOUT); + if (err) + return err; + + /* Update data if the command succeed */ + if (adv) { + adv->adv_data_changed = false; + } else { + memcpy(hdev->adv_data, pdu->data, len); + hdev->adv_data_len = len; + } + + return 0; +} + +static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_adv_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data)); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(cp.data, hdev->adv_data, len) == 0) + return 0; + + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); + hdev->adv_data_len = len; + + cp.length = len; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_adv_data_sync(hdev, instance); + + return hci_set_adv_data_sync(hdev, instance); +} + int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; + struct hci_rp_le_set_ext_adv_params rp; bool connectable; u32 flags; bdaddr_t random_addr; @@ -1228,7 +1345,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) * Command Disallowed error, so we must first disable the * instance if it is active. */ - if (adv && !adv->pending) { + if (adv) { err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; @@ -1314,8 +1431,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp.secondary_phy = HCI_ADV_PHY_1M; } - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); + err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp); + if (err) + return err; + + /* Update adv data as tx power is known now */ + err = hci_set_ext_adv_data_sync(hdev, cp.handle); if (err) return err; @@ -1559,7 +1680,8 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) { u8 bid[3]; - u8 ad[4 + 3]; + u8 ad[HCI_MAX_EXT_AD_LENGTH]; + u8 len; /* Skip if NULL adv as instance 0x00 is used for general purpose * advertising so it cannot used for the likes of Broadcast Announcement @@ -1585,8 +1707,10 @@ static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) /* Generate Broadcast ID */ get_random_bytes(bid, sizeof(bid)); - eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); - hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); + len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); + memcpy(ad + len, adv->adv_data, adv->adv_data_len); + hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len, + ad, 0, NULL); return hci_update_adv_data_sync(hdev, adv->instance); } @@ -1603,8 +1727,15 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, if (instance) { adv = hci_find_adv_instance(hdev, instance); - /* Create an instance if that could not be found */ - if (!adv) { + if (adv) { + /* Turn it into periodic advertising */ + adv->periodic = true; + adv->per_adv_data_len = data_len; 
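Worth noting at this point: hci_set_ext_adv_params_sync() takes over the reply parsing that the deleted hci_cc_set_ext_adv_param() completion handler used to do, so the selected TX power is already known by the time the advertising data is built immediately afterwards. The reply buffer is copied out only after its length has been checked against the expected structure. Below is a minimal userspace sketch of that validate-then-copy pattern; struct adv_params_reply and parse_adv_params_reply() are illustrative stand-ins, not the kernel's HCI types.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct adv_params_reply {       /* simplified stand-in for the HCI reply */
        uint8_t status;             /* 0x00 on success */
        int8_t  tx_power;           /* selected TX power in dBm */
    } __attribute__((packed));

    static int parse_adv_params_reply(const uint8_t *buf, size_t len,
                                      struct adv_params_reply *rp)
    {
        if (len != sizeof(*rp))     /* reject truncated or oversized replies */
            return -EIO;
        memcpy(rp, buf, sizeof(*rp));
        return rp->status ? -EPROTO : 0;
    }

    int main(void)
    {
        const uint8_t wire[] = { 0x00, 0xf4 };  /* status 0, tx_power -12 */
        struct adv_params_reply rp;

        if (!parse_adv_params_reply(wire, sizeof(wire), &rp))
            printf("tx_power %d dBm\n", rp.tx_power);
        return 0;
    }

The same ordering is used again further below in hci_le_ext_directed_advertising_sync(): send the params command first, then call hci_set_ext_adv_data_sync() once rp.tx_power has been recorded.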
+ if (data) + memcpy(adv->per_adv_data, data, data_len); + adv->flags = flags; + } else if (!adv) { + /* Create an instance if that could not be found */ adv = hci_add_per_instance(hdev, instance, flags, data_len, data, sync_interval, @@ -1822,78 +1953,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) sizeof(cp), &cp, HCI_CMD_TIMEOUT); } -static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) -{ - DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length, - HCI_MAX_EXT_AD_LENGTH); - u8 len; - struct adv_info *adv = NULL; - int err; - - if (instance) { - adv = hci_find_adv_instance(hdev, instance); - if (!adv || !adv->adv_data_changed) - return 0; - } - - len = eir_create_adv_data(hdev, instance, pdu->data); - - pdu->length = len; - pdu->handle = adv ? adv->handle : instance; - pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; - pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; - - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, - struct_size(pdu, data, len), pdu, - HCI_CMD_TIMEOUT); - if (err) - return err; - - /* Update data if the command succeed */ - if (adv) { - adv->adv_data_changed = false; - } else { - memcpy(hdev->adv_data, pdu->data, len); - hdev->adv_data_len = len; - } - - return 0; -} - -static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) -{ - struct hci_cp_le_set_adv_data cp; - u8 len; - - memset(&cp, 0, sizeof(cp)); - - len = eir_create_adv_data(hdev, instance, cp.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(cp.data, hdev->adv_data, len) == 0) - return 0; - - memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); - hdev->adv_data_len = len; - - cp.length = len; - - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); -} - -int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) -{ - if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - return 0; - - if (ext_adv_capable(hdev)) - return hci_set_ext_adv_data_sync(hdev, instance); - - return hci_set_adv_data_sync(hdev, instance); -} - int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, bool force) { @@ -1969,13 +2028,10 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) { struct adv_info *adv, *n; - int err = 0; if (ext_adv_capable(hdev)) /* Remove all existing sets */ - err = hci_clear_adv_sets_sync(hdev, sk); - if (ext_adv_capable(hdev)) - return err; + return hci_clear_adv_sets_sync(hdev, sk); /* This is safe as long as there is no command send while the lock is * held. @@ -2003,13 +2059,11 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, struct sock *sk) { - int err = 0; + int err; /* If we use extended advertising, instance has to be removed first. */ if (ext_adv_capable(hdev)) - err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); - if (ext_adv_capable(hdev)) - return err; + return hci_remove_ext_adv_instance_sync(hdev, instance, sk); /* This is safe as long as there is no command send while the lock is * held. @@ -2108,16 +2162,13 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) int hci_disable_advertising_sync(struct hci_dev *hdev) { u8 enable = 0x00; - int err = 0; /* If controller is not advertising we are done. 
*/ if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) return 0; if (ext_adv_capable(hdev)) - err = hci_disable_ext_adv_instance_sync(hdev, 0x00); - if (ext_adv_capable(hdev)) - return err; + return hci_disable_ext_adv_instance_sync(hdev, 0x00); return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable, HCI_CMD_TIMEOUT); @@ -2480,6 +2531,10 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev) int err; int old_state; + /* If controller is not advertising we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + /* If already been paused there is nothing to do. */ if (hdev->advertising_paused) return 0; @@ -6240,6 +6295,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_le_set_ext_adv_params cp; + struct hci_rp_le_set_ext_adv_params rp; int err; bdaddr_t random_addr; u8 own_addr_type; @@ -6281,8 +6337,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, if (err) return err; - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); + err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp); + if (err) + return err; + + /* Update adv data as tx power is known now */ + err = hci_set_ext_adv_data_sync(hdev, cp.handle); if (err) return err; @@ -6890,20 +6950,37 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, static void create_pa_complete(struct hci_dev *hdev, void *data, int err) { + struct hci_conn *conn = data; + struct hci_conn *pa_sync; + bt_dev_dbg(hdev, "err %d", err); - if (!err) + if (err == -ECANCELED) return; + hci_dev_lock(hdev); + hci_dev_clear_flag(hdev, HCI_PA_SYNC); - if (err == -ECANCELED) - return; + if (!hci_conn_valid(hdev, conn)) + clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); - hci_dev_lock(hdev); + if (!err) + goto unlock; - hci_update_passive_scan_sync(hdev); + /* Add connection to indicate PA sync error */ + pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY, + HCI_ROLE_SLAVE); + + if (IS_ERR(pa_sync)) + goto unlock; + + set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); + /* Notify iso layer */ + hci_connect_cfm(pa_sync, bt_status(err)); + +unlock: hci_dev_unlock(hdev); } @@ -6917,9 +6994,23 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) if (!hci_conn_valid(hdev, conn)) return -ECANCELED; + if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID) + return -EINVAL; + if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) return -EBUSY; + /* Stop scanning if SID has not been set and active scanning is enabled + * so we use passive scanning which will be scanning using the allow + * list programmed to contain only the connection address. + */ + if (conn->sid == HCI_SID_INVALID && + hci_dev_test_flag(hdev, HCI_LE_SCAN)) { + hci_scan_disable_sync(hdev); + hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + } + /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can * program the address in the allow list so PA advertisements can be * received. @@ -6928,6 +7019,14 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) hci_update_passive_scan_sync(hdev); + /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update + * it. 
+ */ + if (conn->sid == HCI_SID_INVALID) + __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL, + HCI_EV_LE_EXT_ADV_REPORT, + conn->conn_timeout, NULL); + memset(&cp, 0, sizeof(cp)); cp.options = qos->bcast.options; cp.sid = conn->sid; diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 72bf9b1db2247..a08a0f3d5003c 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -938,7 +938,7 @@ static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr, iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type; - if (sa->iso_bc->bc_sid > 0x0f) + if (sa->iso_bc->bc_sid > 0x0f && sa->iso_bc->bc_sid != HCI_SID_INVALID) return -EINVAL; iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid; @@ -1963,6 +1963,9 @@ static bool iso_match_sid(struct sock *sk, void *data) { struct hci_ev_le_pa_sync_established *ev = data; + if (iso_pi(sk)->bc_sid == HCI_SID_INVALID) + return true; + return ev->sid == iso_pi(sk)->bc_sid; } @@ -2009,8 +2012,10 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) if (ev1) { sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN, iso_match_sid, ev1); - if (sk && !ev1->status) + if (sk && !ev1->status) { iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); + iso_pi(sk)->bc_sid = ev1->sid; + } goto done; } diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 66fa5d6fea6ca..0628fedc0e29b 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -3380,7 +3380,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; struct l2cap_conf_efs efs; u8 remote_efs = 0; - u16 mtu = L2CAP_DEFAULT_MTU; + u16 mtu = 0; u16 result = L2CAP_CONF_SUCCESS; u16 size; @@ -3485,6 +3485,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data /* Configure output options and let the other side know * which ones we don't like. */ + /* If MTU is not provided in configure request, use the most recently + * explicitly or implicitly accepted value for the other direction, + * or the default value. + */ + if (mtu == 0) + mtu = chan->imtu ? chan->imtu : L2CAP_DEFAULT_MTU; + if (mtu < L2CAP_DEFAULT_MIN_MTU) result = L2CAP_CONF_UNACCEPT; else { @@ -4835,7 +4842,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, if (!smp_sufficient_security(conn->hcon, pchan->sec_level, SMP_ALLOW_STK)) { - result = L2CAP_CR_LE_AUTHENTICATION; + result = pchan->sec_level == BT_SECURITY_MEDIUM ? 
+ L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION; chan = NULL; goto response_unlock; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index d4700f940e8a1..ade93532db34b 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1073,7 +1073,8 @@ static int mesh_send_done_sync(struct hci_dev *hdev, void *data) struct mgmt_mesh_tx *mesh_tx; hci_dev_clear_flag(hdev, HCI_MESH_SENDING); - hci_disable_advertising_sync(hdev); + if (list_empty(&hdev->adv_instances)) + hci_disable_advertising_sync(hdev); mesh_tx = mgmt_mesh_next(hdev, NULL); if (mesh_tx) @@ -1440,22 +1441,17 @@ static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data) send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); - list_del(&cmd->list); - if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } - - mgmt_pending_free(cmd); } static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) { u8 *status = data; - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); - mgmt_pending_remove(cmd); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status); } static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) @@ -1469,8 +1465,6 @@ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) if (cmd->cmd_complete) { cmd->cmd_complete(cmd, match->mgmt_status); - mgmt_pending_remove(cmd); - return; } @@ -1479,13 +1473,13 @@ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, cmd->param, cmd->param_len); } static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, cmd->param, sizeof(struct mgmt_addr_info)); } @@ -1525,7 +1519,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); goto done; } @@ -1700,7 +1694,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); goto done; } @@ -1936,8 +1930,8 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) new_settings(hdev, NULL); } - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, - &mgmt_err); + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, + cmd_status_rsp, &mgmt_err); return; } @@ -1947,7 +1941,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); } - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match); if (changed) new_settings(hdev, match.sk); @@ -2067,12 +2061,12 @@ static void set_le_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); if (status) { - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, - &status); + mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp, + &status); return; } - 
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match); new_settings(hdev, match.sk); @@ -2131,7 +2125,7 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err) struct sock *sk = cmd->sk; if (status) { - mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, + mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true, cmd_status_rsp, &status); return; } @@ -2153,6 +2147,9 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data) else hci_dev_clear_flag(hdev, HCI_MESH); + hdev->le_scan_interval = __le16_to_cpu(cp->period); + hdev->le_scan_window = __le16_to_cpu(cp->window); + len -= sizeof(*cp); /* If filters don't fit, forward all adv pkts */ @@ -2167,6 +2164,7 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_mesh *cp = data; struct mgmt_pending_cmd *cmd; + __u16 period, window; int err = 0; bt_dev_dbg(hdev, "sock %p", sk); @@ -2180,6 +2178,23 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, MGMT_STATUS_INVALID_PARAMS); + /* Keep allowed ranges in sync with set_scan_params() */ + period = __le16_to_cpu(cp->period); + + if (period < 0x0004 || period > 0x4000) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + + window = __le16_to_cpu(cp->window); + + if (window < 0x0004 || window > 0x4000) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + + if (window > period) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + hci_dev_lock(hdev); cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len); @@ -2572,7 +2587,7 @@ static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), hdev->dev_class, 3); mgmt_pending_free(cmd); @@ -3360,7 +3375,7 @@ static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status) bacpy(&rp.addr.bdaddr, &conn->dst); rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); - err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, + err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); /* So we don't get further callbacks for this connection */ @@ -5172,24 +5187,14 @@ static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); } -void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle) +static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev, + __le16 handle) { struct mgmt_ev_adv_monitor_removed ev; - struct mgmt_pending_cmd *cmd; - struct sock *sk_skip = NULL; - struct mgmt_cp_remove_adv_monitor *cp; - - cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); - if (cmd) { - cp = cmd->param; - if (cp->monitor_handle) - sk_skip = cmd->sk; - } - - ev.monitor_handle = cpu_to_le16(handle); + ev.monitor_handle = handle; - mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip); + mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk); } static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, @@ -5260,7 +5265,7 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, 
hci_update_passive_scan(hdev); } - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); @@ -5291,8 +5296,7 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || - pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) || - pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) { + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } @@ -5462,8 +5466,7 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_remove_adv_monitor *cp; - if (status == -ECANCELED || - cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) + if (status == -ECANCELED) return; hci_dev_lock(hdev); @@ -5472,12 +5475,14 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, rp.monitor_handle = cp->monitor_handle; - if (!status) + if (!status) { + mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle); hci_update_passive_scan(hdev); + } - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); hci_dev_unlock(hdev); bt_dev_dbg(hdev, "remove monitor %d complete, status %d", @@ -5487,10 +5492,6 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - - if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) - return -ECANCELED; - struct mgmt_cp_remove_adv_monitor *cp = cmd->param; u16 handle = __le16_to_cpu(cp->monitor_handle); @@ -5509,14 +5510,13 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_LE, hdev) || - pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); if (!cmd) { status = MGMT_STATUS_NO_RESOURCES; goto unlock; @@ -5526,7 +5526,7 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, mgmt_remove_adv_monitor_complete); if (err) { - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); if (err == -ENOMEM) status = MGMT_STATUS_NO_RESOURCES; @@ -5879,7 +5879,7 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev)) return; - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); @@ -6117,7 +6117,7 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); @@ -6342,7 +6342,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) u8 status = mgmt_status(err); if (status) { - 
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, + mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, cmd_status_rsp, &status); return; } @@ -6352,7 +6352,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) else hci_dev_clear_flag(hdev, HCI_ADVERTISING); - mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, + mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp, &match); new_settings(hdev, match.sk); @@ -6558,6 +6558,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_NOT_SUPPORTED); + /* Keep allowed ranges in sync with set_mesh() */ interval = __le16_to_cpu(cp->interval); if (interval < 0x0004 || interval > 0x4000) @@ -6696,7 +6697,7 @@ static void set_bredr_complete(struct hci_dev *hdev, void *data, int err) */ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); } else { send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); new_settings(hdev, cmd->sk); @@ -6833,7 +6834,7 @@ static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err) if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); goto done; } @@ -7280,7 +7281,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) rp.max_tx_power = HCI_TX_POWER_INVALID; } - mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status, &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -7440,7 +7441,7 @@ static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) } complete: - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -8690,10 +8691,10 @@ static void add_advertising_complete(struct hci_dev *hdev, void *data, int err) rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); add_adv_complete(hdev, cmd->sk, cp->instance, err); @@ -8881,10 +8882,10 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data, hci_remove_adv_instance(hdev, cp->instance); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); } else { - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); } @@ -9031,10 +9032,10 @@ static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err) rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -9193,10 +9194,10 @@ static void remove_advertising_complete(struct hci_dev *hdev, void *data, rp.instance = cp->instance; if (err) - 
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -9467,7 +9468,7 @@ void mgmt_index_removed(struct hci_dev *hdev) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match); + mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match); if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, @@ -9505,7 +9506,8 @@ void mgmt_power_on(struct hci_dev *hdev, int err) hci_update_passive_scan(hdev); } - mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp, + &match); new_settings(hdev, match.sk); @@ -9520,7 +9522,8 @@ void __mgmt_power_off(struct hci_dev *hdev) struct cmd_lookup match = { NULL, hdev }; u8 zero_cod[] = { 0, 0, 0 }; - mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp, + &match); /* If the power off is because of hdev unregistration let * use the appropriate INVALID_INDEX status. Otherwise use @@ -9534,7 +9537,7 @@ void __mgmt_power_off(struct hci_dev *hdev) else match.mgmt_status = MGMT_STATUS_NOT_POWERED; - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match); + mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, @@ -9775,7 +9778,6 @@ static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); cmd->cmd_complete(cmd, 0); - mgmt_pending_remove(cmd); } bool mgmt_powering_down(struct hci_dev *hdev) @@ -9831,8 +9833,8 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, struct mgmt_cp_disconnect *cp; struct mgmt_pending_cmd *cmd; - mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, - hdev); + mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true, + unpair_device_rsp, hdev); cmd = pending_find(MGMT_OP_DISCONNECT, hdev); if (!cmd) @@ -10025,7 +10027,7 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) if (status) { u8 mgmt_err = mgmt_status(status); - mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true, cmd_status_rsp, &mgmt_err); return; } @@ -10035,8 +10037,8 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) else changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY); - mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, - &match); + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true, + settings_rsp, &match); if (changed) new_settings(hdev, match.sk); @@ -10060,9 +10062,12 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, { struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; - mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); - mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); - mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); + mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup, + &match); + mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup, + &match); + 
mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup, + &match); if (!status) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index 17ab909a7c07f..a88a07da39473 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -217,47 +217,47 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd, *tmp; + + mutex_lock(&hdev->mgmt_pending_lock); - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { if (hci_sock_get_channel(cmd->sk) != channel) continue; - if (cmd->opcode == opcode) - return cmd; - } - return NULL; -} - -struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, - u16 opcode, - struct hci_dev *hdev, - const void *data) -{ - struct mgmt_pending_cmd *cmd; - - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { - if (cmd->user_data != data) - continue; - if (cmd->opcode == opcode) + if (cmd->opcode == opcode) { + mutex_unlock(&hdev->mgmt_pending_lock); return cmd; + } } + mutex_unlock(&hdev->mgmt_pending_lock); + return NULL; } -void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data) { struct mgmt_pending_cmd *cmd, *tmp; + mutex_lock(&hdev->mgmt_pending_lock); + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { if (opcode > 0 && cmd->opcode != opcode) continue; + if (remove) + list_del(&cmd->list); + cb(cmd, data); + + if (remove) + mgmt_pending_free(cmd); } + + mutex_unlock(&hdev->mgmt_pending_lock); } struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, @@ -271,7 +271,7 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, return NULL; cmd->opcode = opcode; - cmd->index = hdev->id; + cmd->hdev = hdev; cmd->param = kmemdup(data, len, GFP_KERNEL); if (!cmd->param) { @@ -297,7 +297,9 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, if (!cmd) return NULL; + mutex_lock(&hdev->mgmt_pending_lock); list_add_tail(&cmd->list, &hdev->mgmt_pending); + mutex_unlock(&hdev->mgmt_pending_lock); return cmd; } @@ -311,7 +313,10 @@ void mgmt_pending_free(struct mgmt_pending_cmd *cmd) void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) { + mutex_lock(&cmd->hdev->mgmt_pending_lock); list_del(&cmd->list); + mutex_unlock(&cmd->hdev->mgmt_pending_lock); + mgmt_pending_free(cmd); } @@ -321,7 +326,7 @@ void mgmt_mesh_foreach(struct hci_dev *hdev, { struct mgmt_mesh_tx *mesh_tx, *tmp; - list_for_each_entry_safe(mesh_tx, tmp, &hdev->mgmt_pending, list) { + list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) { if (!sk || mesh_tx->sk == sk) cb(mesh_tx, data); } diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index bdf978605d5a8..024e51dd69375 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -33,7 +33,7 @@ struct mgmt_mesh_tx { struct mgmt_pending_cmd { struct list_head list; u16 opcode; - int index; + struct hci_dev *hdev; void *param; size_t param_len; struct sock *sk; @@ -54,11 +54,7 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev); 
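The mgmt_util.c rework above turns hdev->mgmt_pending into a properly locked list: lookups and iteration take hdev->mgmt_pending_lock, and mgmt_pending_foreach() gains a remove flag so matching entries are unlinked and freed inside the walk itself, which is why callbacks such as settings_rsp() and unpair_device_rsp() no longer call mgmt_pending_remove() on their own. A single-file sketch of that walk, with a pthread mutex standing in for the kernel mutex (pending_cmd, pending_foreach() and count_cb() are illustrative names, not kernel API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct pending_cmd {
        struct pending_cmd *next;
        int opcode;
    };

    static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct pending_cmd *pending_head;

    /* Walk all entries matching opcode (0 matches everything). With
     * remove=true each entry is unlinked before the callback runs and
     * freed after it returns, so no concurrent walker can observe a
     * half-freed command. Callbacks must not take pending_lock again. */
    static void pending_foreach(int opcode, bool remove,
                                void (*cb)(struct pending_cmd *, void *),
                                void *data)
    {
        struct pending_cmd **pp = &pending_head, *cmd;

        pthread_mutex_lock(&pending_lock);
        while ((cmd = *pp)) {
            if (opcode && cmd->opcode != opcode) {
                pp = &cmd->next;
                continue;
            }
            if (remove)
                *pp = cmd->next;    /* unlink before invoking the callback */
            else
                pp = &cmd->next;
            cb(cmd, data);
            if (remove)
                free(cmd);
        }
        pthread_mutex_unlock(&pending_lock);
    }

    static void count_cb(struct pending_cmd *cmd, void *data)
    {
        (void)cmd;
        ++*(int *)data;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {           /* opcodes 0, 1, 0 */
            struct pending_cmd *c = calloc(1, sizeof(*c));
            c->opcode = i % 2;
            c->next = pending_head;
            pending_head = c;
        }

        int seen = 0;
        pending_foreach(1, true, count_cb, &seen);  /* drops the opcode-1 entry */
        pending_foreach(0, true, count_cb, &seen);  /* drains the rest */
        return seen == 3 ? 0 : 1;
    }

Replacing cmd->index with cmd->hdev fits the same scheme: a callback that runs after its entry was unlinked still has a stable route back to the device id (cmd->hdev->id) and to the lock.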
-struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, - u16 opcode, - struct hci_dev *hdev, - const void *data); -void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data); struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index 1820f09ff59ce..3f24b4ee49c27 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -80,10 +80,10 @@ static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg, if (br_vlan_get_state(v) == state) return; - br_vlan_set_state(v, state); - if (v->vid == vg->pvid) br_vlan_set_pvid_state(vg, state); + + br_vlan_set_state(v, state); } int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index b2ae0d2434d2e..733ff6b758f69 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -2105,12 +2105,17 @@ static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx) } } -void br_multicast_enable_port(struct net_bridge_port *port) +static void br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx) { - struct net_bridge *br = port->br; + struct net_bridge *br = pmctx->port->br; spin_lock_bh(&br->multicast_lock); - __br_multicast_enable_port_ctx(&port->multicast_ctx); + if (br_multicast_port_ctx_is_vlan(pmctx) && + !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) { + spin_unlock_bh(&br->multicast_lock); + return; + } + __br_multicast_enable_port_ctx(pmctx); spin_unlock_bh(&br->multicast_lock); } @@ -2137,11 +2142,67 @@ static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx) br_multicast_rport_del_notify(pmctx, del); } +static void br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx) +{ + struct net_bridge *br = pmctx->port->br; + + spin_lock_bh(&br->multicast_lock); + if (br_multicast_port_ctx_is_vlan(pmctx) && + !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) { + spin_unlock_bh(&br->multicast_lock); + return; + } + + __br_multicast_disable_port_ctx(pmctx); + spin_unlock_bh(&br->multicast_lock); +} + +static void br_multicast_toggle_port(struct net_bridge_port *port, bool on) +{ +#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) + if (br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) { + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *vlan; + + rcu_read_lock(); + vg = nbp_vlan_group_rcu(port); + if (!vg) { + rcu_read_unlock(); + return; + } + + /* iterate each vlan, toggle vlan multicast context */ + list_for_each_entry_rcu(vlan, &vg->vlan_list, vlist) { + struct net_bridge_mcast_port *pmctx = + &vlan->port_mcast_ctx; + u8 state = br_vlan_get_state(vlan); + /* enable vlan multicast context when state is + * LEARNING or FORWARDING + */ + if (on && br_vlan_state_allowed(state, true)) + br_multicast_enable_port_ctx(pmctx); + else + br_multicast_disable_port_ctx(pmctx); + } + rcu_read_unlock(); + return; + } +#endif + /* toggle port multicast context when vlan snooping is disabled */ + if (on) + br_multicast_enable_port_ctx(&port->multicast_ctx); + else + br_multicast_disable_port_ctx(&port->multicast_ctx); +} + +void br_multicast_enable_port(struct net_bridge_port *port) +{ + br_multicast_toggle_port(port, true); +} + void br_multicast_disable_port(struct net_bridge_port *port) { - spin_lock_bh(&port->br->multicast_lock); - 
__br_multicast_disable_port_ctx(&port->multicast_ctx); - spin_unlock_bh(&port->br->multicast_lock); + br_multicast_toggle_port(port, false); } static int __grp_src_delete_marked(struct net_bridge_port_group *pg) @@ -4211,6 +4272,32 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx) #endif } +void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state) +{ +#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) + struct net_bridge *br; + + if (!br_vlan_should_use(v)) + return; + + if (br_vlan_is_master(v)) + return; + + br = v->port->br; + + if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) + return; + + if (br_vlan_state_allowed(state, true)) + br_multicast_enable_port_ctx(&v->port_mcast_ctx); + + /* Multicast is not disabled for the vlan when it goes in + * blocking state because the timers will expire and stop by + * themselves without sending more queries. + */ +#endif +} + void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on) { struct net_bridge *br; @@ -4304,9 +4391,9 @@ int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on, __br_multicast_open(&br->multicast_ctx); list_for_each_entry(p, &br->port_list, list) { if (on) - br_multicast_disable_port(p); + br_multicast_disable_port_ctx(&p->multicast_ctx); else - br_multicast_enable_port(p); + br_multicast_enable_port_ctx(&p->multicast_ctx); } list_for_each_entry(vlan, &vg->vlan_list, vlist) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index df502cc1191c3..6a1bce8959afa 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -1053,6 +1053,7 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port, struct net_bridge_vlan *vlan, struct net_bridge_mcast_port *pmctx); void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx); +void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state); void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on); int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on, struct netlink_ext_ack *extack); @@ -1503,6 +1504,11 @@ static inline void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pm { } +static inline void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, + u8 state) +{ +} + static inline void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on) { @@ -1854,7 +1860,9 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr, bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range, const struct net_bridge_vlan *v_opts); -/* vlan state manipulation helpers using *_ONCE to annotate lock-free access */ +/* vlan state manipulation helpers using *_ONCE to annotate lock-free access, + * while br_vlan_set_state() may access data protected by multicast_lock. 
+ */ static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v) { return READ_ONCE(v->state); @@ -1863,6 +1871,7 @@ static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v) static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state) { WRITE_ONCE(v->state, state); + br_multicast_update_vlan_mcast_ctx(v, state); } static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg) diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index 816bb0fde718e..6482de4d87509 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -60,19 +60,19 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, struct ip_fraglist_iter iter; struct sk_buff *frag; - if (first_len - hlen > mtu || - skb_headroom(skb) < ll_rs) + if (first_len - hlen > mtu) goto blackhole; - if (skb_cloned(skb)) + if (skb_cloned(skb) || + skb_headroom(skb) < ll_rs) goto slow_path; skb_walk_frags(skb, frag) { - if (frag->len > mtu || - skb_headroom(frag) < hlen + ll_rs) + if (frag->len > mtu) goto blackhole; - if (skb_shared(frag)) + if (skb_shared(frag) || + skb_headroom(frag) < hlen + ll_rs) goto slow_path; } diff --git a/net/core/filter.c b/net/core/filter.c index 99b23fd2f509c..1c0cf6f2fff52 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1980,10 +1980,11 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, bool is_pseudo = flags & BPF_F_PSEUDO_HDR; bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; bool do_mforce = flags & BPF_F_MARK_ENFORCE; + bool is_ipv6 = flags & BPF_F_IPV6; __sum16 *ptr; if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | - BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) + BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK | BPF_F_IPV6))) return -EINVAL; if (unlikely(offset > 0xffff || offset & 1)) return -EFAULT; @@ -1999,7 +2000,7 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, if (unlikely(from != 0)) return -EINVAL; - inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); + inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo, is_ipv6); break; case 2: inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); @@ -3249,6 +3250,13 @@ static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +static void bpf_skb_change_protocol(struct sk_buff *skb, u16 proto) +{ + skb->protocol = htons(proto); + if (skb_valid_dst(skb)) + skb_dst_drop(skb); +} + static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) { /* Caller already did skb_cow() with len as headroom, @@ -3345,7 +3353,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) } } - skb->protocol = htons(ETH_P_IPV6); + bpf_skb_change_protocol(skb, ETH_P_IPV6); skb_clear_hash(skb); return 0; @@ -3375,7 +3383,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) } } - skb->protocol = htons(ETH_P_IP); + bpf_skb_change_protocol(skb, ETH_P_IP); skb_clear_hash(skb); return 0; @@ -3566,10 +3574,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, /* Match skb->protocol to new outer l3 protocol */ if (skb->protocol == htons(ETH_P_IP) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) - skb->protocol = htons(ETH_P_IPV6); + bpf_skb_change_protocol(skb, ETH_P_IPV6); else if (skb->protocol == htons(ETH_P_IPV6) && flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) - skb->protocol = htons(ETH_P_IP); + bpf_skb_change_protocol(skb, ETH_P_IP); } if (skb_is_gso(skb)) { @@ -3622,10 +3630,10 @@ static int 
bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, /* Match skb->protocol to new outer l3 protocol */ if (skb->protocol == htons(ETH_P_IP) && flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV6) - skb->protocol = htons(ETH_P_IPV6); + bpf_skb_change_protocol(skb, ETH_P_IPV6); else if (skb->protocol == htons(ETH_P_IPV6) && flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV4) - skb->protocol = htons(ETH_P_IP); + bpf_skb_change_protocol(skb, ETH_P_IP); if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h index 7eadb8393e002..cd95394399b40 100644 --- a/net/core/netmem_priv.h +++ b/net/core/netmem_priv.h @@ -5,7 +5,7 @@ static inline unsigned long netmem_get_pp_magic(netmem_ref netmem) { - return __netmem_clear_lsb(netmem)->pp_magic; + return __netmem_clear_lsb(netmem)->pp_magic & ~PP_DMA_INDEX_MASK; } static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic) @@ -15,9 +15,16 @@ static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic) static inline void netmem_clear_pp_magic(netmem_ref netmem) { + WARN_ON_ONCE(__netmem_clear_lsb(netmem)->pp_magic & PP_DMA_INDEX_MASK); + __netmem_clear_lsb(netmem)->pp_magic = 0; } +static inline bool netmem_is_pp(netmem_ref netmem) +{ + return (netmem_get_pp_magic(netmem) & PP_MAGIC_MASK) == PP_SIGNATURE; +} + static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool) { __netmem_clear_lsb(netmem)->pp = pool; @@ -28,4 +35,28 @@ static inline void netmem_set_dma_addr(netmem_ref netmem, { __netmem_clear_lsb(netmem)->dma_addr = dma_addr; } + +static inline unsigned long netmem_get_dma_index(netmem_ref netmem) +{ + unsigned long magic; + + if (WARN_ON_ONCE(netmem_is_net_iov(netmem))) + return 0; + + magic = __netmem_clear_lsb(netmem)->pp_magic; + + return (magic & PP_DMA_INDEX_MASK) >> PP_DMA_INDEX_SHIFT; +} + +static inline void netmem_set_dma_index(netmem_ref netmem, + unsigned long id) +{ + unsigned long magic; + + if (WARN_ON_ONCE(netmem_is_net_iov(netmem))) + return; + + magic = netmem_get_pp_magic(netmem) | (id << PP_DMA_INDEX_SHIFT); + __netmem_clear_lsb(netmem)->pp_magic = magic; +} #endif diff --git a/net/core/page_pool.c b/net/core/page_pool.c index c8ce069605c42..b1c3e0ad6dbf4 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -151,9 +151,9 @@ u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats) EXPORT_SYMBOL(page_pool_ethtool_stats_get); #else -#define alloc_stat_inc(pool, __stat) -#define recycle_stat_inc(pool, __stat) -#define recycle_stat_add(pool, __stat, val) +#define alloc_stat_inc(...) do { } while (0) +#define recycle_stat_inc(...) do { } while (0) +#define recycle_stat_add(...) 
do { } while (0) #endif static bool page_pool_producer_lock(struct page_pool *pool) @@ -273,8 +273,7 @@ static int page_pool_init(struct page_pool *pool, /* Driver calling page_pool_create() also call page_pool_destroy() */ refcount_set(&pool->user_cnt, 1); - if (pool->dma_map) - get_device(pool->p.dev); + xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1); if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) { /* We rely on rtnl_lock()ing to make sure netdev_rx_queue @@ -312,9 +311,7 @@ static int page_pool_init(struct page_pool *pool, static void page_pool_uninit(struct page_pool *pool) { ptr_ring_cleanup(&pool->ring, NULL); - - if (pool->dma_map) - put_device(pool->p.dev); + xa_destroy(&pool->dma_mapped); #ifdef CONFIG_PAGE_POOL_STATS if (!pool->system) @@ -455,13 +452,21 @@ page_pool_dma_sync_for_device(const struct page_pool *pool, netmem_ref netmem, u32 dma_sync_size) { - if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) - __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); + if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) { + rcu_read_lock(); + /* re-check under rcu_read_lock() to sync with page_pool_scrub() */ + if (pool->dma_sync) + __page_pool_dma_sync_for_device(pool, netmem, + dma_sync_size); + rcu_read_unlock(); + } } -static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem) +static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp) { dma_addr_t dma; + int err; + u32 id; /* Setup DMA mapping: use 'struct page' area for storing DMA-addr * since dma_addr_t can be either 32 or 64 bits and does not always fit @@ -475,15 +480,30 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem) if (dma_mapping_error(pool->p.dev, dma)) return false; - if (page_pool_set_dma_addr_netmem(netmem, dma)) + if (page_pool_set_dma_addr_netmem(netmem, dma)) { + WARN_ONCE(1, "unexpected DMA address, please report to netdev@"); goto unmap_failed; + } + if (in_softirq()) + err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem), + PP_DMA_INDEX_LIMIT, gfp); + else + err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem), + PP_DMA_INDEX_LIMIT, gfp); + if (err) { + WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@"); + goto unset_failed; + } + + netmem_set_dma_index(netmem, id); page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len); return true; +unset_failed: + page_pool_set_dma_addr_netmem(netmem, 0); unmap_failed: - WARN_ONCE(1, "unexpected DMA address, please report to netdev@"); dma_unmap_page_attrs(pool->p.dev, dma, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); @@ -500,7 +520,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool, if (unlikely(!page)) return NULL; - if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page)))) { + if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) { put_page(page); return NULL; } @@ -547,7 +567,7 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool, */ for (i = 0; i < nr_pages; i++) { netmem = pool->alloc.cache[i]; - if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) { + if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) { put_page(netmem_to_page(netmem)); continue; } @@ -649,6 +669,8 @@ void page_pool_clear_pp_info(netmem_ref netmem) static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, netmem_ref netmem) { + struct page *old, *page = 
netmem_to_page(netmem); + unsigned long id; dma_addr_t dma; if (!pool->dma_map) @@ -657,6 +679,17 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, */ return; + id = netmem_get_dma_index(netmem); + if (!id) + return; + + if (in_softirq()) + old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0); + else + old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0); + if (old != page) + return; + dma = page_pool_get_dma_addr_netmem(netmem); /* When page is unmapped, it cannot be returned to our pool */ @@ -664,6 +697,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); page_pool_set_dma_addr_netmem(netmem, 0); + netmem_set_dma_index(netmem, 0); } /* Disconnects a page (from a page_pool). API users can have a need @@ -700,19 +734,16 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem) { - int ret; - /* BH protection not needed if current is softirq */ - if (in_softirq()) - ret = ptr_ring_produce(&pool->ring, (__force void *)netmem); - else - ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem); + bool in_softirq, ret; - if (!ret) { + /* BH protection not needed if current is softirq */ + in_softirq = page_pool_producer_lock(pool); + ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem); + if (ret) recycle_stat_inc(pool, ring); - return true; - } + page_pool_producer_unlock(pool, in_softirq); - return false; + return ret; } /* Only allow direct recycling in special circumstances, into the @@ -798,6 +829,10 @@ static bool page_pool_napi_local(const struct page_pool *pool) const struct napi_struct *napi; u32 cpuid; + /* On PREEMPT_RT the softirq can be preempted by the consumer */ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + return false; + if (unlikely(!in_softirq())) return false; @@ -1038,8 +1073,29 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool) static void page_pool_scrub(struct page_pool *pool) { + unsigned long id; + void *ptr; + page_pool_empty_alloc_cache_once(pool); - pool->destroy_cnt++; + if (!pool->destroy_cnt++ && pool->dma_map) { + if (pool->dma_sync) { + /* Disable page_pool_dma_sync_for_device() */ + pool->dma_sync = false; + + /* Make sure all concurrent returns that may see the old + * value of dma_sync (and thus perform a sync) have + * finished before doing the unmapping below. Skip the + * wait if the device doesn't actually need syncing, or + * if there are no outstanding mapped pages. + */ + if (dma_dev_need_sync(pool->p.dev) && + !xa_empty(&pool->dma_mapped)) + synchronize_net(); + } + + xa_for_each(&pool->dma_mapped, id, ptr) + __page_pool_release_page_dma(pool, page_to_netmem(ptr)); + } /* No more consumers should exist, but producers could still * be in-flight. @@ -1049,10 +1105,14 @@ static void page_pool_scrub(struct page_pool *pool) static int page_pool_release(struct page_pool *pool) { + bool in_softirq; int inflight; page_pool_scrub(pool); inflight = page_pool_inflight(pool, true); + /* Acquire producer lock to make sure producers have exited. 
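The page_pool changes above drop the get_device()/put_device() pair and instead record every DMA-mapped page in an xarray, so a pool being torn down can walk the outstanding mappings and unmap them. A minimal sketch of that bookkeeping pattern follows; it assumes <linux/xarray.h>, and the demo_* names are illustrative stand-ins, not the real page_pool internals:

struct demo_pool {
	struct xarray dma_mapped;	/* id -> struct page *, init with XA_FLAGS_ALLOC1 */
};

static int demo_track(struct demo_pool *p, struct page *page, u32 *id)
{
	/* ALLOC1 xarrays hand out ids starting at 1, so id 0 can safely
	 * mean "not tracked" in the per-page state.
	 */
	return xa_alloc(&p->dma_mapped, id, page, XA_LIMIT(1, U32_MAX),
			GFP_ATOMIC);
}

static bool demo_untrack(struct demo_pool *p, struct page *page, u32 id)
{
	/* cmpxchg so that only the caller still owning the slot unmaps;
	 * this is what lets ordinary returns race safely with teardown.
	 */
	return xa_cmpxchg(&p->dma_mapped, id, page, NULL, 0) == page;
}

Teardown then does an xa_for_each() over whatever survived and unmaps it, which is exactly the role page_pool_scrub() plays in the hunk above.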
*/ + in_softirq = page_pool_producer_lock(pool); + page_pool_producer_unlock(pool, in_softirq); if (!inflight) __page_pool_destroy(pool); diff --git a/net/core/selftests.c b/net/core/selftests.c index 561653f9d71d4..ef27594d6a996 100644 --- a/net/core/selftests.c +++ b/net/core/selftests.c @@ -160,8 +160,9 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, skb->csum = 0; skb->ip_summed = CHECKSUM_PARTIAL; if (attr->tcp) { - thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, - ihdr->daddr, 0); + int l4len = skb->len - skb_transport_offset(skb); + + thdr->check = ~tcp_v4_check(l4len, ihdr->saddr, ihdr->daddr, 0); skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } else { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f220306731dac..cf54593149cce 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -925,11 +925,6 @@ static void skb_clone_fraglist(struct sk_buff *skb) skb_get(list); } -static bool is_pp_netmem(netmem_ref netmem) -{ - return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE; -} - int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom) { @@ -1027,14 +1022,7 @@ bool napi_pp_put_page(netmem_ref netmem) { netmem = netmem_compound_head(netmem); - /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation - * in order to preserve any existing bits, such as bit 0 for the - * head page of compound page and bit 1 for pfmemalloc page, so - * mask those bits for freeing side when doing below checking, - * and page_is_pfmemalloc() is checked in __page_pool_put_page() - * to avoid recycling the pfmemalloc page. - */ - if (unlikely(!is_pp_netmem(netmem))) + if (unlikely(!netmem_is_pp(netmem))) return false; page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false); @@ -1074,7 +1062,7 @@ static int skb_pp_frag_ref(struct sk_buff *skb) for (i = 0; i < shinfo->nr_frags; i++) { head_netmem = netmem_compound_head(shinfo->frags[i].netmem); - if (likely(is_pp_netmem(head_netmem))) + if (likely(netmem_is_pp(head_netmem))) page_pool_ref_netmem(head_netmem); else page_ref_inc(netmem_to_page(head_netmem)); @@ -6209,9 +6197,6 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) if (!pskb_may_pull(skb, write_len)) return -ENOMEM; - if (!skb_frags_readable(skb)) - return -EFAULT; - if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) return 0; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index f76cbf49c68c8..97f52394d1eb1 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -529,16 +529,22 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, u32 off, u32 len, struct sk_psock *psock, struct sock *sk, - struct sk_msg *msg) + struct sk_msg *msg, + bool take_ref) { int num_sge, copied; + /* skb_to_sgvec will fail when the total number of fragments in + * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the + * caller may aggregate multiple skbs. + */ num_sge = skb_to_sgvec(skb, msg->sg.data, off, len); if (num_sge < 0) { /* skb linearize may fail with ENOMEM, but lets simply try again * later if this happens. Under memory pressure we don't want to * drop the skb. We need to linearize the skb so that the mapping * in skb_to_sgvec can not error. + * Note that skb_linearize requires the skb not to be shared. 
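The net/core/selftests.c fix above passes tcp_v4_check() the L4 length only, because the TCP pseudo-header carries the length of the segment (TCP header plus payload), not the whole frame. A hedged, host-byte-order sketch of the pseudo-header fold (the kernel's real helper for this is csum_tcpudp_magic(); byte-order details are elided here):

static u16 demo_tcp_v4_pseudo_sum(u32 saddr, u32 daddr, u32 l4len)
{
	u32 sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP;
	sum += l4len;		/* TCP header + payload only; passing the
				 * full skb->len (the old bug) inflates this
				 * field and the peer rejects the checksum */

	while (sum >> 16)	/* one's-complement carry fold */
		sum = (sum & 0xffff) + (sum >> 16);
	return (u16)sum;
}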
*/ if (skb_linearize(skb)) return -EAGAIN; @@ -555,7 +561,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, msg->sg.start = 0; msg->sg.size = copied; msg->sg.end = num_sge; - msg->skb = skb; + msg->skb = take_ref ? skb_get(skb) : skb; sk_psock_queue_msg(psock, msg); sk_psock_data_ready(sk, psock); @@ -563,7 +569,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, } static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, - u32 off, u32 len); + u32 off, u32 len, bool take_ref); static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len) @@ -577,7 +583,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * correctly. */ if (unlikely(skb->sk == sk)) - return sk_psock_skb_ingress_self(psock, skb, off, len); + return sk_psock_skb_ingress_self(psock, skb, off, len, true); msg = sk_psock_create_ingress_msg(sk, skb); if (!msg) return -EAGAIN; @@ -589,7 +595,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * into user buffers. */ skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true); if (err < 0) kfree(msg); return err; @@ -600,7 +606,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * because the skb is already accounted for here. */ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, - u32 off, u32 len) + u32 off, u32 len, bool take_ref) { struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC); struct sock *sk = psock->sk; @@ -609,7 +615,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb if (unlikely(!msg)) return -EAGAIN; skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref); if (err < 0) kfree(msg); return err; @@ -618,18 +624,13 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len, bool ingress) { - int err = 0; - if (!ingress) { if (!sock_writeable(psock->sk)) return -EAGAIN; return skb_send_sock(psock->sk, skb, off, len); } - skb_get(skb); - err = sk_psock_skb_ingress(psock, skb, off, len); - if (err < 0) - kfree_skb(skb); - return err; + + return sk_psock_skb_ingress(psock, skb, off, len); } static void sk_psock_skb_state(struct sk_psock *psock, @@ -654,12 +655,14 @@ static void sk_psock_backlog(struct work_struct *work) bool ingress; int ret; + /* Increment the psock refcnt to synchronize with close(fd) path in + * sock_map_close(), ensuring we wait for backlog thread completion + * before sk_socket is freed. If refcnt increment fails, it indicates + * sock_map_close() completed with sk_socket potentially already freed.
+ */ + if (!sk_psock_get(psock->sk)) + return; mutex_lock(&psock->work_mutex); - if (unlikely(state->len)) { - len = state->len; - off = state->off; - } - while ((skb = skb_peek(&psock->ingress_skb))) { len = skb->len; off = 0; @@ -669,6 +672,13 @@ static void sk_psock_backlog(struct work_struct *work) off = stm->offset; len = stm->full_len; } + + /* Resume processing from previous partial state */ + if (unlikely(state->len)) { + len = state->len; + off = state->off; + } + ingress = skb_bpf_ingress(skb); skb_bpf_redirect_clear(skb); do { @@ -679,7 +689,8 @@ static void sk_psock_backlog(struct work_struct *work) if (ret <= 0) { if (ret == -EAGAIN) { sk_psock_skb_state(psock, state, len, off); - + /* Restore redir info we cleared before */ + skb_bpf_set_redir(skb, psock->sk, ingress); /* Delay slightly to prioritize any * other work that might be here. */ @@ -696,11 +707,14 @@ static void sk_psock_backlog(struct work_struct *work) len -= ret; } while (len); + /* The entire skb sent, clear state */ + sk_psock_skb_state(psock, state, 0, 0); skb = skb_dequeue(&psock->ingress_skb); kfree_skb(skb); } end: mutex_unlock(&psock->work_mutex); + sk_psock_put(psock->sk, psock); } struct sk_psock *sk_psock_init(struct sock *sk, int node) @@ -1013,7 +1027,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, off = stm->offset; len = stm->full_len; } - err = sk_psock_skb_ingress_self(psock, skb, off, len); + err = sk_psock_skb_ingress_self(psock, skb, off, len, false); } if (err < 0) { spin_lock_bh(&psock->ingress_lock); diff --git a/net/core/sock.c b/net/core/sock.c index 0842dc9189bf8..9c63da2829f6e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3157,16 +3157,16 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) { struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL; struct proto *prot = sk->sk_prot; - bool charged = false; + bool charged = true; long allocated; sk_memory_allocated_add(sk, amt); allocated = sk_memory_allocated(sk); if (memcg) { - if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge())) + charged = mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()); + if (!charged) goto suppress_allocation; - charged = true; } /* Under limit. 
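The sk_psock_backlog() change above pins the psock with a reference for the whole run of the worker, so the close(fd) path cannot free the socket underneath it. The underlying pattern, sketched with a made-up demo_ctx rather than the real psock (assumes <linux/refcount.h>, <linux/workqueue.h>, <linux/slab.h>):

struct demo_ctx {
	refcount_t refcnt;
	struct work_struct work;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	/* Fails once teardown has already dropped the count to zero,
	 * in which case the worker simply backs off.
	 */
	if (!refcount_inc_not_zero(&ctx->refcnt))
		return;

	/* ... process the backlog under the reference ... */

	if (refcount_dec_and_test(&ctx->refcnt))
		kfree(ctx);	/* we held the last reference */
}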
*/ @@ -3251,7 +3251,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) sk_memory_allocated_sub(sk, amt); - if (charged) + if (memcg && charged) mem_cgroup_uncharge_skmem(memcg, amt); return 0; @@ -3930,7 +3930,7 @@ static int assign_proto_idx(struct proto *prot) { prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); - if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { + if (unlikely(prot->inuse_idx == PROTO_INUSE_NR)) { pr_err("PROTO_INUSE_NR exhausted\n"); return -ENOSPC; } @@ -3941,7 +3941,7 @@ static int assign_proto_idx(struct proto *prot) static void release_proto_idx(struct proto *prot) { - if (prot->inuse_idx != PROTO_INUSE_NR - 1) + if (prot->inuse_idx != PROTO_INUSE_NR) clear_bit(prot->inuse_idx, proto_inuse_idx); } #else diff --git a/net/core/utils.c b/net/core/utils.c index 27f4cffaae05d..b8c21a859e27b 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -473,11 +473,11 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, EXPORT_SYMBOL(inet_proto_csum_replace16); void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, - __wsum diff, bool pseudohdr) + __wsum diff, bool pseudohdr, bool ipv6) { if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_replace_by_diff(sum, diff); - if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) + if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr && !ipv6) skb->csum = ~csum_sub(diff, skb->csum); } else if (pseudohdr) { *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum))); diff --git a/net/core/xdp.c b/net/core/xdp.c index bcc5551c6424b..23e7d736718b0 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -381,8 +381,8 @@ void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, page = virt_to_head_page(data); if (napi_direct && xdp_return_frame_no_direct()) napi_direct = false; - /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) - * as mem->type knows this a page_pool page + /* No need to check netmem_is_pp() as mem->type knows this is a + * page_pool page */ page_pool_put_full_page(page->pp, page, napi_direct); break; diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 8c3c068728e51..fe75821623a4f 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -257,7 +257,7 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb, int source_port; u8 *brcm_tag; - if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID))) + if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN))) return NULL; brcm_tag = dsa_etype_header_pos_rx(skb); diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index d25d717c121f0..f514eb52b8d4b 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -49,7 +49,12 @@ void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, else addr = iph->saddr; - *dst = inet_dev_addr_type(nft_net(pkt), dev, addr); + if (priv->flags & (NFTA_FIB_F_IIF | NFTA_FIB_F_OIF)) { + *dst = inet_dev_addr_type(nft_net(pkt), dev, addr); + return; + } + + *dst = inet_addr_type_dev_table(nft_net(pkt), pkt->skb->dev, addr); } EXPORT_SYMBOL_GPL(nft_fib4_eval_type); @@ -64,8 +69,8 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, struct flowi4 fl4 = { .flowi4_scope = RT_SCOPE_UNIVERSE, .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_proto = pkt->tprot, .flowi4_uid = sock_net_uid(nft_net(pkt), NULL), - .flowi4_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)), }; const struct net_device *oif; const struct net_device *found; @@ -89,6 +94,8 @@ void
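The assign_proto_idx() fix above hinges on a bitmap API detail: find_first_zero_bit() returns the bitmap size, not size - 1, when every bit is set, so exhaustion is idx == PROTO_INUSE_NR; the old test both missed real exhaustion and wasted the last usable slot. A standalone sketch of the corrected check (64 is an arbitrary demo size):

DECLARE_BITMAP(demo_idx, 64);

static int demo_alloc_idx(void)
{
	unsigned int idx = find_first_zero_bit(demo_idx, 64);

	if (idx == 64)		/* no zero bit found: bitmap full */
		return -ENOSPC;
	set_bit(idx, demo_idx);
	return idx;
}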
nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, else oif = NULL; + fl4.flowi4_l3mdev = nft_fib_l3mdev_master_ifindex_rcu(pkt, oif); + iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); if (!iph) { regs->verdict.code = NFT_BREAK; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 41b320f0c20eb..88d7c96bfac06 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -189,7 +189,11 @@ const __u8 ip_tos2prio[16] = { EXPORT_SYMBOL(ip_tos2prio); static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); +#ifndef CONFIG_PREEMPT_RT #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field) +#else +#define RT_CACHE_STAT_INC(field) this_cpu_inc(rt_cache_stat.field) +#endif #ifdef CONFIG_PROC_FS static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b731a4a8f2b0d..156da81bce068 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1145,7 +1145,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) goto do_error; while (msg_data_left(msg)) { - ssize_t copy = 0; + int copy = 0; skb = tcp_write_queue_tail(sk); if (skb) diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 32b28fc21b63c..408985eb74eef 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -3,6 +3,7 @@ #include #include #include +#include void tcp_fastopen_init_key_once(struct net *net) { @@ -279,6 +280,8 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, refcount_set(&req->rsk_refcnt, 2); + sk_mark_napi_id_set(child, skb); + /* Now finish processing the fastopen child socket. */ tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d29219e067b7f..d176e7888a203 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -665,10 +665,12 @@ EXPORT_SYMBOL(tcp_initialize_rcv_mss); */ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { - u32 new_sample = tp->rcv_rtt_est.rtt_us; - long m = sample; + u32 new_sample, old_sample = tp->rcv_rtt_est.rtt_us; + long m = sample << 3; - if (new_sample != 0) { + if (old_sample == 0 || m < old_sample) { + new_sample = m; + } else { /* If we sample in larger samples in the non-timestamp * case, we could grossly overestimate the RTT especially * with chatty applications or bulk transfer apps which @@ -679,17 +681,9 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) * else with timestamps disabled convergence takes too * long. */ - if (!win_dep) { - m -= (new_sample >> 3); - new_sample += m; - } else { - m <<= 3; - if (m < new_sample) - new_sample = m; - } - } else { - /* No previous measure. 
*/ - new_sample = m << 3; + if (win_dep) + return; + new_sample = old_sample - (old_sample >> 3) + sample; } tp->rcv_rtt_est.rtt_us = new_sample; @@ -713,7 +707,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) tp->rcv_rtt_est.time = tp->tcp_mstamp; } -static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp) +static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp, u32 min_delta) { u32 delta, delta_us; @@ -723,7 +717,7 @@ static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp) if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) { if (!delta) - delta = 1; + delta = min_delta; delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); return delta_us; } @@ -741,9 +735,9 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, if (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) { - s32 delta = tcp_rtt_tsopt_us(tp); + s32 delta = tcp_rtt_tsopt_us(tp, 0); - if (delta >= 0) + if (delta > 0) tcp_rcv_rtt_update(tp, delta, 0); } } @@ -755,8 +749,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - u32 copied; - int time; + int time, inq, copied; trace_tcp_rcv_space_adjust(sk); @@ -767,6 +760,9 @@ void tcp_rcv_space_adjust(struct sock *sk) /* Number of bytes copied to user in last RTT */ copied = tp->copied_seq - tp->rcvq_space.seq; + /* Number of bytes in receive queue. */ + inq = tp->rcv_nxt - tp->copied_seq; + copied -= inq; if (copied <= tp->rcvq_space.space) goto new_measure; @@ -2486,20 +2482,33 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) { const struct sock *sk = (const struct sock *)tp; - if (tp->retrans_stamp && - tcp_tsopt_ecr_before(tp, tp->retrans_stamp)) - return true; /* got echoed TS before first retransmission */ + /* Received an echoed timestamp before the first retransmission? */ + if (tp->retrans_stamp) + return tcp_tsopt_ecr_before(tp, tp->retrans_stamp); + + /* We set tp->retrans_stamp upon the first retransmission of a loss + * recovery episode, so normally if tp->retrans_stamp is 0 then no + * retransmission has happened yet (likely due to TSQ, which can cause + * fast retransmits to be delayed). So if snd_una advanced while + * tp->retrans_stamp is 0, then apparently a packet was merely delayed, + * not lost. But there are exceptions where we retransmit but then + * clear tp->retrans_stamp, so we check for those exceptions. + */ - /* Check if nothing was retransmitted (retrans_stamp==0), which may - * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp - * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear - * retrans_stamp even if we had retransmitted the SYN. + /* (1) For non-SACK connections, tcp_is_non_sack_preventing_reopen() + * clears tp->retrans_stamp when snd_una == high_seq. */ - if (!tp->retrans_stamp && /* no record of a retransmit/SYN? */ - sk->sk_state != TCP_SYN_SENT) /* not the FLAG_SYN_ACKED case? */ - return true; /* nothing was retransmitted */ + if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq)) + return false; - return false; + /* (2) In TCP_SYN_SENT tcp_clean_rtx_queue() clears tp->retrans_stamp + * when FLAG_SYN_ACKED is set, even if the SYN was + * retransmitted. + */ + if (sk->sk_state == TCP_SYN_SENT) + return false; + + return true; /* tp->retrans_stamp is zero; no retransmit yet */ } /* Undo procedures.
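The rewritten tcp_rcv_rtt_update() above keeps rtt_us scaled by 8: a first sample, or one lower than the current estimate, replaces the estimate outright, while a higher timestamp-based sample is folded in with gain 1/8 (win_dep samples are otherwise ignored, as the early return shows). The arithmetic, isolated into a small sketch:

static u32 demo_rcv_rtt_update(u32 old_scaled8, u32 sample_us)
{
	u32 m = sample_us << 3;			/* scale new sample by 8 */

	if (old_scaled8 == 0 || m < old_scaled8)
		return m;			/* adopt first/lower sample */

	/* EWMA in scaled units: adding the unscaled sample is adding
	 * 1/8 of the scaled one, so new = 7/8 * old + 1/8 * (8 * sample).
	 */
	return old_scaled8 - (old_scaled8 >> 3) + sample_us;
}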
*/ @@ -3226,7 +3235,7 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag, */ if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED) - seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp); + seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp, 1); rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */ if (seq_rtt_us < 0) @@ -6841,6 +6850,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (!tp->srtt_us) tcp_synack_rtt_meas(sk, req); + if (tp->rx_opt.tstamp_ok) + tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; + if (req) { tcp_rcv_synrecv_state_fastopen(sk); } else { @@ -6866,9 +6878,6 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); - if (tp->rx_opt.tstamp_ok) - tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; - if (!inet_csk(sk)->icsk_ca_ops->cong_control) tcp_update_pacing_rate(sk); diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index da5d4aea1b591..845730184c5d3 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -332,6 +332,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, bool copy_dtor; __sum16 check; __be16 newlen; + int ret = 0; mss = skb_shinfo(gso_skb)->gso_size; if (gso_skb->len <= sizeof(*uh) + mss) @@ -360,6 +361,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) return __udp_gso_segment_list(gso_skb, features, is_ipv6); + ret = __skb_linearize(gso_skb); + if (ret) + return ERR_PTR(ret); + /* Setup csum, as fraglist skips this in udp4_gro_receive. */ gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head; gso_skb->csum_offset = offsetof(struct udphdr, check); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 16ba3bb12fc4b..be51b8792b96f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3548,11 +3548,9 @@ static void addrconf_gre_config(struct net_device *dev) ASSERT_RTNL(); - idev = ipv6_find_idev(dev); - if (IS_ERR(idev)) { - pr_debug("%s: add_dev failed\n", __func__); + idev = addrconf_add_dev(dev); + if (IS_ERR(idev)) return; - } /* Generate the IPv6 link-local address using addrconf_addr_gen(), * unless we have an IPv4 GRE device not bound to an IP address and @@ -3566,9 +3564,6 @@ static void addrconf_gre_config(struct net_device *dev) } add_v4_addrs(idev); - - if (dev->flags & IFF_POINTOPOINT) - addrconf_add_mroute(dev); } #endif diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 62618a058b8fa..a247bb93908bf 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1207,6 +1207,10 @@ static int calipso_req_setattr(struct request_sock *req, struct ipv6_opt_hdr *old, *new; struct sock *sk = sk_to_full_sk(req_to_sk(req)); + /* sk is NULL for SYN+ACK w/ SYN Cookie */ + if (!sk) + return -ENOMEM; + if (req_inet->ipv6_opt && req_inet->ipv6_opt->hopopt) old = req_inet->ipv6_opt->hopopt; else @@ -1247,6 +1251,10 @@ static void calipso_req_delattr(struct request_sock *req) struct ipv6_txoptions *txopts; struct sock *sk = sk_to_full_sk(req_to_sk(req)); + /* sk is NULL for SYN+ACK w/ SYN Cookie */ + if (!sk) + return; + if (!req_inet->ipv6_opt || !req_inet->ipv6_opt->hopopt) return; diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index 95e9146918cc6..b8d43ed4689db 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -86,7 +86,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb, diff = get_csum_diff(ip6h, 
p); inet_proto_csum_replace_by_diff(&th->check, skb, - diff, true); + diff, true, true); } break; case NEXTHDR_UDP: @@ -97,7 +97,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb, if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { diff = get_csum_diff(ip6h, p); inet_proto_csum_replace_by_diff(&uh->check, skb, - diff, true); + diff, true, true); if (!uh->check) uh->check = CSUM_MANGLED_0; } @@ -111,7 +111,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb, diff = get_csum_diff(ip6h, p); inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb, - diff, true); + diff, true, true); } break; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 89a61e040e6a1..f0e5431c2d46f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -2043,8 +2043,6 @@ struct sk_buff *ip6_make_skb(struct sock *sk, ip6_cork_release(cork, &v6_cork); return ERR_PTR(err); } - if (ipc6->dontfrag < 0) - ipc6->dontfrag = inet6_test_bit(DONTFRAG, sk); err = __ip6_append_data(sk, &queue, cork, &v6_cork, ¤t->task_frag, getfrag, from, diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 581ce055bf520..4541836ee3da2 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -164,20 +164,20 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct ip6_fraglist_iter iter; struct sk_buff *frag2; - if (first_len - hlen > mtu || - skb_headroom(skb) < (hroom + sizeof(struct frag_hdr))) + if (first_len - hlen > mtu) goto blackhole; - if (skb_cloned(skb)) + if (skb_cloned(skb) || + skb_headroom(skb) < (hroom + sizeof(struct frag_hdr))) goto slow_path; skb_walk_frags(skb, frag2) { - if (frag2->len > mtu || - skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr))) + if (frag2->len > mtu) goto blackhole; /* Partially cloned skb? 
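The br_ip6_fragment() reshuffle in this hunk changes which failures are fatal: only an over-MTU fragment blackholes the packet, while a cloned skb or missing headroom for the fragment header now falls back to the copying slow path instead of being dropped. The decision, reduced to a sketch:

static int demo_frag_path(bool over_mtu, bool cloned, bool short_headroom)
{
	if (over_mtu)
		return -EMSGSIZE;	/* blackhole: cannot be fragmented */
	if (cloned || short_headroom)
		return 1;		/* slow path: reallocate and copy */
	return 0;			/* fast path: build fraglist in place */
}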
*/ - if (skb_shared(frag2)) + if (skb_shared(frag2) || + skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr))) goto slow_path; } diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 7fd9d7b21cd42..421036a3605b4 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -50,6 +50,7 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv, fl6->flowi6_mark = pkt->skb->mark; fl6->flowlabel = (*(__be32 *)iph) & IPV6_FLOWINFO_MASK; + fl6->flowi6_l3mdev = nft_fib_l3mdev_master_ifindex_rcu(pkt, dev); return lookup_flags; } @@ -73,8 +74,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv, else if (priv->flags & NFTA_FIB_F_OIF) dev = nft_out(pkt); - fl6.flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev); - nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph); if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true)) @@ -158,6 +157,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, { const struct nft_fib *priv = nft_expr_priv(expr); int noff = skb_network_offset(pkt->skb); + const struct net_device *found = NULL; const struct net_device *oif = NULL; u32 *dest = ®s->data[priv->dreg]; struct ipv6hdr *iph, _iph; @@ -165,7 +165,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, .flowi6_iif = LOOPBACK_IFINDEX, .flowi6_proto = pkt->tprot, .flowi6_uid = sock_net_uid(nft_net(pkt), NULL), - .flowi6_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)), }; struct rt6_info *rt; int lookup_flags; @@ -203,11 +202,15 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) goto put_rt_err; - if (oif && oif != rt->rt6i_idev->dev && - l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) != oif->ifindex) - goto put_rt_err; + if (!oif) { + found = rt->rt6i_idev->dev; + } else { + if (oif == rt->rt6i_idev->dev || + l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == oif->ifindex) + found = oif; + } - nft_fib_store_result(dest, priv, rt->rt6i_idev->dev); + nft_fib_store_result(dest, priv, found); put_rt_err: ip6_rt_put(rt); } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 608fa9d05b55b..328419e05c815 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -777,7 +777,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = READ_ONCE(sk->sk_mark); fl6.flowi6_uid = sk->sk_uid; - ipcm6_init(&ipc6); + ipcm6_init_sk(&ipc6, sk); ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags); ipc6.sockc.mark = fl6.flowi6_mark; @@ -890,9 +890,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (hdrincl) fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH; - if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); @@ -903,9 +900,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - if (ipc6.dontfrag < 0) - ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk); - if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index c74705ead9849..e445a0a45568d 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -1644,10 +1644,8 @@ static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = { [SEG6_LOCAL_SRH] = { .type = NLA_BINARY }, [SEG6_LOCAL_TABLE] = { .type = NLA_U32 }, [SEG6_LOCAL_VRFTABLE] = 
{ .type = NLA_U32 }, - [SEG6_LOCAL_NH4] = { .type = NLA_BINARY, - .len = sizeof(struct in_addr) }, - [SEG6_LOCAL_NH6] = { .type = NLA_BINARY, - .len = sizeof(struct in6_addr) }, + [SEG6_LOCAL_NH4] = NLA_POLICY_EXACT_LEN(sizeof(struct in_addr)), + [SEG6_LOCAL_NH6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), [SEG6_LOCAL_IIF] = { .type = NLA_U32 }, [SEG6_LOCAL_OIF] = { .type = NLA_U32 }, [SEG6_LOCAL_BPF] = { .type = NLA_NESTED }, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 197d0ac47592a..57e38e5e4be92 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1399,7 +1399,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); - ipcm6_init(&ipc6); + ipcm6_init_sk(&ipc6, sk); ipc6.gso_size = READ_ONCE(up->gso_size); ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags); ipc6.sockc.mark = READ_ONCE(sk->sk_mark); @@ -1608,9 +1608,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); - if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel); dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected); @@ -1656,8 +1653,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) WRITE_ONCE(up->pending, AF_INET6); do_append_data: - if (ipc6.dontfrag < 0) - ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk); up->len += ulen; err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), &ipc6, fl6, dst_rt6_info(dst), diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index f4c1da0708269..b98d13584c81f 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -547,7 +547,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = READ_ONCE(sk->sk_mark); fl6.flowi6_uid = sk->sk_uid; - ipcm6_init(&ipc6); + ipcm6_init_sk(&ipc6, sk); if (lsa) { if (addr_len < SIN6_LEN_RFC2133) @@ -634,9 +634,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); - if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); @@ -648,9 +645,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - if (ipc6.dontfrag < 0) - ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk); - if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index f11fd360b422d..cf2b8a05c3389 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2876,7 +2876,7 @@ static int ieee80211_scan(struct wiphy *wiphy, * the frames sent while scanning on other channel will be * lost) */ - if (sdata->deflink.u.ap.beacon && + if (ieee80211_num_beaconing_links(sdata) && (!(wiphy->features & NL80211_FEATURE_AP_SCAN) || !(req->flags & NL80211_SCAN_FLAG_AP))) return -EOPNOTSUPP; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index cca6d14084d21..282e8c13e2bfc 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -2097,6 +2097,9 @@ void ieee80211_link_release_channel(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + return; + lockdep_assert_wiphy(sdata->local->hw.wiphy); if (rcu_access_pointer(link->conf->chanctx_conf)) diff 
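The ipcm6_init_sk() conversions above (raw, UDPv6, L2TP) remove the scattered "if (field < 0) field = socket default" fixups by seeding the control structure from the socket in one place. The shape of that refactor, with demo_* stand-ins rather than the real ipcm6 state:

struct demo_ipcm {
	int tclass;
	int dontfrag;
};

static void demo_ipcm_init_sk(struct demo_ipcm *o, int sk_tclass,
			      bool sk_dontfrag)
{
	/* Old style: initialize to -1 here, then every sender patched
	 * it up with "if (o->tclass < 0) o->tclass = np->tclass;" just
	 * before use. Seeding from the socket removes those branches.
	 */
	o->tclass = sk_tclass;
	o->dontfrag = sk_dontfrag;
}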
--git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 1e9389c49a57d..e6f937cfedcf6 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -152,12 +152,6 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf, spin_lock_bh(&local->fq.lock); rcu_read_lock(); - p += scnprintf(p, - bufsz + buf - p, - "target %uus interval %uus ecn %s\n", - codel_time_to_us(sta->cparams.target), - codel_time_to_us(sta->cparams.interval), - sta->cparams.ecn ? "yes" : "no"); p += scnprintf(p, bufsz + buf - p, "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n"); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index bfe0514efca37..2f017dbbcb975 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1209,6 +1209,15 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) if ((_link = wiphy_dereference((local)->hw.wiphy, \ ___sdata->link[___link_id]))) +#define for_each_link_data(sdata, __link) \ + struct ieee80211_sub_if_data *__sdata = sdata; \ + for (int __link_id = 0; \ + __link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \ + if ((!(__sdata)->vif.valid_links || \ + (__sdata)->vif.valid_links & BIT(__link_id)) && \ + ((__link) = sdata_dereference((__sdata)->link[__link_id], \ + (__sdata)))) + static inline int ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems, struct cfg80211_rnr_elems *rnr_elems, @@ -2061,6 +2070,9 @@ static inline void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata ieee80211_vif_set_links(sdata, 0, 0); } +void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata); +void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata); + /* tx handling */ void ieee80211_clear_tx_pending(struct ieee80211_local *local); void ieee80211_tx_pending(struct tasklet_struct *t); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 7e1e561ef76c1..209d6ffa8e426 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -494,6 +494,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do break; list_del_rcu(&sdata->u.mntr.list); break; + case NL80211_IFTYPE_AP_VLAN: + ieee80211_apvlan_link_clear(sdata); + break; default: break; } @@ -1268,6 +1271,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) sdata->crypto_tx_tailroom_needed_cnt += master->crypto_tx_tailroom_needed_cnt; + ieee80211_apvlan_link_setup(sdata); + break; } case NL80211_IFTYPE_AP: @@ -1322,7 +1327,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) case NL80211_IFTYPE_AP_VLAN: /* no need to tell driver, but set carrier and chanctx */ if (sdata->bss->active) { - ieee80211_link_vlan_copy_chanctx(&sdata->deflink); + struct ieee80211_link_data *link; + + for_each_link_data(sdata, link) { + ieee80211_link_vlan_copy_chanctx(link); + } + netif_carrier_on(dev); ieee80211_set_vif_encap_ops(sdata); } else { diff --git a/net/mac80211/link.c b/net/mac80211/link.c index 46092fbcde90e..9484449d6a347 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -12,6 +12,71 @@ #include "key.h" #include "debugfs_netdev.h" +static void ieee80211_update_apvlan_links(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *vlan; + struct ieee80211_link_data *link; + u16 ap_bss_links = sdata->vif.valid_links; + u16 new_links, vlan_links; + unsigned long add; + + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { + int link_id; + + if (!vlan) + 
continue; + + /* No support for 4addr with MLO yet */ + if (vlan->wdev.use_4addr) + return; + + vlan_links = vlan->vif.valid_links; + + new_links = ap_bss_links; + + add = new_links & ~vlan_links; + if (!add) + continue; + + ieee80211_vif_set_links(vlan, add, 0); + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + link = sdata_dereference(vlan->link[link_id], vlan); + ieee80211_link_vlan_copy_chanctx(link); + } + } +} + +void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *ap_bss = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + u16 new_links = ap_bss->vif.valid_links; + unsigned long add; + int link_id; + + if (!ap_bss->vif.valid_links) + return; + + add = new_links; + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + sdata->wdev.valid_links |= BIT(link_id); + ether_addr_copy(sdata->wdev.links[link_id].addr, + ap_bss->wdev.links[link_id].addr); + } + + ieee80211_vif_set_links(sdata, new_links, 0); +} + +void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata->wdev.valid_links) + return; + + sdata->wdev.valid_links = 0; + ieee80211_vif_clear_links(sdata); +} + void ieee80211_link_setup(struct ieee80211_link_data *link) { if (link->sdata->vif.type == NL80211_IFTYPE_STATION) @@ -28,8 +93,16 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, if (link_id < 0) link_id = 0; - rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); - rcu_assign_pointer(sdata->link[link_id], link); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + struct ieee80211_sub_if_data *ap_bss; + struct ieee80211_bss_conf *ap_bss_conf; + + ap_bss = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + ap_bss_conf = sdata_dereference(ap_bss->vif.link_conf[link_id], + ap_bss); + memcpy(link_conf, ap_bss_conf, sizeof(*link_conf)); + } link->sdata = sdata; link->link_id = link_id; @@ -51,6 +124,7 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, if (!deflink) { switch (sdata->vif.type) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: ether_addr_copy(link_conf->addr, sdata->wdev.links[link_id].addr); link_conf->bssid = link_conf->addr; @@ -65,6 +139,9 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, ieee80211_link_debugfs_add(link); } + + rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); + rcu_assign_pointer(sdata->link[link_id], link); } void ieee80211_link_stop(struct ieee80211_link_data *link) @@ -174,6 +251,7 @@ static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata, switch (sdata->vif.type) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: /* in an AP all links are always active */ sdata->vif.active_links = valid_links; @@ -275,12 +353,16 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links); /* tell the driver */ - ret = drv_change_vif_links(sdata->local, sdata, - old_links & old_active, - new_links & sdata->vif.active_links, - old); + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + ret = drv_change_vif_links(sdata->local, sdata, + old_links & old_active, + new_links & sdata->vif.active_links, + old); if (!new_links) ieee80211_debugfs_recreate_netdev(sdata, false); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + ieee80211_update_apvlan_links(sdata); } if (ret) { diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 2922a9fec950d..ba8aeb47bffd7 100644 --- 
a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -636,7 +636,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, mesh_path_add_gate(mpath); } rcu_read_unlock(); - } else { + } else if (ifmsh->mshcfg.dot11MeshForwarding) { rcu_read_lock(); mpath = mesh_path_lookup(sdata, target_addr); if (mpath) { @@ -654,6 +654,8 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, } } rcu_read_unlock(); + } else { + forward = false; } if (reply) { @@ -671,7 +673,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, } } - if (forward && ifmsh->mshcfg.dot11MeshForwarding) { + if (forward) { u32 preq_id; u8 hopcount; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 8fa9b9dd46118..fd7434995a475 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -6702,6 +6702,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; struct ieee80211_mgmt *mgmt = (void *) hdr; + struct ieee80211_ext *ext = NULL; size_t baselen; struct ieee802_11_elems *elems; struct ieee80211_local *local = sdata->local; @@ -6727,12 +6728,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, /* Process beacon from the current BSS */ bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type); if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { - struct ieee80211_ext *ext = (void *) mgmt; - - if (ieee80211_is_s1g_short_beacon(ext->frame_control)) - variable = ext->u.s1g_short_beacon.variable; - else - variable = ext->u.s1g_beacon.variable; + ext = (void *)mgmt; + variable = ext->u.s1g_beacon.variable + + ieee80211_s1g_optional_len(ext->frame_control); } baselen = (u8 *) variable - (u8 *) mgmt; @@ -6917,7 +6915,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, } if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) || - ieee80211_is_s1g_short_beacon(mgmt->frame_control)) + (ext && ieee80211_is_s1g_short_beacon(ext->frame_control, + parse_params.start, + parse_params.len))) goto free; link->u.mgd.beacon_crc = ncrc; link->u.mgd.beacon_crc_valid = true; diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c index 6da39c864f45b..922ea9a6e2412 100644 --- a/net/mac80211/parse.c +++ b/net/mac80211/parse.c @@ -758,7 +758,6 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, { const struct element *elem, *sub; size_t profile_len = 0; - bool found = false; if (!bss || !bss->transmitted_bss) return profile_len; @@ -809,15 +808,14 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, index[2], new_bssid); if (ether_addr_equal(new_bssid, bss->bssid)) { - found = true; elems->bssid_index_len = index[1]; elems->bssid_index = (void *)&index[2]; - break; + return profile_len; } } } - return found ? 
profile_len : 0; + return 0; } static void diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 3dc9752188d58..1b045b62961f5 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -971,8 +971,6 @@ int rate_control_set_rates(struct ieee80211_hw *hw, if (sta->uploaded) drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); - ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta)); - return 0; } EXPORT_SYMBOL(rate_control_set_rates); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8e1fbdd3bff10..8e1d00efa62e5 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -4481,6 +4481,10 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) if (!multicast && !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) return false; + /* reject invalid/our STA address */ + if (!is_valid_ether_addr(hdr->addr2) || + ether_addr_equal(sdata->dev->dev_addr, hdr->addr2)) + return false; if (!rx->sta) { int rate_idx; if (status->encoding != RX_ENC_LEGACY) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index adb88c06b5982..ce6d5857214eb 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -260,6 +260,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) struct ieee80211_mgmt *mgmt = (void *)skb->data; struct ieee80211_bss *bss; struct ieee80211_channel *channel; + struct ieee80211_ext *ext; size_t min_hdr_len = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); @@ -269,12 +270,10 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) return; if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { - if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_short_beacon.variable); - else - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_beacon); + ext = (struct ieee80211_ext *)mgmt; + min_hdr_len = + offsetof(struct ieee80211_ext, u.s1g_beacon.variable) + + ieee80211_s1g_optional_len(ext->frame_control); } if (skb->len < min_hdr_len) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 49095f19a0f22..4eb45e08b97e7 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -18,7 +18,6 @@ #include #include -#include #include #include "ieee80211_i.h" #include "driver-ops.h" @@ -683,12 +682,6 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata, } } - sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD; - sta->cparams.target = MS2TIME(20); - sta->cparams.interval = MS2TIME(100); - sta->cparams.ecn = true; - sta->cparams.ce_threshold_selector = 0; - sta->cparams.ce_threshold_mask = 0; sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); @@ -2878,27 +2871,6 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta) return sta->deflink.status_stats.last_ack; } -static void sta_update_codel_params(struct sta_info *sta, u32 thr) -{ - if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { - sta->cparams.target = MS2TIME(50); - sta->cparams.interval = MS2TIME(300); - sta->cparams.ecn = false; - } else { - sta->cparams.target = MS2TIME(20); - sta->cparams.interval = MS2TIME(100); - sta->cparams.ecn = true; - } -} - -void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, - u32 thr) -{ - struct sta_info *sta = container_of(pubsta, struct sta_info, sta); - - sta_update_codel_params(sta, thr); -} - int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = sta->sdata; diff --git a/net/mac80211/sta_info.h 
b/net/mac80211/sta_info.h index 9195d5a2de0a8..a9cfeeb13e53f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -466,14 +466,6 @@ struct ieee80211_fragment_cache { unsigned int next; }; -/* - * The bandwidth threshold below which the per-station CoDel parameters will be - * scaled to be more lenient (to prevent starvation of slow stations). This - * value will be scaled by the number of active stations when it is being - * applied. - */ -#define STA_SLOW_THRESHOLD 6000 /* 6 Mbps */ - /** * struct link_sta_info - Link STA information * All link specific sta info are stored here for reference. This can be @@ -619,7 +611,6 @@ struct link_sta_info { * @sta: station information we share with the driver * @sta_state: duplicates information about station state (for debug) * @rcu_head: RCU head used for freeing this station struct - * @cparams: CoDel parameters for this station. * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED) * @amsdu_mesh_control: track the mesh A-MSDU format used by the peer: * @@ -710,8 +701,6 @@ struct sta_info { struct dentry *debugfs_dir; #endif - struct codel_params cparams; - u8 reserved_tid; s8 amsdu_mesh_control; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 0ff8b56f58070..00c309e7768e1 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1401,16 +1401,9 @@ static struct sk_buff *fq_tin_dequeue_func(struct fq *fq, local = container_of(fq, struct ieee80211_local, fq); txqi = container_of(tin, struct txq_info, tin); + cparams = &local->cparams; cstats = &txqi->cstats; - if (txqi->txq.sta) { - struct sta_info *sta = container_of(txqi->txq.sta, - struct sta_info, sta); - cparams = &sta->cparams; - } else { - cparams = &local->cparams; - } - if (flow == &tin->default_flow) cvars = &txqi->def_cvars; else @@ -4523,8 +4516,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); } else if (ieee80211_vif_is_mld(&sdata->vif) && - sdata->vif.type == NL80211_IFTYPE_AP && - !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) { + ((sdata->vif.type == NL80211_IFTYPE_AP && + !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) || + (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !sdata->wdev.use_4addr))) { ieee80211_mlo_multicast_tx(dev, skb); } else { normal: diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a98ae563613c0..77638e965726c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3908,7 +3908,7 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local, { u64 tsf = drv_get_tsf(local, sdata); u64 dtim_count = 0; - u16 beacon_int = sdata->vif.bss_conf.beacon_int * 1024; + u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024; u8 dtim_period = sdata->vif.bss_conf.dtim_period; struct ps_data *ps; u8 bcns_from_dtim; diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index df62638b64984..3373b6b34dc7d 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -81,8 +81,8 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index) if (index < net->mpls.platform_labels) { struct mpls_route __rcu **platform_label = - rcu_dereference(net->mpls.platform_label); - rt = rcu_dereference(platform_label[index]); + rcu_dereference_rtnl(net->mpls.platform_label); + rt = rcu_dereference_rtnl(platform_label[index]); } return rt; } diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index 4e0842df5234e..2c260f33b55cc 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h @@ -143,16 +143,15 @@ struct 
ncsi_channel_vlan_filter { }; struct ncsi_channel_stats { - u32 hnc_cnt_hi; /* Counter cleared */ - u32 hnc_cnt_lo; /* Counter cleared */ - u32 hnc_rx_bytes; /* Rx bytes */ - u32 hnc_tx_bytes; /* Tx bytes */ - u32 hnc_rx_uc_pkts; /* Rx UC packets */ - u32 hnc_rx_mc_pkts; /* Rx MC packets */ - u32 hnc_rx_bc_pkts; /* Rx BC packets */ - u32 hnc_tx_uc_pkts; /* Tx UC packets */ - u32 hnc_tx_mc_pkts; /* Tx MC packets */ - u32 hnc_tx_bc_pkts; /* Tx BC packets */ + u64 hnc_cnt; /* Counter cleared */ + u64 hnc_rx_bytes; /* Rx bytes */ + u64 hnc_tx_bytes; /* Tx bytes */ + u64 hnc_rx_uc_pkts; /* Rx UC packets */ + u64 hnc_rx_mc_pkts; /* Rx MC packets */ + u64 hnc_rx_bc_pkts; /* Rx BC packets */ + u64 hnc_tx_uc_pkts; /* Tx UC packets */ + u64 hnc_tx_mc_pkts; /* Tx MC packets */ + u64 hnc_tx_bc_pkts; /* Tx BC packets */ u32 hnc_fcs_err; /* FCS errors */ u32 hnc_align_err; /* Alignment errors */ u32 hnc_false_carrier; /* False carrier detection */ @@ -181,7 +180,7 @@ struct ncsi_channel_stats { u32 hnc_tx_1023_frames; /* Tx 512-1023 bytes frames */ u32 hnc_tx_1522_frames; /* Tx 1024-1522 bytes frames */ u32 hnc_tx_9022_frames; /* Tx 1523-9022 bytes frames */ - u32 hnc_rx_valid_bytes; /* Rx valid bytes */ + u64 hnc_rx_valid_bytes; /* Rx valid bytes */ u32 hnc_rx_runt_pkts; /* Rx error runt packets */ u32 hnc_rx_jabber_pkts; /* Rx error jabber packets */ u32 ncsi_rx_cmds; /* Rx NCSI commands */ diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h index f2f3b5c1b9412..24edb27379724 100644 --- a/net/ncsi/ncsi-pkt.h +++ b/net/ncsi/ncsi-pkt.h @@ -252,16 +252,15 @@ struct ncsi_rsp_gp_pkt { /* Get Controller Packet Statistics */ struct ncsi_rsp_gcps_pkt { struct ncsi_rsp_pkt_hdr rsp; /* Response header */ - __be32 cnt_hi; /* Counter cleared */ - __be32 cnt_lo; /* Counter cleared */ - __be32 rx_bytes; /* Rx bytes */ - __be32 tx_bytes; /* Tx bytes */ - __be32 rx_uc_pkts; /* Rx UC packets */ - __be32 rx_mc_pkts; /* Rx MC packets */ - __be32 rx_bc_pkts; /* Rx BC packets */ - __be32 tx_uc_pkts; /* Tx UC packets */ - __be32 tx_mc_pkts; /* Tx MC packets */ - __be32 tx_bc_pkts; /* Tx BC packets */ + __be64 cnt; /* Counter cleared */ + __be64 rx_bytes; /* Rx bytes */ + __be64 tx_bytes; /* Tx bytes */ + __be64 rx_uc_pkts; /* Rx UC packets */ + __be64 rx_mc_pkts; /* Rx MC packets */ + __be64 rx_bc_pkts; /* Rx BC packets */ + __be64 tx_uc_pkts; /* Tx UC packets */ + __be64 tx_mc_pkts; /* Tx MC packets */ + __be64 tx_bc_pkts; /* Tx BC packets */ __be32 fcs_err; /* FCS errors */ __be32 align_err; /* Alignment errors */ __be32 false_carrier; /* False carrier detection */ @@ -290,11 +289,11 @@ struct ncsi_rsp_gcps_pkt { __be32 tx_1023_frames; /* Tx 512-1023 bytes frames */ __be32 tx_1522_frames; /* Tx 1024-1522 bytes frames */ __be32 tx_9022_frames; /* Tx 1523-9022 bytes frames */ - __be32 rx_valid_bytes; /* Rx valid bytes */ + __be64 rx_valid_bytes; /* Rx valid bytes */ __be32 rx_runt_pkts; /* Rx error runt packets */ __be32 rx_jabber_pkts; /* Rx error jabber packets */ __be32 checksum; /* Checksum */ -}; +} __packed __aligned(4); /* Get NCSI Statistics */ struct ncsi_rsp_gns_pkt { diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 4a8ce2949faea..8668888c5a2f9 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -926,16 +926,15 @@ static int ncsi_rsp_handler_gcps(struct ncsi_request *nr) /* Update HNC's statistics */ ncs = &nc->stats; - ncs->hnc_cnt_hi = ntohl(rsp->cnt_hi); - ncs->hnc_cnt_lo = ntohl(rsp->cnt_lo); - ncs->hnc_rx_bytes = ntohl(rsp->rx_bytes); - ncs->hnc_tx_bytes = ntohl(rsp->tx_bytes); - 
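The NCSI changes above widen the hardware counters to 64 bits and re-declare the wire format with __be64 fields; the added __packed __aligned(4) keeps the on-wire layout byte-exact, since a bare __be64 member would otherwise pull the struct to 8-byte alignment and could introduce padding after the response header. Reading one such counter, sketched with a demo struct:

struct demo_gcps {
	__be64 rx_bytes;		/* big-endian 64-bit on the wire */
} __packed __aligned(4);

static u64 demo_read_rx_bytes(const struct demo_gcps *rsp)
{
	/* be64_to_cpu() instead of ntohl(): the old 32-bit fields
	 * wrapped after 4 GiB of traffic.
	 */
	return be64_to_cpu(rsp->rx_bytes);
}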
ncs->hnc_rx_uc_pkts = ntohl(rsp->rx_uc_pkts); - ncs->hnc_rx_mc_pkts = ntohl(rsp->rx_mc_pkts); - ncs->hnc_rx_bc_pkts = ntohl(rsp->rx_bc_pkts); - ncs->hnc_tx_uc_pkts = ntohl(rsp->tx_uc_pkts); - ncs->hnc_tx_mc_pkts = ntohl(rsp->tx_mc_pkts); - ncs->hnc_tx_bc_pkts = ntohl(rsp->tx_bc_pkts); + ncs->hnc_cnt = be64_to_cpu(rsp->cnt); + ncs->hnc_rx_bytes = be64_to_cpu(rsp->rx_bytes); + ncs->hnc_tx_bytes = be64_to_cpu(rsp->tx_bytes); + ncs->hnc_rx_uc_pkts = be64_to_cpu(rsp->rx_uc_pkts); + ncs->hnc_rx_mc_pkts = be64_to_cpu(rsp->rx_mc_pkts); + ncs->hnc_rx_bc_pkts = be64_to_cpu(rsp->rx_bc_pkts); + ncs->hnc_tx_uc_pkts = be64_to_cpu(rsp->tx_uc_pkts); + ncs->hnc_tx_mc_pkts = be64_to_cpu(rsp->tx_mc_pkts); + ncs->hnc_tx_bc_pkts = be64_to_cpu(rsp->tx_bc_pkts); ncs->hnc_fcs_err = ntohl(rsp->fcs_err); ncs->hnc_align_err = ntohl(rsp->align_err); ncs->hnc_false_carrier = ntohl(rsp->false_carrier); @@ -964,7 +963,7 @@ static int ncsi_rsp_handler_gcps(struct ncsi_request *nr) ncs->hnc_tx_1023_frames = ntohl(rsp->tx_1023_frames); ncs->hnc_tx_1522_frames = ntohl(rsp->tx_1522_frames); ncs->hnc_tx_9022_frames = ntohl(rsp->tx_9022_frames); - ncs->hnc_rx_valid_bytes = ntohl(rsp->rx_valid_bytes); + ncs->hnc_rx_valid_bytes = be64_to_cpu(rsp->rx_valid_bytes); ncs->hnc_rx_runt_pkts = ntohl(rsp->rx_runt_pkts); ncs->hnc_rx_jabber_pkts = ntohl(rsp->rx_jabber_pkts); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 4085c436e3062..02f10a46fab7c 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -248,7 +248,7 @@ static noinline bool nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_ct) { - static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT; + static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST; const struct nf_conntrack_tuple_hash *thash; const struct nf_conntrack_zone *zone; struct nf_conn *ct; @@ -287,8 +287,14 @@ nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple, zone = nf_ct_zone(ignored_ct); thash = nf_conntrack_find_get(net, zone, tuple); - if (unlikely(!thash)) /* clashing entry went away */ - return false; + if (unlikely(!thash)) { + struct nf_conntrack_tuple reply; + + nf_ct_invert_tuple(&reply, tuple); + thash = nf_conntrack_find_get(net, zone, &reply); + if (!thash) /* clashing entry went away */ + return false; + } ct = nf_ct_tuplehash_to_ctrack(thash); diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c index 9b2d7463d3d32..df0798da2329b 100644 --- a/net/netfilter/nft_quota.c +++ b/net/netfilter/nft_quota.c @@ -19,10 +19,16 @@ struct nft_quota { }; static inline bool nft_overquota(struct nft_quota *priv, - const struct sk_buff *skb) + const struct sk_buff *skb, + bool *report) { - return atomic64_add_return(skb->len, priv->consumed) >= - atomic64_read(&priv->quota); + u64 consumed = atomic64_add_return(skb->len, priv->consumed); + u64 quota = atomic64_read(&priv->quota); + + if (report) + *report = consumed >= quota; + + return consumed > quota; } static inline bool nft_quota_invert(struct nft_quota *priv) @@ -34,7 +40,7 @@ static inline void nft_quota_do_eval(struct nft_quota *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt) { - if (nft_overquota(priv, pkt->skb) ^ nft_quota_invert(priv)) + if (nft_overquota(priv, pkt->skb, NULL) ^ nft_quota_invert(priv)) regs->verdict.code = NFT_BREAK; } @@ -51,13 +57,13 @@ static void nft_quota_obj_eval(struct nft_object *obj, const struct nft_pktinfo *pkt) { struct nft_quota *priv = nft_obj_data(obj); - bool 
overquota; + bool overquota, report; - overquota = nft_overquota(priv, pkt->skb); + overquota = nft_overquota(priv, pkt->skb, &report); if (overquota ^ nft_quota_invert(priv)) regs->verdict.code = NFT_BREAK; - if (overquota && + if (report && !test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags)) nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0, NFT_MSG_NEWOBJ, 0, nft_pf(pkt), 0, GFP_ATOMIC); diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 7be342b495f5f..c5855069bdaba 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -663,6 +663,9 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f, check_add_overflow(rules, extra, &rules_alloc)) return -EOVERFLOW; + if (rules_alloc > (INT_MAX / sizeof(*new_mt))) + return -ENOMEM; + new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT); if (!new_mt) return -ENOMEM; @@ -683,6 +686,30 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f, return 0; } + +/** + * lt_calculate_size() - Get storage size for lookup table with overflow check + * @groups: Amount of bit groups + * @bb: Number of bits grouped together in lookup table buckets + * @bsize: Size of each bucket in lookup table, in longs + * + * Return: allocation size including alignment overhead, negative on overflow + */ +static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb, + unsigned int bsize) +{ + ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long); + + if (check_mul_overflow(ret, bsize, &ret)) + return -1; + if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret)) + return -1; + if (ret > INT_MAX) + return -1; + + return ret; +} + /** * pipapo_resize() - Resize lookup or mapping table, or both * @f: Field containing lookup and mapping tables @@ -701,6 +728,7 @@ static int pipapo_resize(struct nft_pipapo_field *f, long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p; unsigned int new_bucket_size, copy; int group, bucket, err; + ssize_t lt_size; if (rules >= NFT_PIPAPO_RULE0_MAX) return -ENOSPC; @@ -719,10 +747,11 @@ static int pipapo_resize(struct nft_pipapo_field *f, else copy = new_bucket_size; - new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) * - new_bucket_size * sizeof(*new_lt) + - NFT_PIPAPO_ALIGN_HEADROOM, - GFP_KERNEL); + lt_size = lt_calculate_size(f->groups, f->bb, new_bucket_size); + if (lt_size < 0) + return -ENOMEM; + + new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT); if (!new_lt) return -ENOMEM; @@ -907,7 +936,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) { unsigned int groups, bb; unsigned long *new_lt; - size_t lt_size; + ssize_t lt_size; lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize * sizeof(*f->lt); @@ -917,15 +946,17 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) groups = f->groups * 2; bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET; - lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize * - sizeof(*f->lt); + lt_size = lt_calculate_size(groups, bb, f->bsize); + if (lt_size < 0) + return; } else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET && lt_size < NFT_PIPAPO_LT_SIZE_LOW) { groups = f->groups / 2; bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET; - lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize * - sizeof(*f->lt); + lt_size = lt_calculate_size(groups, bb, f->bsize); + if (lt_size < 0) + return; /* Don't increase group width if the resulting lookup table size * would exceed the upper size threshold for a "small" set. 
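For reference, the overflow-checked sizing pattern that lt_calculate_size() introduces can be sketched in isolation. The following is a minimal userspace illustration, not kernel code: the function and macro names are hypothetical, and the compiler builtins stand in for the kernel's check_mul_overflow()/check_add_overflow() wrappers.

#include <limits.h>
#include <stdio.h>
#include <sys/types.h>

#define ALIGN_HEADROOM 32	/* stand-in for NFT_PIPAPO_ALIGN_HEADROOM */

/* Compute groups * buckets * sizeof(long) * bsize + headroom, reporting
 * any overflow (or a result above INT_MAX) as a negative value instead
 * of silently wrapping into a too-small allocation. As in the kernel
 * helper, the first product is unchecked because group and bucket
 * counts are small by construction.
 */
static ssize_t lt_size_sketch(unsigned int groups, unsigned int buckets,
			      unsigned int bsize)
{
	ssize_t ret = (ssize_t)groups * buckets * sizeof(long);

	if (__builtin_mul_overflow(ret, bsize, &ret))
		return -1;
	if (__builtin_add_overflow(ret, ALIGN_HEADROOM, &ret))
		return -1;
	if (ret > INT_MAX)
		return -1;

	return ret;
}

int main(void)
{
	printf("%zd\n", lt_size_sketch(4, 16, 256));	/* sane geometry */
	printf("%zd\n", lt_size_sketch(1u << 20, 1u << 16, 1u << 30)); /* rejected: -1 */
	return 0;
}

Returning ssize_t rather than size_t is the point of the design: callers such as pipapo_resize() can collapse every failure mode into a single "lt_size < 0, return -ENOMEM" test without caring which check fired.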
@@ -936,7 +967,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) return; } - new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT); + new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT); if (!new_lt) return; @@ -1451,13 +1482,15 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) for (i = 0; i < old->field_count; i++) { unsigned long *new_lt; + ssize_t lt_size; memcpy(dst, src, offsetof(struct nft_pipapo_field, lt)); - new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) * - src->bsize * sizeof(*dst->lt) + - NFT_PIPAPO_ALIGN_HEADROOM, - GFP_KERNEL_ACCOUNT); + lt_size = lt_calculate_size(src->groups, src->bb, src->bsize); + if (lt_size < 0) + goto out_lt; + + new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT); if (!new_lt) goto out_lt; @@ -1469,6 +1502,9 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) src->groups * NFT_PIPAPO_BUCKETS(src->bb)); if (src->rules > 0) { + if (src->rules_alloc > (INT_MAX / sizeof(*src->mt))) + goto out_mt; + dst->mt = kvmalloc_array(src->rules_alloc, sizeof(*src->mt), GFP_KERNEL_ACCOUNT); diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index c15db28c5ebc4..be7c16c79f711 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -1113,6 +1113,25 @@ bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features, return true; } +/** + * pipapo_resmap_init_avx2() - Initialise result map before first use + * @m: Matching data, including mapping table + * @res_map: Result map + * + * Like pipapo_resmap_init() but do not set start map bits covered by the first field. + */ +static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, unsigned long *res_map) +{ + const struct nft_pipapo_field *f = m->f; + int i; + + /* Starting map doesn't need to be set to all-ones for this implementation, + * but we do need to zero the remaining bits, if any. + */ + for (i = f->bsize; i < m->bsize_max; i++) + res_map[i] = 0ul; +} + /** * nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation * @net: Network namespace @@ -1171,7 +1190,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, res = scratch->map + (map_index ? m->bsize_max : 0); fill = scratch->map + (map_index ? 
0 : m->bsize_max); - /* Starting map doesn't need to be set for this implementation */ + pipapo_resmap_init_avx2(m, res); nft_pipapo_avx2_prepare(); diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 0d99786c322e8..e18d322290fb0 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -624,10 +624,10 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb, struct geneve_opt *opt; int offset = 0; - inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE); - if (!inner) - goto failure; while (opts->len > offset) { + inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE); + if (!inner) + goto failure; opt = (struct geneve_opt *)(opts->u.data + offset); if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS, opt->opt_class) || @@ -637,8 +637,8 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb, opt->length * 4, opt->opt_data)) goto inner_failure; offset += sizeof(*opt) + opt->length * 4; + nla_nest_end(skb, inner); } - nla_nest_end(skb, inner); } nla_nest_end(skb, nest); return 0; diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index 30e99464171b7..93f064306901c 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c @@ -91,7 +91,7 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par) return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb)); } -#if IS_ENABLED(CONFIG_IP6_NF_MANGLE) +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static unsigned int tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par) { @@ -119,7 +119,7 @@ static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = { .targetsize = sizeof(struct xt_tcpoptstrip_target_info), .me = THIS_MODULE, }, -#if IS_ENABLED(CONFIG_IP6_NF_MANGLE) +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "TCPOPTSTRIP", .family = NFPROTO_IPV6, diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c index 65b965ca40ea7..59b9d04400cac 100644 --- a/net/netfilter/xt_mark.c +++ b/net/netfilter/xt_mark.c @@ -48,7 +48,7 @@ static struct xt_target mark_tg_reg[] __read_mostly = { .targetsize = sizeof(struct xt_mark_tginfo2), .me = THIS_MODULE, }, -#if IS_ENABLED(CONFIG_IP_NF_ARPTABLES) +#if IS_ENABLED(CONFIG_IP_NF_ARPTABLES) || IS_ENABLED(CONFIG_NFT_COMPAT_ARP) { .name = "MARK", .revision = 2, diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index cd9160bbc9197..33b77084a4e5f 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -1165,6 +1165,11 @@ int netlbl_conn_setattr(struct sock *sk, break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: + if (sk->sk_family != AF_INET6) { + ret_val = -EAFNOSUPPORT; + goto conn_setattr_return; + } + addr6 = (struct sockaddr_in6 *)addr; entry = netlbl_domhsh_getentry_af6(secattr->domain, &addr6->sin6_addr); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 775d707ec708a..b02fb75f8d4fd 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -387,7 +387,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) WARN_ON(skb->sk != NULL); skb->sk = sk; skb->destructor = netlink_skb_destructor; - atomic_add(skb->truesize, &sk->sk_rmem_alloc); sk_mem_charge(sk, skb->truesize); } @@ -1216,41 +1215,48 @@ struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast) int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk) { + DECLARE_WAITQUEUE(wait, current); struct netlink_sock *nlk; + unsigned int rmem; nlk = nlk_sk(sk); + rmem = 
atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); - if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || - test_bit(NETLINK_S_CONGESTED, &nlk->state))) { - DECLARE_WAITQUEUE(wait, current); - if (!*timeo) { - if (!ssk || netlink_is_kernel(ssk)) - netlink_overrun(sk); - sock_put(sk); - kfree_skb(skb); - return -EAGAIN; - } - - __set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&nlk->wait, &wait); + if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) && + !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { + netlink_skb_set_owner_r(skb, sk); + return 0; + } - if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || - test_bit(NETLINK_S_CONGESTED, &nlk->state)) && - !sock_flag(sk, SOCK_DEAD)) - *timeo = schedule_timeout(*timeo); + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&nlk->wait, &wait); + if (!*timeo) { + if (!ssk || netlink_is_kernel(ssk)) + netlink_overrun(sk); sock_put(sk); + kfree_skb(skb); + return -EAGAIN; + } - if (signal_pending(current)) { - kfree_skb(skb); - return sock_intr_errno(*timeo); - } - return 1; + __set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&nlk->wait, &wait); + rmem = atomic_read(&sk->sk_rmem_alloc); + + if (((rmem && rmem + skb->truesize > READ_ONCE(sk->sk_rcvbuf)) || + test_bit(NETLINK_S_CONGESTED, &nlk->state)) && + !sock_flag(sk, SOCK_DEAD)) + *timeo = schedule_timeout(*timeo); + + __set_current_state(TASK_RUNNING); + remove_wait_queue(&nlk->wait, &wait); + sock_put(sk); + + if (signal_pending(current)) { + kfree_skb(skb); + return sock_intr_errno(*timeo); } - netlink_skb_set_owner_r(skb, sk); - return 0; + + return 1; } static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) @@ -1310,6 +1316,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, ret = -ECONNREFUSED; if (nlk->netlink_rcv != NULL) { ret = skb->len; + atomic_add(skb->truesize, &sk->sk_rmem_alloc); netlink_skb_set_owner_r(skb, sk); NETLINK_CB(skb).sk = ssk; netlink_deliver_tap_kernel(sk, ssk, skb); @@ -1386,13 +1393,19 @@ EXPORT_SYMBOL_GPL(netlink_strict_get_check); static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) { struct netlink_sock *nlk = nlk_sk(sk); + unsigned int rmem, rcvbuf; - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && + rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); + rcvbuf = READ_ONCE(sk->sk_rcvbuf); + + if ((rmem == skb->truesize || rmem <= rcvbuf) && !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { netlink_skb_set_owner_r(skb, sk); __netlink_sendskb(sk, skb); - return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); + return rmem > (rcvbuf >> 1); } + + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); return -1; } @@ -2248,6 +2261,7 @@ static int netlink_dump(struct sock *sk, bool lock_taken) struct netlink_ext_ack extack = {}; struct netlink_callback *cb; struct sk_buff *skb = NULL; + unsigned int rmem, rcvbuf; size_t max_recvmsg_len; struct module *module; int err = -ENOBUFS; @@ -2261,9 +2275,6 @@ static int netlink_dump(struct sock *sk, bool lock_taken) goto errout_skb; } - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) - goto errout_skb; - /* NLMSG_GOODSIZE is small to avoid high order allocations being * required, but it makes sense to _attempt_ a 16K bytes allocation * to reduce number of system calls on dump operations, if user @@ -2286,6 +2297,13 @@ static int netlink_dump(struct sock *sk, bool lock_taken) if (!skb) goto errout_skb; + rcvbuf = READ_ONCE(sk->sk_rcvbuf); + rmem = 
atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); + if (rmem != skb->truesize && rmem >= rcvbuf) { + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); + goto errout_skb; + } + /* Trim skb to allocated size. User is expected to provide buffer as * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at * netlink_recvmsg())). dump will pack as many smaller messages as diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c index ed1508a9e093e..aab107727f186 100644 --- a/net/nfc/nci/uart.c +++ b/net/nfc/nci/uart.c @@ -119,22 +119,22 @@ static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver) memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart)); nu->tty = tty; - tty->disc_data = nu; skb_queue_head_init(&nu->tx_q); INIT_WORK(&nu->write_work, nci_uart_write_work); spin_lock_init(&nu->rx_lock); ret = nu->ops.open(nu); if (ret) { - tty->disc_data = NULL; kfree(nu); + return ret; } else if (!try_module_get(nu->owner)) { nu->ops.close(nu); - tty->disc_data = NULL; kfree(nu); return -ENOENT; } - return ret; + tty->disc_data = nu; + + return 0; } /* ------ LDISC part ------ */ diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 8a848ce72e291..b80bd3a907739 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -788,7 +788,7 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) memset(&key->ipv4, 0, sizeof(key->ipv4)); } } else if (eth_p_mpls(key->eth.type)) { - u8 label_count = 1; + size_t label_count = 1; memset(&key->mpls, 0, sizeof(key->mpls)); skb_set_inner_network_header(skb, skb->mac_len); diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index fee772b4637c8..a7054546f52df 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -497,22 +497,15 @@ void rose_rt_device_down(struct net_device *dev) t = rose_node; rose_node = rose_node->next; - for (i = 0; i < t->count; i++) { + for (i = t->count - 1; i >= 0; i--) { if (t->neighbour[i] != s) continue; t->count--; - switch (i) { - case 0: - t->neighbour[0] = t->neighbour[1]; - fallthrough; - case 1: - t->neighbour[1] = t->neighbour[2]; - break; - case 2: - break; - } + memmove(&t->neighbour[i], &t->neighbour[i + 1], + sizeof(t->neighbour[0]) * + (t->count - i)); } if (t->count <= 0) diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 0f5a1d77b890f..773bdb2e37daf 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -149,6 +149,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, id_in_use: write_unlock(&rx->call_lock); + rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT); rxrpc_cleanup_call(call); _leave(" = -EBADSLT"); return -EBADSLT; @@ -253,6 +254,9 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, unsigned short call_tail, conn_tail, peer_tail; unsigned short call_count, conn_count; + if (!b) + return NULL; + /* #calls >= #conns >= #peers must hold true. 
*/ call_head = smp_load_acquire(&b->call_backlog_head); call_tail = b->call_backlog_tail; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 518f52f65a49d..c56a01992cb28 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -334,17 +334,22 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) return q; } -static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) +static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid, + struct netlink_ext_ack *extack) { unsigned long cl; const struct Qdisc_class_ops *cops = p->ops->cl_ops; - if (cops == NULL) - return NULL; + if (cops == NULL) { + NL_SET_ERR_MSG(extack, "Parent qdisc is not classful"); + return ERR_PTR(-EOPNOTSUPP); + } cl = cops->find(p, classid); - if (cl == 0) - return NULL; + if (cl == 0) { + NL_SET_ERR_MSG(extack, "Specified class not found"); + return ERR_PTR(-ENOENT); + } return cops->leaf(p, cl); } @@ -779,15 +784,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev) void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) { - bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED; const struct Qdisc_class_ops *cops; unsigned long cl; u32 parentid; bool notify; int drops; - if (n == 0 && len == 0) - return; drops = max_t(int, n, 0); rcu_read_lock(); while ((parentid = sch->parent)) { @@ -796,17 +798,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) if (sch->flags & TCQ_F_NOPARENT) break; - /* Notify parent qdisc only if child qdisc becomes empty. - * - * If child was empty even before update then backlog - * counter is screwed and we skip notification because - * parent class is already passive. - * - * If the original child was offloaded then it is allowed - * to be seem as empty, so the parent is notified anyway. - */ - notify = !sch->q.qlen && !WARN_ON_ONCE(!n && - !qdisc_is_offloaded); + /* Notify parent qdisc only if child qdisc becomes empty. */ + notify = !sch->q.qlen; /* TODO: perform the search on a per txq basis */ sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid)); if (sch == NULL) { @@ -815,6 +808,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) } cops = sch->ops->cl_ops; if (notify && cops->qlen_notify) { + /* Note that qlen_notify must be idempotent as it may get called + * multiple times. 
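+ * The walk notifies whenever the child queue is empty, not only
+ * on the transition to empty, so the same class may be notified
+ * repeatedly for one event.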
+ */ cl = cops->find(sch, parentid); cops->qlen_notify(sch, cl); } @@ -1535,7 +1531,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid"); return -ENOENT; } - q = qdisc_leaf(p, clid); + q = qdisc_leaf(p, clid, extack); } else if (dev_ingress_queue(dev)) { q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); } @@ -1546,6 +1542,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device"); return -ENOENT; } + if (IS_ERR(q)) + return PTR_ERR(q); if (tcm->tcm_handle && q->handle != tcm->tcm_handle) { NL_SET_ERR_MSG(extack, "Invalid handle"); @@ -1639,7 +1637,9 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Failed to find specified qdisc"); return -ENOENT; } - q = qdisc_leaf(p, clid); + q = qdisc_leaf(p, clid, extack); + if (IS_ERR(q)) + return PTR_ERR(q); } else if (dev_ingress_queue_create(dev)) { q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); } diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 2c069f0181c62..037f764822b96 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -661,7 +661,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, for (i = q->nbands; i < oldbands; i++) { if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) list_del_init(&q->classes[i].alist); - qdisc_tree_flush_backlog(q->classes[i].qdisc); + qdisc_purge_queue(q->classes[i].qdisc); } WRITE_ONCE(q->nstrict, nstrict); memcpy(q->prio2band, priomap, sizeof(priomap)); diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 7986145a527cb..5a7745170e84b 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -175,6 +175,11 @@ struct hfsc_sched { #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */ +static bool cl_in_el_or_vttree(struct hfsc_class *cl) +{ + return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) || + ((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node)); +} /* * eligible tree holds backlogged classes being sorted by their eligible times. 
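The helper added above leans on an rbtree detail: RB_EMPTY_NODE() only reports "not in a tree" for a node that was explicitly detached with RB_CLEAR_NODE(), which is why hfsc_change_class() now clears el_node right after allocating the class. Below is a minimal userspace sketch of that idiom, with the two macros mirroring their definitions in include/linux/rbtree.h.

#include <stdio.h>

struct rb_node {
	unsigned long __rb_parent_color;	/* parent pointer plus colour bit */
};

/* As in include/linux/rbtree.h: a detached node is marked by making
 * it its own parent, a value that can never occur inside a tree.
 */
#define RB_CLEAR_NODE(n) ((n)->__rb_parent_color = (unsigned long)(n))
#define RB_EMPTY_NODE(n) ((n)->__rb_parent_color == (unsigned long)(n))

int main(void)
{
	struct rb_node el_node;

	RB_CLEAR_NODE(&el_node);	/* freshly allocated class: in no tree */
	printf("in eligible tree: %s\n",
	       RB_EMPTY_NODE(&el_node) ? "no" : "yes");
	return 0;
}

Without the clear, a never-inserted node is indistinguishable from an enqueued one, and a membership test such as cl_in_el_or_vttree() could misread it.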
@@ -1040,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl == NULL) return -ENOBUFS; + RB_CLEAR_NODE(&cl->el_node); + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); if (err) { kfree(cl); @@ -1572,7 +1579,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) sch->qstats.backlog += len; sch->q.qlen++; - if (first && !cl->cl_nactive) { + if (first && !cl_in_el_or_vttree(cl)) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); if (cl->cl_flags & HFSC_FSC) diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index cc30f7a32f1a7..9e2b9a490db23 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -211,7 +211,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt, memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); for (i = q->bands; i < oldbands; i++) - qdisc_tree_flush_backlog(q->queues[i]); + qdisc_purge_queue(q->queues[i]); for (i = oldbands; i < q->bands; i++) { q->queues[i] = queues[i]; diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index b5f096588fae6..0f0701ed397e9 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -283,7 +283,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb, q->userbits = userbits; q->limit = ctl->limit; if (child) { - qdisc_tree_flush_backlog(q->qdisc); + qdisc_purge_queue(q->qdisc); old_child = q->qdisc; q->qdisc = child; } diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 58b42dcf8f201..11a7d5a25d6b1 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -310,7 +310,10 @@ static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free) /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */ x = q->tail->next; slot = &q->slots[x]; - q->tail->next = slot->next; + if (slot->next == x) + q->tail = NULL; /* no more active slots */ + else + q->tail->next = slot->next; q->ht[slot->hash] = SFQ_EMPTY_SLOT; goto drop; } @@ -653,6 +656,14 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt, NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); return -EINVAL; } + + if (ctl->perturb_period < 0 || + ctl->perturb_period > INT_MAX / HZ) { + NL_SET_ERR_MSG_MOD(extack, "invalid perturb period"); + return -EINVAL; + } + perturb_period = ctl->perturb_period * HZ; + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) return -EINVAL; @@ -669,14 +680,12 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt, headdrop = q->headdrop; maxdepth = q->maxdepth; maxflows = q->maxflows; - perturb_period = q->perturb_period; quantum = q->quantum; flags = q->flags; /* update and validate configuration */ if (ctl->quantum) quantum = ctl->quantum; - perturb_period = ctl->perturb_period * HZ; if (ctl->flows) maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); if (ctl->divisor) { diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 8623dc0bafc09..3142715d7e41e 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1328,13 +1328,15 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event, stab = rtnl_dereference(q->root->stab); - oper = rtnl_dereference(q->oper_sched); + rcu_read_lock(); + oper = rcu_dereference(q->oper_sched); if (oper) taprio_update_queue_max_sdu(q, oper, stab); - admin = rtnl_dereference(q->admin_sched); + admin = rcu_dereference(q->admin_sched); if (admin) taprio_update_queue_max_sdu(q, admin, stab); + rcu_read_unlock(); break; } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 
dc26b22d53c73..4c977f049670a 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -452,7 +452,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); if (child) { - qdisc_tree_flush_backlog(q->qdisc); + qdisc_purge_queue(q->qdisc); old = q->qdisc; q->qdisc = child; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 53725ee7ba06d..b301d64d9d80f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -9100,7 +9100,8 @@ static void __sctp_write_space(struct sctp_association *asoc) wq = rcu_dereference(sk->sk_wq); if (wq) { if (waitqueue_active(&wq->wait)) - wake_up_interruptible(&wq->wait); + wake_up_interruptible_poll(&wq->wait, EPOLLOUT | + EPOLLWRNORM | EPOLLWRBAND); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 17a4de75bfaf6..e492655cb2212 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2749,8 +2749,13 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) case -EPROTONOSUPPORT: goto out_err; case -EACCES: - /* Re-encode with a fresh cred */ - fallthrough; + /* possible RPCSEC_GSS out-of-sequence event (RFC2203), + * reset recv state and keep waiting, don't retransmit + */ + task->tk_rqstp->rq_reply_bytes_recvd = 0; + task->tk_status = xprt_request_enqueue_receive(task); + task->tk_action = call_transmit_status; + return -EBADMSG; default: goto out_garbage; } diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 7ce3721c06ca5..eadc00410ebc5 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -630,7 +630,7 @@ static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry) static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent, const char *name) { - struct qstr q = QSTR_INIT(name, strlen(name)); + struct qstr q = QSTR(name); struct dentry *dentry = d_hash_and_lookup(parent, &q); if (!dentry) { dentry = d_alloc(parent, &q); @@ -1190,8 +1190,7 @@ static const struct rpc_filelist files[] = { struct dentry *rpc_d_lookup_sb(const struct super_block *sb, const unsigned char *dir_name) { - struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name)); - return d_hash_and_lookup(sb->s_root, &dir); + return d_hash_and_lookup(sb->s_root, &QSTR(dir_name)); } EXPORT_SYMBOL_GPL(rpc_d_lookup_sb); @@ -1300,11 +1299,9 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) struct dentry *gssd_dentry; struct dentry *clnt_dentry = NULL; struct dentry *pipe_dentry = NULL; - struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name, - strlen(files[RPCAUTH_gssd].name)); /* We should never get this far if "gssd" doesn't exist */ - gssd_dentry = d_hash_and_lookup(root, &q); + gssd_dentry = d_hash_and_lookup(root, &QSTR(files[RPCAUTH_gssd].name)); if (!gssd_dentry) return ERR_PTR(-ENOENT); @@ -1314,9 +1311,8 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) goto out; } - q.name = gssd_dummy_clnt_dir[0].name; - q.len = strlen(gssd_dummy_clnt_dir[0].name); - clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); + clnt_dentry = d_hash_and_lookup(gssd_dentry, + &QSTR(gssd_dummy_clnt_dir[0].name)); if (!clnt_dentry) { __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); pipe_dentry = ERR_PTR(-ENOENT); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 79879b7d39cb4..46a95877d2deb 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1369,7 +1369,8 @@ svc_process_common(struct svc_rqst *rqstp) case SVC_OK: break; case SVC_GARBAGE: - goto 
err_garbage_args; + rqstp->rq_auth_stat = rpc_autherr_badcred; + goto err_bad_auth; case SVC_SYSERR: goto err_system_err; case SVC_DENIED: @@ -1510,14 +1511,6 @@ svc_process_common(struct svc_rqst *rqstp) *rqstp->rq_accept_statp = rpc_proc_unavail; goto sendit; -err_garbage_args: - svc_printk(rqstp, "failed to decode RPC header\n"); - - if (serv->sv_stats) - serv->sv_stats->rpcbadfmt++; - *rqstp->rq_accept_statp = rpc_garbage_args; - goto sendit; - err_system_err: if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index aca8bdf65d729..3d7f1413df023 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -406,12 +406,12 @@ static void svc_rdma_xprt_done(struct rpcrdma_notification *rn) */ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) { + unsigned int ctxts, rq_depth, maxpayload; struct svcxprt_rdma *listen_rdma; struct svcxprt_rdma *newxprt = NULL; struct rdma_conn_param conn_param; struct rpcrdma_connect_private pmsg; struct ib_qp_init_attr qp_attr; - unsigned int ctxts, rq_depth; struct ib_device *dev; int ret = 0; RPC_IFDEBUG(struct sockaddr *sap); @@ -462,12 +462,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) newxprt->sc_max_bc_requests = 2; } - /* Arbitrarily estimate the number of rw_ctxs needed for - * this transport. This is enough rw_ctxs to make forward - * progress even if the client is using one rkey per page - * in each Read chunk. + /* Arbitrary estimate of the needed number of rdma_rw contexts. */ - ctxts = 3 * RPCSVC_MAXPAGES; + maxpayload = min(xprt->xpt_server->sv_max_payload, + RPCSVC_MAXPAYLOAD_RDMA); + ctxts = newxprt->sc_max_requests * 3 * + rdma_rw_mr_factor(dev, newxprt->sc_port_num, + maxpayload >> PAGE_SHIFT); + newxprt->sc_sq_depth = rq_depth + ctxts; if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) newxprt->sc_sq_depth = dev->attrs.max_qp_wr; @@ -575,6 +577,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) ib_destroy_qp(newxprt->sc_qp); rdma_destroy_id(newxprt->sc_cm_id); + rpcrdma_rn_unregister(dev, &newxprt->sc_rn); /* This call to put will destroy the transport */ svc_xprt_put(&newxprt->sc_xprt); return NULL; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 171ad4e2523f1..67d099c7c6625 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2743,6 +2743,11 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work) } rpc_shutdown_client(lower_clnt); + /* Check for ingress data that arrived before the socket's + * ->data_ready callback was set up. 
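+ * Data that raced in while the TLS handshake was completing
+ * generated no callback, so without this it would sit queued
+ * until some other event woke the transport.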
+ */ + xs_poll_check_readable(upper_transport); + out_unlock: current_restore_flags(pflags, PF_MEMALLOC); upper_transport->clnt = NULL; diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index 8584893b47851..ea5bb131ebd06 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -425,7 +425,7 @@ static void tipc_aead_free(struct rcu_head *rp) } free_percpu(aead->tfm_entry); kfree_sensitive(aead->key); - kfree(aead); + kfree_sensitive(aead); } static int tipc_aead_users(struct tipc_aead __rcu *aead) @@ -818,7 +818,11 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, } /* Get net to avoid freed tipc_crypto when delete namespace */ - get_net(aead->crypto->net); + if (!maybe_get_net(aead->crypto->net)) { + tipc_bearer_put(b); + rc = -ENODEV; + goto exit; + } /* Now, do encrypt */ rc = crypto_aead_encrypt(req); diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 8ee0c07d00e9b..ffe577bf6b515 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -704,8 +704,10 @@ static void tipc_topsrv_stop(struct net *net) for (id = 0; srv->idr_in_use; id++) { con = idr_find(&srv->conn_idr, id); if (con) { + conn_get(con); spin_unlock_bh(&srv->idr_lock); tipc_conn_close(con); + conn_put(con); spin_lock_bh(&srv->idr_lock); } } diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 108a4cc2e0010..258d6aa4f21ae 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -489,7 +489,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); b = tipc_bearer_find(net, bname); - if (!b) { + if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) { rtnl_unlock(); return -EINVAL; } @@ -500,7 +500,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); b = rtnl_dereference(tn->bearer_list[bid]); - if (!b) { + if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) { rtnl_unlock(); return -EINVAL; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 7bcc9b4408a2c..8fb5925f2389e 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -908,6 +908,13 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, &msg_redir, send, flags); lock_sock(sk); if (err < 0) { + /* Regardless of whether the data represented by + * msg_redir is sent successfully, we have already + * uncharged it via sk_msg_return_zero(). The + * msg->sg.size represents the remaining unprocessed + * data, which needs to be uncharged here. 
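+ * (sk_msg_free_nocharge(), as its name says, frees the entries
+ * without adjusting the socket's memory accounting, hence the
+ * explicit uncharge.)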
+ */ + sk_mem_uncharge(sk, msg->sg.size); *copied -= sk_msg_free_nocharge(sk, &msg_redir); msg->sg.size = 0; } @@ -1120,9 +1127,13 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, num_async++; else if (ret == -ENOMEM) goto wait_for_memory; - else if (ctx->open_rec && ret == -ENOSPC) + else if (ctx->open_rec && ret == -ENOSPC) { + if (msg_pl->cork_bytes) { + ret = 0; + goto send_end; + } goto rollback_iter; - else if (ret != -EAGAIN) + } else if (ret != -EAGAIN) goto send_end; } continue; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 6b17623004439..45f8e21829ecd 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -666,6 +666,11 @@ static void unix_sock_destructor(struct sock *sk) #endif } +static unsigned int unix_skb_len(const struct sk_buff *skb) +{ + return skb->len - UNIXCB(skb).consumed; +} + static void unix_release_sock(struct sock *sk, int embrion) { struct unix_sock *u = unix_sk(sk); @@ -700,10 +705,16 @@ static void unix_release_sock(struct sock *sk, int embrion) if (skpair != NULL) { if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { + struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (skb && !unix_skb_len(skb)) + skb = skb_peek_next(skb, &sk->sk_receive_queue); +#endif unix_state_lock(skpair); /* No more writes */ WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK); - if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion) + if (skb || embrion) WRITE_ONCE(skpair->sk_err, ECONNRESET); unix_state_unlock(skpair); skpair->sk_state_change(skpair); @@ -2594,11 +2605,6 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, return timeo; } -static unsigned int unix_skb_len(const struct sk_buff *skb) -{ - return skb->len - UNIXCB(skb).consumed; -} - struct unix_stream_read_state { int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); @@ -2613,11 +2619,11 @@ struct unix_stream_read_state { #if IS_ENABLED(CONFIG_AF_UNIX_OOB) static int unix_stream_recv_urg(struct unix_stream_read_state *state) { + struct sk_buff *oob_skb, *read_skb = NULL; struct socket *sock = state->socket; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); int chunk = 1; - struct sk_buff *oob_skb; mutex_lock(&u->iolock); unix_state_lock(sk); @@ -2632,9 +2638,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state) oob_skb = u->oob_skb; - if (!(state->flags & MSG_PEEK)) + if (!(state->flags & MSG_PEEK)) { WRITE_ONCE(u->oob_skb, NULL); + if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue && + !unix_skb_len(oob_skb->prev)) { + read_skb = oob_skb->prev; + __skb_unlink(read_skb, &sk->sk_receive_queue); + } + } + spin_unlock(&sk->sk_receive_queue.lock); unix_state_unlock(sk); @@ -2645,6 +2658,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state) mutex_unlock(&u->iolock); + consume_skb(read_skb); + if (chunk < 0) return -EFAULT; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index d08f205b33dcc..08565e41b8e92 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept); static bool vsock_use_local_transport(unsigned int remote_cid) { + lockdep_assert_held(&vsock_register_mutex); + if (!transport_local) return false; @@ -464,6 +466,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) remote_flags = vsk->remote_addr.svm_flags; + mutex_lock(&vsock_register_mutex); + switch (sk->sk_type) { case SOCK_DGRAM: 
new_transport = transport_dgram; @@ -479,12 +483,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) new_transport = transport_h2g; break; default: - return -ESOCKTNOSUPPORT; + ret = -ESOCKTNOSUPPORT; + goto err; } if (vsk->transport) { - if (vsk->transport == new_transport) - return 0; + if (vsk->transport == new_transport) { + ret = 0; + goto err; + } /* transport->release() must be called with sock lock acquired. * This path can only be taken during vsock_connect(), where we @@ -508,8 +515,16 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) /* We increase the module refcnt to prevent the transport unloading * while there are open sockets assigned to it. */ - if (!new_transport || !try_module_get(new_transport->module)) - return -ENODEV; + if (!new_transport || !try_module_get(new_transport->module)) { + ret = -ENODEV; + goto err; + } + + /* It's safe to release the mutex after a successful try_module_get(). + * Whichever transport `new_transport` points at, it won't go away until + * the last module_put() below or in vsock_deassign_transport(). + */ + mutex_unlock(&vsock_register_mutex); if (sk->sk_type == SOCK_SEQPACKET) { if (!new_transport->seqpacket_allow || @@ -528,12 +543,31 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) vsk->transport = new_transport; return 0; +err: + mutex_unlock(&vsock_register_mutex); + return ret; } EXPORT_SYMBOL_GPL(vsock_assign_transport); +/* + * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks. + * Otherwise we may race with module removal. Do not use on `vsk->transport`. + */ +static u32 vsock_registered_transport_cid(const struct vsock_transport **transport) +{ + u32 cid = VMADDR_CID_ANY; + + mutex_lock(&vsock_register_mutex); + if (*transport) + cid = (*transport)->get_local_cid(); + mutex_unlock(&vsock_register_mutex); + + return cid; +} + bool vsock_find_cid(unsigned int cid) { - if (transport_g2h && cid == transport_g2h->get_local_cid()) + if (cid == vsock_registered_transport_cid(&transport_g2h)) return true; if (transport_h2g && cid == VMADDR_CID_HOST) @@ -2502,18 +2536,19 @@ static long vsock_dev_do_ioctl(struct file *filp, unsigned int cmd, void __user *ptr) { u32 __user *p = ptr; - u32 cid = VMADDR_CID_ANY; int retval = 0; + u32 cid; switch (cmd) { case IOCTL_VM_SOCKETS_GET_LOCAL_CID: /* To be compatible with the VMCI behavior, we prioritize the * guest CID instead of well-know host CID (VMADDR_CID_HOST). 
*/ - if (transport_g2h) - cid = transport_g2h->get_local_cid(); - else if (transport_h2g) - cid = transport_h2g->get_local_cid(); + cid = vsock_registered_transport_cid(&transport_g2h); + if (cid == VMADDR_CID_ANY) + cid = vsock_registered_transport_cid(&transport_h2g); + if (cid == VMADDR_CID_ANY) + cid = vsock_registered_transport_cid(&transport_local); if (put_user(cid, p) != 0) retval = -EFAULT; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 7f7de6d880965..2c9b1011cdcc8 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -441,18 +441,20 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs, u32 len) { - if (vvs->rx_bytes + len > vvs->buf_alloc) + if (vvs->buf_used + len > vvs->buf_alloc) return false; vvs->rx_bytes += len; + vvs->buf_used += len; return true; } static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs, - u32 len) + u32 bytes_read, u32 bytes_dequeued) { - vvs->rx_bytes -= len; - vvs->fwd_cnt += len; + vvs->rx_bytes -= bytes_read; + vvs->buf_used -= bytes_dequeued; + vvs->fwd_cnt += bytes_dequeued; } void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb) @@ -581,11 +583,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, size_t len) { struct virtio_vsock_sock *vvs = vsk->trans; - size_t bytes, total = 0; struct sk_buff *skb; u32 fwd_cnt_delta; bool low_rx_bytes; int err = -EFAULT; + size_t total = 0; u32 free_space; spin_lock_bh(&vvs->rx_lock); @@ -597,6 +599,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, } while (total < len && !skb_queue_empty(&vvs->rx_queue)) { + size_t bytes, dequeued = 0; + skb = skb_peek(&vvs->rx_queue); bytes = min_t(size_t, len - total, @@ -620,12 +624,12 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes; if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) { - u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len); - - virtio_transport_dec_rx_pkt(vvs, pkt_len); + dequeued = le32_to_cpu(virtio_vsock_hdr(skb)->len); __skb_unlink(skb, &vvs->rx_queue); consume_skb(skb); } + + virtio_transport_dec_rx_pkt(vvs, bytes, dequeued); } fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt; @@ -781,7 +785,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk, msg->msg_flags |= MSG_EOR; } - virtio_transport_dec_rx_pkt(vvs, pkt_len); + virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len); kfree_skb(skb); } @@ -1735,6 +1739,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto struct sock *sk = sk_vsock(vsk); struct virtio_vsock_hdr *hdr; struct sk_buff *skb; + u32 pkt_len; int off = 0; int err; @@ -1752,7 +1757,8 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) vvs->msg_count--; - virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len)); + pkt_len = le32_to_cpu(hdr->len); + virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len); spin_unlock_bh(&vvs->rx_lock); virtio_transport_send_credit_update(vsk); diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index b370070194fa4..7eccd6708d664 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -119,6 +119,8 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt, u16 proto, struct vmci_handle handle) { + memset(pkt, 0, 
sizeof(*pkt)); + /* We register the stream control handler as an any cid handle so we * must always send from a source address of VMADDR_CID_ANY */ @@ -131,8 +133,6 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt, pkt->type = type; pkt->src_port = src->svm_port; pkt->dst_port = dst->svm_port; - memset(&pkt->proto, 0, sizeof(pkt->proto)); - memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2)); switch (pkt->type) { case VMCI_TRANSPORT_PACKET_TYPE_INVALID: diff --git a/net/wireless/core.c b/net/wireless/core.c index 1ce8fff2a28a4..586e50678ed80 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -553,6 +553,9 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, INIT_WORK(&rdev->mgmt_registrations_update_wk, cfg80211_mgmt_registrations_update_wk); spin_lock_init(&rdev->mgmt_registrations_lock); + INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); + INIT_LIST_HEAD(&rdev->wiphy_work_list); + spin_lock_init(&rdev->wiphy_work_lock); #ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -570,9 +573,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, return NULL; } - INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); - INIT_LIST_HEAD(&rdev->wiphy_work_list); - spin_lock_init(&rdev->wiphy_work_lock); INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c778ffa1c8efd..4eb44821c70d3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -229,6 +229,7 @@ static int validate_beacon_head(const struct nlattr *attr, unsigned int len = nla_len(attr); const struct element *elem; const struct ieee80211_mgmt *mgmt = (void *)data; + const struct ieee80211_ext *ext; unsigned int fixedlen, hdrlen; bool s1g_bcn; @@ -237,8 +238,10 @@ static int validate_beacon_head(const struct nlattr *attr, s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control); if (s1g_bcn) { - fixedlen = offsetof(struct ieee80211_ext, - u.s1g_beacon.variable); + ext = (struct ieee80211_ext *)mgmt; + fixedlen = + offsetof(struct ieee80211_ext, u.s1g_beacon.variable) + + ieee80211_s1g_optional_len(ext->frame_control); hdrlen = offsetof(struct ieee80211_ext, u.s1g_beacon); } else { fixedlen = offsetof(struct ieee80211_mgmt, diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ba20cfa9f2f53..8346ccadee439 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -3213,6 +3213,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, const u8 *ie; size_t ielen; u64 tsf; + size_t s1g_optional_len; if (WARN_ON(!mgmt)) return NULL; @@ -3227,12 +3228,11 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { ext = (void *) mgmt; - if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_short_beacon.variable); - else - min_hdr_len = offsetof(struct ieee80211_ext, - u.s1g_beacon.variable); + s1g_optional_len = + ieee80211_s1g_optional_len(ext->frame_control); + min_hdr_len = + offsetof(struct ieee80211_ext, u.s1g_beacon.variable) + + s1g_optional_len; } else { /* same for beacons */ min_hdr_len = offsetof(struct ieee80211_mgmt, @@ -3248,11 +3248,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, const struct ieee80211_s1g_bcn_compat_ie *compat; const struct element *elem; - if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) - ie = 
ext->u.s1g_short_beacon.variable; - else - ie = ext->u.s1g_beacon.variable; - + ie = ext->u.s1g_beacon.variable + s1g_optional_len; elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen); if (!elem) return NULL; diff --git a/net/wireless/util.c b/net/wireless/util.c index 18585b1416c66..b115489a846f8 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -820,6 +820,52 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr) } EXPORT_SYMBOL(ieee80211_is_valid_amsdu); + +/* + * Detects if an MSDU frame was maliciously converted into an A-MSDU + * frame by an adversary. This is done by parsing the received frame + * as if it were a regular MSDU, even though the A-MSDU flag is set. + * + * For non-mesh interfaces, detection involves checking whether the + * payload, when interpreted as an MSDU, begins with a valid RFC1042 + * header. This is done by comparing the A-MSDU subheader's destination + * address to the start of the RFC1042 header. + * + * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field + * and an optional variable-length Mesh Address Extension field before + * the RFC1042 header. The position of the RFC1042 header must therefore + * be calculated based on the mesh header length. + * + * Since this function intentionally parses an A-MSDU frame as an MSDU, + * it only assumes that the A-MSDU subframe header is present, and + * beyond this it performs its own bounds checks under the assumption + * that the frame is instead parsed as a non-aggregated MSDU. + */ +static bool +is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb, + enum nl80211_iftype iftype) +{ + int offset; + + /* Non-mesh case can be directly compared */ + if (iftype != NL80211_IFTYPE_MESH_POINT) + return ether_addr_equal(eth->h_dest, rfc1042_header); + + offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]); + if (offset == 6) { + /* Mesh case with empty address extension field */ + return ether_addr_equal(eth->h_source, rfc1042_header); + } else if (offset + ETH_ALEN <= skb->len) { + /* Mesh case with non-empty address extension field */ + u8 temp[ETH_ALEN]; + + skb_copy_bits(skb, offset, temp, ETH_ALEN); + return ether_addr_equal(temp, rfc1042_header); + } + + return false; +} + void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, @@ -861,8 +907,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, /* the last MSDU has no padding */ if (subframe_len > remaining) goto purge; - /* mitigate A-MSDU aggregation injection attacks */ - if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header)) + /* mitigate A-MSDU aggregation injection attacks, to be + * checked when processing first subframe (offset == 0). 
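+ * Only the first subframe can overlap the header of the
+ * original, maliciously converted MSDU; later subframes need
+ * no check.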
+ */ + if (offset == 0 && is_amsdu_aggregation_attack(&hdr.eth, skb, iftype)) goto purge; offset += sizeof(struct ethhdr); diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index b33c4591e09a4..32ad8f3fc81e8 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -373,7 +373,6 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, xdo->dev = dev; netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC); - xdo->real_dev = dev; xdo->type = XFRM_DEV_OFFLOAD_PACKET; switch (dir) { case XFRM_POLICY_IN: @@ -395,7 +394,6 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack); if (err) { xdo->dev = NULL; - xdo->real_dev = NULL; xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; xdo->dir = 0; netdev_put(dev, &xdo->dev_tracker); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index abd725386cb60..7a298058fc16c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1487,7 +1487,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, xso->type = XFRM_DEV_OFFLOAD_PACKET; xso->dir = xdo->dir; xso->dev = xdo->dev; - xso->real_dev = xdo->real_dev; xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ; netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC); error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL); @@ -1495,7 +1494,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, xso->dir = 0; netdev_put(xso->dev, &xso->dev_tracker); xso->dev = NULL; - xso->real_dev = NULL; xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; x->km.state = XFRM_STATE_DEAD; to_put = x; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index da2a1c00ca8a6..d41e5642625e3 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -178,11 +178,27 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay seq and seq_hi should be 0 for output SA"); return -EINVAL; } - if (rs->oseq_hi && !(p->flags & XFRM_STATE_ESN)) { - NL_SET_ERR_MSG( - extack, - "Replay oseq_hi should be 0 in non-ESN mode for output SA"); - return -EINVAL; + + if (!(p->flags & XFRM_STATE_ESN)) { + if (rs->oseq_hi) { + NL_SET_ERR_MSG( + extack, + "Replay oseq_hi should be 0 in non-ESN mode for output SA"); + return -EINVAL; + } + if (rs->oseq == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay oseq should be less than 0xFFFFFFFF in non-ESN mode for output SA"); + return -EINVAL; + } + } else { + if (rs->oseq == U32_MAX && rs->oseq_hi == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay oseq and oseq_hi should be less than 0xFFFFFFFF for output SA"); + return -EINVAL; + } } if (rs->bmp_len) { NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); @@ -196,11 +212,27 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay oseq and oseq_hi should be 0 for input SA"); return -EINVAL; } - if (rs->seq_hi && !(p->flags & XFRM_STATE_ESN)) { - NL_SET_ERR_MSG( - extack, - "Replay seq_hi should be 0 in non-ESN mode for input SA"); - return -EINVAL; + if (!(p->flags & XFRM_STATE_ESN)) { + if (rs->seq_hi) { + NL_SET_ERR_MSG( + extack, + "Replay seq_hi should be 0 in non-ESN mode for input SA"); + return -EINVAL; + } + + if (rs->seq == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay seq should be less than 0xFFFFFFFF in non-ESN mode for input SA"); + return -EINVAL; + } + } else { + if (rs->seq == U32_MAX && rs->seq_hi == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay seq and seq_hi should be less than 0xFFFFFFFF for input SA"); + return -EINVAL; + } } } diff --git a/rust/Makefile 
b/rust/Makefile index 1b00e16951eeb..b8b7f817c48e4 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -53,6 +53,8 @@ endif core-cfgs = \ --cfg no_fp_fmt_parse +core-edition := $(if $(call rustc-min-version,108700),2024,2021) + quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $< cmd_rustdoc = \ OBJTREE=$(abspath $(objtree)) \ @@ -95,8 +97,8 @@ rustdoc-macros: $(src)/macros/lib.rs FORCE # Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should # not be needed -- see https://github.com/rust-lang/rust/pull/128307. -rustdoc-core: private skip_flags = -Wrustdoc::unescaped_backticks -rustdoc-core: private rustc_target_flags = $(core-cfgs) +rustdoc-core: private skip_flags = --edition=2021 -Wrustdoc::unescaped_backticks +rustdoc-core: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs) rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE +$(call if_changed,rustdoc) @@ -236,7 +238,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ -fzero-call-used-regs=% -fno-stack-clash-protection \ -fno-inline-functions-called-once -fsanitize=bounds-strict \ -fstrict-flex-arrays=% -fmin-function-alignment=% \ - -fzero-init-padding-bits=% \ + -fzero-init-padding-bits=% -mno-fdpic \ --param=% --param asan-% # Derived from `scripts/Makefile.clang`. @@ -372,7 +374,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L cmd_rustc_library = \ OBJTREE=$(abspath $(objtree)) \ $(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \ - $(filter-out $(skip_flags),$(rust_flags) $(rustc_target_flags)) \ + $(filter-out $(skip_flags),$(rust_flags)) $(rustc_target_flags) \ --emit=dep-info=$(depfile) --emit=obj=$@ \ --emit=metadata=$(dir $@)$(patsubst %.o,lib%.rmeta,$(notdir $@)) \ --crate-type rlib -L$(objtree)/$(obj) \ @@ -383,7 +385,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L rust-analyzer: $(Q)$(srctree)/scripts/generate_rust_analyzer.py \ - --cfgs='core=$(core-cfgs)' \ + --cfgs='core=$(core-cfgs)' $(core-edition) \ $(realpath $(srctree)) $(realpath $(objtree)) \ $(rustc_sysroot) $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \ $(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json @@ -407,9 +409,9 @@ define rule_rustc_library endef $(obj)/core.o: private skip_clippy = 1 -$(obj)/core.o: private skip_flags = -Wunreachable_pub +$(obj)/core.o: private skip_flags = --edition=2021 -Wunreachable_pub $(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym)) -$(obj)/core.o: private rustc_target_flags = $(core-cfgs) +$(obj)/core.o: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs) $(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs \ $(wildcard $(objtree)/include/config/RUSTC_VERSION_TEXT) FORCE +$(call if_changed_rule,rustc_library) diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs index 87a71fd40c3ca..f62204fe563f5 100644 --- a/rust/kernel/alloc/kvec.rs +++ b/rust/kernel/alloc/kvec.rs @@ -196,6 +196,9 @@ where #[inline] pub unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity()); + + // INVARIANT: By the safety requirements of this method `new_len` represents the exact + // number of elements stored within `self`. self.len = new_len; } diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs index b7213962a6a5a..e530028bb9edb 100644 --- a/rust/kernel/init/macros.rs +++ b/rust/kernel/init/macros.rs @@ -924,6 +924,7 @@ macro_rules! 
__pin_data { // We prevent this by creating a trait that will be implemented for all types implementing // `Drop`. Additionally we will implement this trait for the struct leading to a conflict, // if it also implements `Drop` + #[allow(dead_code)] trait MustNotImplDrop {} #[expect(drop_bounds)] impl MustNotImplDrop for T {} @@ -932,6 +933,7 @@ macro_rules! __pin_data { // We also take care to prevent users from writing a useless `PinnedDrop` implementation. // They might implement `PinnedDrop` correctly for the struct, but forget to give // `PinnedDrop` as the parameter to `#[pin_data]`. + #[allow(dead_code)] #[expect(non_camel_case_types)] trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {} impl diff --git a/rust/macros/module.rs b/rust/macros/module.rs index da2a18b276e0b..a5ea5850e307a 100644 --- a/rust/macros/module.rs +++ b/rust/macros/module.rs @@ -260,6 +260,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { #[cfg(MODULE)] #[doc(hidden)] #[no_mangle] + #[link_section = \".exit.text\"] pub extern \"C\" fn cleanup_module() {{ // SAFETY: // - This function is inaccessible to the outside due to the double diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler index c6cd729b65cbf..638e1e729986d 100644 --- a/scripts/Makefile.compiler +++ b/scripts/Makefile.compiler @@ -43,7 +43,7 @@ as-instr = $(call try-run,\ # __cc-option # Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586) __cc-option = $(call try-run,\ - $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4)) + $(1) -Werror $(2) $(3:-Wno-%=-W%) -c -x c /dev/null -o "$$TMP",$(3),$(4)) # cc-option # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) @@ -57,7 +57,7 @@ cc-option-yn = $(if $(call cc-option,$1),y,n) # cc-disable-warning # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable) -cc-disable-warning = $(if $(call cc-option,-W$(strip $1)),-Wno-$(strip $1)) +cc-disable-warning = $(call cc-option,-Wno-$(strip $1)) # gcc-min-version # Usage: cflags-$(call gcc-min-version, 70100) += -foo @@ -67,6 +67,10 @@ gcc-min-version = $(call test-ge, $(CONFIG_GCC_VERSION), $1) # Usage: cflags-$(call clang-min-version, 110000) += -foo clang-min-version = $(call test-ge, $(CONFIG_CLANG_VERSION), $1) +# rustc-min-version +# Usage: rustc-$(call rustc-min-version, 108500) += -Cfoo +rustc-min-version = $(call test-ge, $(CONFIG_RUSTC_VERSION), $1) + # ld-option # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y) ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3)) diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h index 3222c1070444f..ef12c8f929eda 100644 --- a/scripts/gcc-plugins/gcc-common.h +++ b/scripts/gcc-plugins/gcc-common.h @@ -123,6 +123,38 @@ static inline tree build_const_char_string(int len, const char *str) return cstr; } +static inline void __add_type_attr(tree type, const char *attr, tree args) +{ + tree oldattr; + + if (type == NULL_TREE) + return; + oldattr = lookup_attribute(attr, TYPE_ATTRIBUTES(type)); + if (oldattr != NULL_TREE) { + gcc_assert(TREE_VALUE(oldattr) == args || TREE_VALUE(TREE_VALUE(oldattr)) == TREE_VALUE(args)); + return; + } + + TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type)); + TYPE_ATTRIBUTES(type) = tree_cons(get_identifier(attr), args, TYPE_ATTRIBUTES(type)); +} + +static inline void add_type_attr(tree type, const char *attr, tree args) +{ + tree main_variant = TYPE_MAIN_VARIANT(type); + + __add_type_attr(TYPE_CANONICAL(type), attr, args); + 
__add_type_attr(TYPE_CANONICAL(main_variant), attr, args); + __add_type_attr(main_variant, attr, args); + + for (type = TYPE_NEXT_VARIANT(main_variant); type; type = TYPE_NEXT_VARIANT(type)) { + if (!lookup_attribute(attr, TYPE_ATTRIBUTES(type))) + TYPE_ATTRIBUTES(type) = TYPE_ATTRIBUTES(main_variant); + + __add_type_attr(TYPE_CANONICAL(type), attr, args); + } +} + #define PASS_INFO(NAME, REF, ID, POS) \ struct register_pass_info NAME##_pass_info = { \ .pass = make_##NAME##_pass(), \ diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c index 5694df3da2e95..ff65a4f87f240 100644 --- a/scripts/gcc-plugins/randomize_layout_plugin.c +++ b/scripts/gcc-plugins/randomize_layout_plugin.c @@ -73,6 +73,9 @@ static tree handle_randomize_layout_attr(tree *node, tree name, tree args, int f if (TYPE_P(*node)) { type = *node; + } else if (TREE_CODE(*node) == FIELD_DECL) { + *no_add_attrs = false; + return NULL_TREE; } else { gcc_assert(TREE_CODE(*node) == TYPE_DECL); type = TREE_TYPE(*node); @@ -344,35 +347,18 @@ static int relayout_struct(tree type) shuffle(type, (tree *)newtree, shuffle_length); - /* - * set up a bogus anonymous struct field designed to error out on unnamed struct initializers - * as gcc provides no other way to detect such code - */ - list = make_node(FIELD_DECL); - TREE_CHAIN(list) = newtree[0]; - TREE_TYPE(list) = void_type_node; - DECL_SIZE(list) = bitsize_zero_node; - DECL_NONADDRESSABLE_P(list) = 1; - DECL_FIELD_BIT_OFFSET(list) = bitsize_zero_node; - DECL_SIZE_UNIT(list) = size_zero_node; - DECL_FIELD_OFFSET(list) = size_zero_node; - DECL_CONTEXT(list) = type; - // to satisfy the constify plugin - TREE_READONLY(list) = 1; - for (i = 0; i < num_fields - 1; i++) TREE_CHAIN(newtree[i]) = newtree[i+1]; TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE; + add_type_attr(type, "randomize_performed", NULL_TREE); + add_type_attr(type, "designated_init", NULL_TREE); + if (has_flexarray) + add_type_attr(type, "has_flexarray", NULL_TREE); + main_variant = TYPE_MAIN_VARIANT(type); - for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant)) { - TYPE_FIELDS(variant) = list; - TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant)); - TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant)); - TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("designated_init"), NULL_TREE, TYPE_ATTRIBUTES(variant)); - if (has_flexarray) - TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("has_flexarray"), NULL_TREE, TYPE_ATTRIBUTES(type)); - } + for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant)) + TYPE_FIELDS(variant) = newtree[0]; /* * force a re-layout of the main variant @@ -440,10 +426,8 @@ static void randomize_type(tree type) if (lookup_attribute("randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))) || is_pure_ops_struct(type)) relayout_struct(type); - for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) { - TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type)); - TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("randomize_considered"), NULL_TREE, TYPE_ATTRIBUTES(type)); - } + add_type_attr(type, "randomize_considered", NULL_TREE); + #ifdef __DEBUG_PLUGIN fprintf(stderr, "Marking randomize_considered on struct %s\n", ORIG_TYPE_NAME(type)); #ifdef __DEBUG_VERBOSE diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in index fd6bd69c5096a..f795302ddfa8b 100644 --- 
a/scripts/gdb/linux/constants.py.in +++ b/scripts/gdb/linux/constants.py.in @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -93,6 +94,12 @@ LX_GDBPARSED(RADIX_TREE_MAP_SIZE) LX_GDBPARSED(RADIX_TREE_MAP_SHIFT) LX_GDBPARSED(RADIX_TREE_MAP_MASK) +/* linux/maple_tree.h */ +LX_VALUE(MAPLE_NODE_SLOTS) +LX_VALUE(MAPLE_RANGE64_SLOTS) +LX_VALUE(MAPLE_ARANGE64_SLOTS) +LX_GDBPARSED(MAPLE_NODE_MASK) + /* linux/vmalloc.h */ LX_VALUE(VM_IOREMAP) LX_VALUE(VM_ALLOC) diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py index 616a5f26377a8..f4f715a8f0e36 100644 --- a/scripts/gdb/linux/interrupts.py +++ b/scripts/gdb/linux/interrupts.py @@ -7,7 +7,7 @@ from linux import constants from linux import cpus from linux import utils -from linux import radixtree +from linux import mapletree irq_desc_type = utils.CachedType("struct irq_desc") @@ -23,12 +23,12 @@ def irqd_is_level(desc): def show_irq_desc(prec, irq): text = "" - desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq) + desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq) if desc is None: return text - desc = desc.cast(irq_desc_type.get_type()) - if desc is None: + desc = desc.cast(irq_desc_type.get_type().pointer()) + if desc == 0: return text if irq_settings_is_hidden(desc): @@ -110,7 +110,7 @@ def x86_show_mce(prec, var, pfx, desc): pvar = gdb.parse_and_eval(var) text = "%*s: " % (prec, pfx) for cpu in cpus.each_online_cpu(): - text += "%10u " % (cpus.per_cpu(pvar, cpu)) + text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference()) text += " %s\n" % (desc) return text @@ -142,7 +142,7 @@ def x86_show_interupts(prec): if constants.LX_CONFIG_X86_MCE: text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions") - text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") + text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") text += show_irq_err_count(prec) @@ -221,8 +221,8 @@ def invoke(self, arg, from_tty): gdb.write("CPU%-8d" % cpu) gdb.write("\n") - if utils.gdb_eval_or_none("&irq_desc_tree") is None: - return + if utils.gdb_eval_or_none("&sparse_irqs") is None: + raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?") for irq in range(nr_irqs): gdb.write(show_irq_desc(prec, irq)) diff --git a/scripts/gdb/linux/mapletree.py b/scripts/gdb/linux/mapletree.py new file mode 100644 index 0000000000000..d52d51c0a03fc --- /dev/null +++ b/scripts/gdb/linux/mapletree.py @@ -0,0 +1,252 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Maple tree helpers +# +# Copyright (c) 2025 Broadcom +# +# Authors: +# Florian Fainelli + +import gdb + +from linux import utils +from linux import constants +from linux import xarray + +maple_tree_root_type = utils.CachedType("struct maple_tree") +maple_node_type = utils.CachedType("struct maple_node") +maple_enode_type = utils.CachedType("void") + +maple_dense = 0 +maple_leaf_64 = 1 +maple_range_64 = 2 +maple_arange_64 = 3 + +class Mas(object): + ma_active = 0 + ma_start = 1 + ma_root = 2 + ma_none = 3 + ma_pause = 4 + ma_overflow = 5 + ma_underflow = 6 + ma_error = 7 + + def __init__(self, mt, first, end): + if mt.type == maple_tree_root_type.get_type().pointer(): + self.tree = mt.dereference() + elif mt.type != maple_tree_root_type.get_type(): + raise gdb.GdbError("must be {} not {}" + .format(maple_tree_root_type.get_type().pointer(), mt.type)) + self.tree = mt + self.index = first + self.last = end + self.node = None + self.status = 
self.ma_start + self.min = 0 + self.max = -1 + + def is_start(self): + # mas_is_start() + return self.status == self.ma_start + + def is_ptr(self): + # mas_is_ptr() + return self.status == self.ma_root + + def is_none(self): + # mas_is_none() + return self.status == self.ma_none + + def root(self): + # mas_root() + return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer()) + + def start(self): + # mas_start() + if self.is_start() is False: + return None + + self.min = 0 + self.max = ~0 + + while True: + self.depth = 0 + root = self.root() + if xarray.xa_is_node(root): + self.depth = 0 + self.status = self.ma_active + self.node = mte_safe_root(root) + self.offset = 0 + if mte_dead_node(self.node) is True: + continue + + return None + + self.node = None + # Empty tree + if root is None: + self.status = self.ma_none + self.offset = constants.LX_MAPLE_NODE_SLOTS + return None + + # Single entry tree + self.status = self.ma_root + self.offset = constants.LX_MAPLE_NODE_SLOTS + + if self.index != 0: + return None + + return root + + return None + + def reset(self): + # mas_reset() + self.status = self.ma_start + self.node = None + +def mte_safe_root(node): + if node.type != maple_enode_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type)) + ulong_type = utils.get_ulong_type() + indirect_ptr = node.cast(ulong_type) & ~0x2 + val = indirect_ptr.cast(maple_enode_type.get_type().pointer()) + return val + +def mte_node_type(entry): + ulong_type = utils.get_ulong_type() + val = None + if entry.type == maple_enode_type.get_type().pointer(): + val = entry.cast(ulong_type) + elif entry.type == ulong_type: + val = entry + else: + raise gdb.GdbError("{} must be {} not {}" + .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type)) + return (val >> 0x3) & 0xf + +def ma_dead_node(node): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type)) + ulong_type = utils.get_ulong_type() + parent = node['parent'] + indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK + return indirect_ptr == node + +def mte_to_node(enode): + ulong_type = utils.get_ulong_type() + if enode.type == maple_enode_type.get_type().pointer(): + indirect_ptr = enode.cast(ulong_type) + elif enode.type == ulong_type: + indirect_ptr = enode + else: + raise gdb.GdbError("{} must be {} not {}" + .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) + indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK + return indirect_ptr.cast(maple_node_type.get_type().pointer()) + +def mte_dead_node(enode): + if enode.type != maple_enode_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) + node = mte_to_node(enode) + return ma_dead_node(node) + +def ma_is_leaf(tp): + result = tp < maple_range_64 + return tp < maple_range_64 + +def mt_pivots(t): + if t == maple_dense: + return 0 + elif t == maple_leaf_64 or t == maple_range_64: + return constants.LX_MAPLE_RANGE64_SLOTS - 1 + elif t == maple_arange_64: + return constants.LX_MAPLE_ARANGE64_SLOTS - 1 + +def ma_pivots(node, t): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{}: must be {} not {}" + .format(ma_pivots.__name__, 
maple_node_type.get_type().pointer(), node.type)) + if t == maple_arange_64: + return node['ma64']['pivot'] + elif t == maple_leaf_64 or t == maple_range_64: + return node['mr64']['pivot'] + else: + return None + +def ma_slots(node, tp): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{}: must be {} not {}" + .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type)) + if tp == maple_arange_64: + return node['ma64']['slot'] + elif tp == maple_range_64 or tp == maple_leaf_64: + return node['mr64']['slot'] + elif tp == maple_dense: + return node['slot'] + else: + return None + +def mt_slot(mt, slots, offset): + ulong_type = utils.get_ulong_type() + return slots[offset].cast(ulong_type) + +def mtree_lookup_walk(mas): + ulong_type = utils.get_ulong_type() + n = mas.node + + while True: + node = mte_to_node(n) + tp = mte_node_type(n) + pivots = ma_pivots(node, tp) + end = mt_pivots(tp) + offset = 0 + while True: + if pivots[offset] >= mas.index: + break + if offset >= end: + break + offset += 1 + + slots = ma_slots(node, tp) + n = mt_slot(mas.tree, slots, offset) + if ma_dead_node(node) is True: + mas.reset() + return None + break + + if ma_is_leaf(tp) is True: + break + + return n + +def mtree_load(mt, index): + ulong_type = utils.get_ulong_type() + # MT_STATE(...) + mas = Mas(mt, index, index) + entry = None + + while True: + entry = mas.start() + if mas.is_none(): + return None + + if mas.is_ptr(): + if index != 0: + entry = None + return entry + + entry = mtree_lookup_walk(mas) + if entry is None and mas.is_start(): + continue + else: + break + + if xarray.xa_is_zero(entry): + return None + + return entry diff --git a/scripts/gdb/linux/xarray.py b/scripts/gdb/linux/xarray.py new file mode 100644 index 0000000000000..f4477b5def75f --- /dev/null +++ b/scripts/gdb/linux/xarray.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Xarray helpers +# +# Copyright (c) 2025 Broadcom +# +# Authors: +# Florian Fainelli + +import gdb + +from linux import utils +from linux import constants + +def xa_is_internal(entry): + ulong_type = utils.get_ulong_type() + return ((entry.cast(ulong_type) & 3) == 2) + +def xa_mk_internal(v): + return ((v << 2) | 2) + +def xa_is_zero(entry): + ulong_type = utils.get_ulong_type() + return entry.cast(ulong_type) == xa_mk_internal(257) + +def xa_is_node(entry): + ulong_type = utils.get_ulong_type() + return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096) diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py index 690f9830f0648..f9c9a2117632c 100755 --- a/scripts/generate_rust_analyzer.py +++ b/scripts/generate_rust_analyzer.py @@ -18,7 +18,7 @@ def args_crates_cfgs(cfgs): return crates_cfgs -def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs): +def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edition): # Generate the configuration list. 
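    # Presumably each line of include/generated/rustc_cfg carries one --cfg
    # flag emitted by the kernel build; replaying them here keeps
    # rust-analyzer's view of conditional compilation consistent with the
    # configured kernel.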
cfg = [] with open(objtree / "include" / "generated" / "rustc_cfg") as fd: @@ -34,7 +34,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs): crates_indexes = {} crates_cfgs = args_crates_cfgs(cfgs) - def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=True, is_proc_macro=False): + def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=True, is_proc_macro=False, edition="2021"): crates_indexes[display_name] = len(crates) crates.append({ "display_name": display_name, @@ -43,7 +43,7 @@ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=Tr "is_proc_macro": is_proc_macro, "deps": [{"crate": crates_indexes[dep], "name": dep} for dep in deps], "cfg": cfg, - "edition": "2021", + "edition": edition, "env": { "RUST_MODFILE": "This is only for rust-analyzer" } @@ -53,6 +53,7 @@ def append_sysroot_crate( display_name, deps, cfg=[], + edition="2021", ): append_crate( display_name, @@ -60,12 +61,13 @@ def append_sysroot_crate( deps, cfg, is_workspace_member=False, + edition=edition, ) # NB: sysroot crates reexport items from one another so setting up our transitive dependencies # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`. - append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", [])) + append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []), edition=core_edition) append_sysroot_crate("alloc", ["core"]) append_sysroot_crate("std", ["alloc", "core"]) append_sysroot_crate("proc_macro", ["core", "std"]) @@ -155,6 +157,7 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument('--verbose', '-v', action='store_true') parser.add_argument('--cfgs', action='append', default=[]) + parser.add_argument("core_edition") parser.add_argument("srctree", type=pathlib.Path) parser.add_argument("objtree", type=pathlib.Path) parser.add_argument("sysroot", type=pathlib.Path) @@ -171,7 +174,7 @@ def main(): assert args.sysroot in args.sysroot_src.parents rust_project = { - "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs), + "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs, args.core_edition), "sysroot": str(args.sysroot), } diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 88850405ded92..f36332e64c4d1 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1884,11 +1884,17 @@ static int security_compute_sid(u32 ssid, goto out_unlock; } /* Obtain the sid for the context. 
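If the computed context matches the source or target context, the
SID already in hand is returned instead of calling
sidtab_context_to_sid(), which may need to allocate a new sidtab
entry.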
*/ - rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid); - if (rc == -ESTALE) { - rcu_read_unlock(); - context_destroy(&newcontext); - goto retry; + if (context_cmp(scontext, &newcontext)) + *out_sid = ssid; + else if (context_cmp(tcontext, &newcontext)) + *out_sid = tsid; + else { + rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid); + if (rc == -ESTALE) { + rcu_read_unlock(); + context_destroy(&newcontext); + goto retry; + } } out_unlock: rcu_read_unlock(); diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 90ec4ef1b082f..61d56b0c2be13 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -94,7 +94,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, ctx->ctx_doi = XFRM_SC_DOI_LSM; ctx->ctx_alg = XFRM_SC_ALG_SELINUX; - ctx->ctx_len = str_len; + ctx->ctx_len = str_len + 1; memcpy(ctx->ctx_str, &uctx[1], str_len); ctx->ctx_str[str_len] = '\0'; rc = security_context_to_sid(ctx->ctx_str, str_len, diff --git a/sound/core/seq_device.c b/sound/core/seq_device.c index 4492be5d2317c..bac9f86037342 100644 --- a/sound/core/seq_device.c +++ b/sound/core/seq_device.c @@ -43,7 +43,7 @@ MODULE_LICENSE("GPL"); static int snd_seq_bus_match(struct device *dev, const struct device_driver *drv) { struct snd_seq_device *sdev = to_seq_dev(dev); - struct snd_seq_driver *sdrv = to_seq_drv(drv); + const struct snd_seq_driver *sdrv = to_seq_drv(drv); return strcmp(sdrv->id, sdev->id) == 0 && sdrv->argsize == sdev->argsize; diff --git a/sound/hda/hda_bus_type.c b/sound/hda/hda_bus_type.c index 7545ace7b0ee4..eb72a7af2e56e 100644 --- a/sound/hda/hda_bus_type.c +++ b/sound/hda/hda_bus_type.c @@ -21,7 +21,7 @@ MODULE_LICENSE("GPL"); * driver id_table and returns the matching device id entry. */ const struct hda_device_id * -hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv) +hdac_get_device_id(struct hdac_device *hdev, const struct hdac_driver *drv) { if (drv->id_table) { const struct hda_device_id *id = drv->id_table; @@ -38,7 +38,7 @@ hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv) } EXPORT_SYMBOL_GPL(hdac_get_device_id); -static int hdac_codec_match(struct hdac_device *dev, struct hdac_driver *drv) +static int hdac_codec_match(struct hdac_device *dev, const struct hdac_driver *drv) { if (hdac_get_device_id(dev, drv)) return 1; @@ -49,7 +49,7 @@ static int hdac_codec_match(struct hdac_device *dev, struct hdac_driver *drv) static int hda_bus_match(struct device *dev, const struct device_driver *drv) { struct hdac_device *hdev = dev_to_hdac_dev(dev); - struct hdac_driver *hdrv = drv_to_hdac_driver(drv); + const struct hdac_driver *hdrv = drv_to_hdac_driver(drv); if (hdev->type != hdrv->type) return 0; diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c index 99006dc4777e9..5c9e2d41d9005 100644 --- a/sound/isa/ad1816a/ad1816a.c +++ b/sound/isa/ad1816a/ad1816a.c @@ -98,7 +98,7 @@ static int snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card, pdev = pnp_request_card_device(card, id->devs[1].id, NULL); if (pdev == NULL) { mpu_port[dev] = -1; - dev_warn(&pdev->dev, "MPU401 device busy, skipping.\n"); + pr_warn("MPU401 device busy, skipping.\n"); return 0; } diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c index 74db115250030..5a083eecaa6b9 100644 --- a/sound/isa/sb/sb16_main.c +++ b/sound/isa/sb/sb16_main.c @@ -703,6 +703,9 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct unsigned char nval, oval; int change; + if (chip->mode & (SB_MODE_PLAYBACK | 
SB_MODE_CAPTURE)) + return -EBUSY; + nval = ucontrol->value.enumerated.item[0]; if (nval > 2) return -EINVAL; @@ -711,6 +714,10 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct change = nval != oval; snd_sb16_set_dma_mode(chip, nval); spin_unlock_irqrestore(&chip->reg_lock, flags); + if (change) { + snd_dma_disable(chip->dma8); + snd_dma_disable(chip->dma16); + } return change; } diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c index 61d2314834e7b..d8249d997c2a0 100644 --- a/sound/pci/hda/cs35l41_hda_property.c +++ b/sound/pci/hda/cs35l41_hda_property.c @@ -31,6 +31,9 @@ struct cs35l41_config { }; static const struct cs35l41_config cs35l41_config_table[] = { + { "10251826", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 }, + { "1025182C", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 }, + { "10251844", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 }, { "10280B27", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, { "10280B28", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 }, { "10280BEB", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 0, 0, 0 }, @@ -452,6 +455,9 @@ struct cs35l41_prop_model { static const struct cs35l41_prop_model cs35l41_prop_model_table[] = { { "CLSA0100", NULL, lenovo_legion_no_acpi }, { "CLSA0101", NULL, lenovo_legion_no_acpi }, + { "CSC3551", "10251826", generic_dsd_config }, + { "CSC3551", "1025182C", generic_dsd_config }, + { "CSC3551", "10251844", generic_dsd_config }, { "CSC3551", "10280B27", generic_dsd_config }, { "CSC3551", "10280B28", generic_dsd_config }, { "CSC3551", "10280BEB", generic_dsd_config }, diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index b7ca2a83fbb08..f8f1b1f6b1382 100644 --- a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c @@ -18,10 +18,10 @@ /* * find a matching codec id */ -static int hda_codec_match(struct hdac_device *dev, struct hdac_driver *drv) +static int hda_codec_match(struct hdac_device *dev, const struct hdac_driver *drv) { struct hda_codec *codec = container_of(dev, struct hda_codec, core); - struct hda_codec_driver *driver = + const struct hda_codec_driver *driver = container_of(drv, struct hda_codec_driver, core); const struct hda_device_id *list; /* check probe_id instead of vendor_id if set */ @@ -44,7 +44,7 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev) struct hda_codec *codec = container_of(dev, struct hda_codec, core); /* ignore unsol events during shutdown */ - if (codec->bus->shutdown) + if (codec->card->shutdown || codec->bus->shutdown) return; /* ignore unsol events during system suspend/resume */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 25b1984898ab2..d4e325b785332 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2286,6 +2286,8 @@ static const struct snd_pci_quirk power_save_denylist[] = { SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0), /* Dell ALC3271 */ SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0), + /* https://bugzilla.kernel.org/show_bug.cgi?id=220210 */ + SND_PCI_QUIRK(0x17aa, 0x5079, "Lenovo Thinkpad E15", 0), {} }; @@ -2725,6 +2727,9 @@ static const struct pci_device_id azx_ids[] = { { PCI_VDEVICE(ATI, 0xab38), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME }, + { PCI_VDEVICE(ATI, 0xab40), + .driver_data = 
AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | + AZX_DCAPS_PM_RUNTIME }, /* GLENFLY */ { PCI_DEVICE(PCI_VENDOR_ID_GLENFLY, PCI_ANY_ID), .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 13ffc9a6555f6..e98823bd3634f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2658,6 +2658,7 @@ static const struct hda_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x5802, "Clevo X58[05]WN[RST]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), @@ -6611,6 +6612,7 @@ static void alc294_fixup_bass_speaker_15(struct hda_codec *codec, if (action == HDA_FIXUP_ACT_PRE_PROBE) { static const hda_nid_t conn[] = { 0x02, 0x03 }; snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn); + snd_hda_gen_add_micmute_led_cdev(codec, NULL); } } @@ -6813,7 +6815,10 @@ static void alc256_fixup_chromebook(struct hda_codec *codec, switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: - spec->gen.suppress_auto_mute = 1; + if (codec->core.subsystem_id == 0x10280d76) + spec->gen.suppress_auto_mute = 0; + else + spec->gen.suppress_auto_mute = 1; spec->gen.suppress_auto_mic = 1; spec->en_3kpull_low = false; break; @@ -7613,6 +7618,24 @@ static void alc245_fixup_hp_spectre_x360_16_aa0xxx(struct hda_codec *codec, alc245_fixup_hp_gpio_led(codec, fix, action); } +static void alc245_fixup_hp_zbook_firefly_g12a(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + static const hda_nid_t conn[] = { 0x02 }; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + spec->gen.auto_mute_via_amp = 1; + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + break; + } + + cs35l41_fixup_i2c_two(codec, fix, action); + alc245_fixup_hp_mute_led_coefbit(codec, fix, action); + alc285_fixup_hp_coef_micmute_led(codec, fix, action); +} + /* * ALC287 PCM hooks */ @@ -7960,6 +7983,7 @@ enum { ALC256_FIXUP_HEADPHONE_AMP_VOL, ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX, ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX, + ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A, ALC285_FIXUP_ASUS_GA403U, ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC, ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1, @@ -10248,6 +10272,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc245_fixup_hp_spectre_x360_16_aa0xxx, }, + [ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc245_fixup_hp_zbook_firefly_g12a, + }, [ALC285_FIXUP_ASUS_GA403U] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_asus_ga403u, @@ -10423,6 +10451,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x0879, "Dell Latitude 5420 Rugged", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", 
ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), @@ -10627,6 +10656,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x897d, "HP mt440 Mobile Thin Client U74", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4), + SND_PCI_QUIRK(0x103c, 0x898a, "HP Pavilion 15-eg100", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), @@ -10701,6 +10731,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), @@ -10754,6 +10785,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8c9c, "HP Victus 16-s1xxx (MB 8C9C)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), @@ -10767,11 +10799,52 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d85, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d86, "HP Elite X360 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d8c, "HP EliteBook 13 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d8d, "HP Elite X360 13 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d8e, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d8f, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d90, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d91, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d92, "HP ZBook Firefly 16 G12", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d9b, "HP 17 
Turbine OmniBook 7 UMA", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8d9c, "HP 17 Turbine OmniBook 7 DIS", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8d9d, "HP 17 Turbine OmniBook X UMA", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8d9e, "HP 17 Turbine OmniBook X DIS", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8d9f, "HP 14 Cadet (x360)", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8da0, "HP 16 Clipper OmniBook 7(X360)", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8da1, "HP 16 Clipper OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8da7, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8da8, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8dec, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8dee, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8df0, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8dfc, "HP EliteBook 645 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8dfe, "HP EliteBook 665 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e11, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e12, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e13, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e14, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e15, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e16, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e17, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e1d, "HP ZBook X Gli 16 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e2c, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e36, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e37, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e3a, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e3b, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e60, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e61, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e62, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x1032, "ASUS VivoBook X513EA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1034, "ASUS GU605C", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), @@ -10789,6 +10862,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", 
ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1264, "ASUS UM5606KA", ALC294_FIXUP_BASS_SPEAKER_15), SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1294, "ASUS B3405CVA", ALC245_FIXUP_CS35L41_SPI_2), @@ -10863,6 +10937,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1df3, "ASUS UM5606WA", ALC294_FIXUP_BASS_SPEAKER_15), SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1e10, "ASUS VivoBook X507UAR", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC), @@ -10972,6 +11047,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x2624, "Clevo L240TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x28c1, "Clevo V370VND", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1558, 0x35a1, "Clevo V3[56]0EN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x35b1, "Clevo V3[57]0WN[MNP]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -10999,6 +11076,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x51b1, "Clevo NS50AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x51b3, "Clevo NS70AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x5700, "Clevo X560WN[RST]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -11038,6 +11116,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa741, "Clevo V54x_6x_TNE", ALC245_FIXUP_CLEVO_NOISY_MIC), + SND_PCI_QUIRK(0x1558, 0xa743, "Clevo V54x_6x_TU", ALC245_FIXUP_CLEVO_NOISY_MIC), SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC245_FIXUP_CLEVO_NOISY_MIC), SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), diff --git a/sound/soc/amd/acp/acp-sdw-sof-mach.c b/sound/soc/amd/acp/acp-sdw-sof-mach.c index 3be401c722704..99a244f495bd3 100644 --- a/sound/soc/amd/acp/acp-sdw-sof-mach.c +++ b/sound/soc/amd/acp/acp-sdw-sof-mach.c @@ -267,7 +267,7 @@ 
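	/*
	 * create_sdw_dailink() may return without assigning *current_be_id,
	 * so start from a defined value rather than stack garbage.
	 */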
static int create_sdw_dailinks(struct snd_soc_card *card, /* generate DAI links by each sdw link */ while (sof_dais->initialised) { - int current_be_id; + int current_be_id = 0; ret = create_sdw_dailink(card, sof_dais, dai_links, ¤t_be_id, codec_conf); diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index e632f16c91025..1689b6b22598e 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -311,6 +311,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "83AS"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83HN"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -339,6 +346,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "RB"), + DMI_MATCH(DMI_PRODUCT_NAME, "Nitro ANV15-41"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -346,6 +360,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "83J2"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83J3"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -360,7 +381,7 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"), } }, - { + { .driver_data = &acp6x_card, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), @@ -437,6 +458,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."), + DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VF"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -500,6 +528,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb2xxx"), + } + }, { .driver_data = &acp6x_card, .matches = { diff --git a/sound/soc/apple/mca.c b/sound/soc/apple/mca.c index c9e7d40c47cc1..4a4ec1c09e132 100644 --- a/sound/soc/apple/mca.c +++ b/sound/soc/apple/mca.c @@ -464,6 +464,28 @@ static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit, return -EINVAL; } +static int mca_fe_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct mca_cluster *cl = mca_dai_to_cluster(dai); + unsigned int mask, nchannels; + + if (cl->tdm_slots) { + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + mask = cl->tdm_tx_mask; + else + mask = cl->tdm_rx_mask; + + nchannels = hweight32(mask); + } else { + nchannels = 2; + } + + return snd_pcm_hw_constraint_minmax(substream->runtime, + SNDRV_PCM_HW_PARAM_CHANNELS, + 1, nchannels); +} + static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { @@ -680,6 +702,7 @@ static int mca_fe_hw_params(struct snd_pcm_substream *substream, } static const struct snd_soc_dai_ops mca_fe_ops = { + .startup = mca_fe_startup, .set_fmt = mca_fe_set_fmt, .set_bclk_ratio = mca_set_bclk_ratio, .set_tdm_slot = mca_fe_set_tdm_slot, diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index 
195841a567c3d..9007484b31c71 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -811,7 +811,7 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base) break; default: dev_err(cs35l56_base->dev, "Unknown device %x\n", devid); - return ret; + return -ENODEV; } cs35l56_base->type = devid & 0xFF; diff --git a/sound/soc/codecs/hda.c b/sound/soc/codecs/hda.c index ddc00927313cf..dc7794c9ac44c 100644 --- a/sound/soc/codecs/hda.c +++ b/sound/soc/codecs/hda.c @@ -152,7 +152,7 @@ int hda_codec_probe_complete(struct hda_codec *codec) ret = snd_hda_codec_build_controls(codec); if (ret < 0) { dev_err(&hdev->dev, "unable to create controls %d\n", ret); - goto out; + return ret; } /* Bus suspended codecs as it does not manage their pm */ @@ -160,7 +160,7 @@ int hda_codec_probe_complete(struct hda_codec *codec) /* rpm was forbidden in snd_hda_codec_device_new() */ snd_hda_codec_set_power_save(codec, 2000); snd_hda_codec_register(codec); -out: + /* Complement pm_runtime_get_sync(bus) in probe */ pm_runtime_mark_last_busy(bus->dev); pm_runtime_put_autosuspend(bus->dev); diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c index f2d194e76a947..8755a63478d79 100644 --- a/sound/soc/codecs/rt1320-sdw.c +++ b/sound/soc/codecs/rt1320-sdw.c @@ -2085,7 +2085,7 @@ static const struct reg_sequence rt1320_vc_patch_code_write[] = { { 0x3fc2bfc0, 0x03 }, { 0x0000d486, 0x43 }, { SDW_SDCA_CTL(FUNC_NUM_AMP, RT1320_SDCA_ENT_PDE23, RT1320_SDCA_CTL_REQ_POWER_STATE, 0), 0x00 }, - { 0x1000db00, 0x04 }, + { 0x1000db00, 0x07 }, { 0x1000db01, 0x00 }, { 0x1000db02, 0x11 }, { 0x1000db03, 0x00 }, @@ -2106,6 +2106,21 @@ static const struct reg_sequence rt1320_vc_patch_code_write[] = { { 0x1000db12, 0x00 }, { 0x1000db13, 0x00 }, { 0x1000db14, 0x45 }, + { 0x1000db15, 0x0d }, + { 0x1000db16, 0x01 }, + { 0x1000db17, 0x00 }, + { 0x1000db18, 0x00 }, + { 0x1000db19, 0xbf }, + { 0x1000db1a, 0x13 }, + { 0x1000db1b, 0x09 }, + { 0x1000db1c, 0x00 }, + { 0x1000db1d, 0x00 }, + { 0x1000db1e, 0x00 }, + { 0x1000db1f, 0x12 }, + { 0x1000db20, 0x09 }, + { 0x1000db21, 0x00 }, + { 0x1000db22, 0x00 }, + { 0x1000db23, 0x00 }, { 0x0000d540, 0x01 }, { 0x0000c081, 0xfc }, { 0x0000f01e, 0x80 }, diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index 39a7d39536fe6..e8fbe8a399f6d 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -23,6 +24,11 @@ #include "tas2764.h" +enum tas2764_devid { + DEVID_TAS2764 = 0, + DEVID_SN012776 = 1 +}; + struct tas2764_priv { struct snd_soc_component *component; struct gpio_desc *reset_gpio; @@ -30,7 +36,8 @@ struct tas2764_priv { struct regmap *regmap; struct device *dev; int irq; - + enum tas2764_devid devid; + int v_sense_slot; int i_sense_slot; @@ -525,10 +532,18 @@ static struct snd_soc_dai_driver tas2764_dai_driver[] = { }, }; +static uint8_t sn012776_bop_presets[] = { + 0x01, 0x32, 0x02, 0x22, 0x83, 0x2d, 0x80, 0x02, 0x06, + 0x32, 0x46, 0x30, 0x02, 0x06, 0x38, 0x40, 0x30, 0x02, + 0x06, 0x3e, 0x37, 0x30, 0xff, 0xe6 +}; + +static const struct regmap_config tas2764_i2c_regmap; + static int tas2764_codec_probe(struct snd_soc_component *component) { struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); - int ret; + int ret, i; tas2764->component = component; @@ -538,9 +553,10 @@ static int tas2764_codec_probe(struct snd_soc_component *component) } tas2764_reset(tas2764); + regmap_reinit_cache(tas2764->regmap, 
&tas2764_i2c_regmap); if (tas2764->irq) { - ret = snd_soc_component_write(tas2764->component, TAS2764_INT_MASK0, 0xff); + ret = snd_soc_component_write(tas2764->component, TAS2764_INT_MASK0, 0x00); if (ret < 0) return ret; @@ -577,6 +593,27 @@ static int tas2764_codec_probe(struct snd_soc_component *component) if (ret < 0) return ret; + switch (tas2764->devid) { + case DEVID_SN012776: + ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, + TAS2764_PWR_CTRL_BOP_SRC, + TAS2764_PWR_CTRL_BOP_SRC); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(sn012776_bop_presets); i++) { + ret = snd_soc_component_write(component, + TAS2764_BOP_CFG0 + i, + sn012776_bop_presets[i]); + + if (ret < 0) + return ret; + } + break; + default: + break; + } + return 0; } @@ -708,6 +745,8 @@ static int tas2764_i2c_probe(struct i2c_client *client) if (!tas2764) return -ENOMEM; + tas2764->devid = (enum tas2764_devid)of_device_get_match_data(&client->dev); + tas2764->dev = &client->dev; tas2764->irq = client->irq; i2c_set_clientdata(client, tas2764); @@ -744,7 +783,8 @@ MODULE_DEVICE_TABLE(i2c, tas2764_i2c_id); #if defined(CONFIG_OF) static const struct of_device_id tas2764_of_match[] = { - { .compatible = "ti,tas2764" }, + { .compatible = "ti,tas2764", .data = (void *)DEVID_TAS2764 }, + { .compatible = "ti,sn012776", .data = (void *)DEVID_SN012776 }, {}, }; MODULE_DEVICE_TABLE(of, tas2764_of_match); diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h index 9490f2686e389..69c0f91cb4239 100644 --- a/sound/soc/codecs/tas2764.h +++ b/sound/soc/codecs/tas2764.h @@ -29,6 +29,7 @@ #define TAS2764_PWR_CTRL_ACTIVE 0x0 #define TAS2764_PWR_CTRL_MUTE BIT(0) #define TAS2764_PWR_CTRL_SHUTDOWN BIT(1) +#define TAS2764_PWR_CTRL_BOP_SRC BIT(7) #define TAS2764_VSENSE_POWER_EN 3 #define TAS2764_ISENSE_POWER_EN 4 @@ -116,4 +117,6 @@ #define TAS2764_INT_CLK_CFG TAS2764_REG(0x0, 0x5c) #define TAS2764_INT_CLK_CFG_IRQZ_CLR BIT(2) +#define TAS2764_BOP_CFG0 TAS2764_REG(0X0, 0x1d) + #endif /* __TAS2764__ */ diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 863c3f672ba98..0931b6109755f 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -156,11 +156,37 @@ static const struct snd_kcontrol_new isense_switch = static const struct snd_kcontrol_new vsense_switch = SOC_DAPM_SINGLE("Switch", TAS2770_PWR_CTRL, 2, 1, 1); +static int sense_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct tas2770_priv *tas2770 = snd_soc_component_get_drvdata(component); + + /* + * Powering up ISENSE/VSENSE requires a trip through the shutdown state. + * Do that here to ensure that our changes are applied properly, otherwise + * we might end up with non-functional IVSENSE if playback started earlier, + * which would break software speaker protection. 
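+	 * DAPM signals SND_SOC_DAPM_PRE_REG immediately before flipping the
+	 * switch bit and SND_SOC_DAPM_POST_REG immediately after, so the
+	 * shutdown/restore pair below brackets exactly that register write.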
+ */ + switch (event) { + case SND_SOC_DAPM_PRE_REG: + return snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, + TAS2770_PWR_CTRL_MASK, + TAS2770_PWR_CTRL_SHUTDOWN); + case SND_SOC_DAPM_POST_REG: + return tas2770_update_pwr_ctrl(tas2770); + default: + return 0; + } +} + static const struct snd_soc_dapm_widget tas2770_dapm_widgets[] = { SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0, &tas2770_asi1_mux), - SND_SOC_DAPM_SWITCH("ISENSE", TAS2770_PWR_CTRL, 3, 1, &isense_switch), - SND_SOC_DAPM_SWITCH("VSENSE", TAS2770_PWR_CTRL, 2, 1, &vsense_switch), + SND_SOC_DAPM_SWITCH_E("ISENSE", TAS2770_PWR_CTRL, 3, 1, &isense_switch, + sense_event, SND_SOC_DAPM_PRE_REG | SND_SOC_DAPM_POST_REG), + SND_SOC_DAPM_SWITCH_E("VSENSE", TAS2770_PWR_CTRL, 2, 1, &vsense_switch, + sense_event, SND_SOC_DAPM_PRE_REG | SND_SOC_DAPM_POST_REG), SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2770_dac_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_OUTPUT("OUT"), diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index 373a31ddccb2d..1375ac571fbf3 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -329,8 +329,7 @@ struct wcd9335_codec { int comp_enabled[COMPANDER_MAX]; int intr1; - int reset_gpio; - struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY]; + struct gpio_desc *reset_gpio; unsigned int rx_port_value[WCD9335_RX_MAX]; unsigned int tx_port_value[WCD9335_TX_MAX]; @@ -353,6 +352,10 @@ struct wcd9335_irq { char *name; }; +static const char * const wcd9335_supplies[] = { + "vdd-buck", "vdd-buck-sido", "vdd-tx", "vdd-rx", "vdd-io", +}; + static const struct wcd9335_slim_ch wcd9335_tx_chs[WCD9335_TX_MAX] = { WCD9335_SLIM_TX_CH(0), WCD9335_SLIM_TX_CH(1), @@ -4973,12 +4976,11 @@ static const struct regmap_irq_chip wcd9335_regmap_irq1_chip = { static int wcd9335_parse_dt(struct wcd9335_codec *wcd) { struct device *dev = wcd->dev; - struct device_node *np = dev->of_node; int ret; - wcd->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0); - if (wcd->reset_gpio < 0) - return dev_err_probe(dev, wcd->reset_gpio, "Reset GPIO missing from DT\n"); + wcd->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(wcd->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(wcd->reset_gpio), "Reset GPIO missing from DT\n"); wcd->mclk = devm_clk_get(dev, "mclk"); if (IS_ERR(wcd->mclk)) @@ -4988,30 +4990,16 @@ static int wcd9335_parse_dt(struct wcd9335_codec *wcd) if (IS_ERR(wcd->native_clk)) return dev_err_probe(dev, PTR_ERR(wcd->native_clk), "slimbus clock not found\n"); - wcd->supplies[0].supply = "vdd-buck"; - wcd->supplies[1].supply = "vdd-buck-sido"; - wcd->supplies[2].supply = "vdd-tx"; - wcd->supplies[3].supply = "vdd-rx"; - wcd->supplies[4].supply = "vdd-io"; - - ret = regulator_bulk_get(dev, WCD9335_MAX_SUPPLY, wcd->supplies); + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(wcd9335_supplies), + wcd9335_supplies); if (ret) - return dev_err_probe(dev, ret, "Failed to get supplies\n"); + return dev_err_probe(dev, ret, "Failed to get and enable supplies\n"); return 0; } static int wcd9335_power_on_reset(struct wcd9335_codec *wcd) { - struct device *dev = wcd->dev; - int ret; - - ret = regulator_bulk_enable(WCD9335_MAX_SUPPLY, wcd->supplies); - if (ret) { - dev_err(dev, "Failed to get supplies: err = %d\n", ret); - return ret; - } - /* * For WCD9335, it takes about 600us for 
the Vout_A and * Vout_D to be ready after BUCK_SIDO is powered up. @@ -5021,9 +5009,9 @@ static int wcd9335_power_on_reset(struct wcd9335_codec *wcd) */ usleep_range(600, 650); - gpio_direction_output(wcd->reset_gpio, 0); + gpiod_set_value(wcd->reset_gpio, 1); msleep(20); - gpio_set_value(wcd->reset_gpio, 1); + gpiod_set_value(wcd->reset_gpio, 0); msleep(20); return 0; diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c index 9c1997a42334d..1df827a084cac 100644 --- a/sound/soc/codecs/wcd937x.c +++ b/sound/soc/codecs/wcd937x.c @@ -92,7 +92,6 @@ struct wcd937x_priv { struct regmap_irq_chip *wcd_regmap_irq_chip; struct regmap_irq_chip_data *irq_chip; struct regulator_bulk_data supplies[WCD937X_MAX_BULK_SUPPLY]; - struct regulator *buck_supply; struct snd_soc_jack *jack; unsigned long status_mask; s32 micb_ref[WCD937X_MAX_MICBIAS]; @@ -2897,10 +2896,8 @@ static int wcd937x_probe(struct platform_device *pdev) return dev_err_probe(dev, ret, "Failed to get supplies\n"); ret = regulator_bulk_enable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies); - if (ret) { - regulator_bulk_free(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies); + if (ret) return dev_err_probe(dev, ret, "Failed to enable supplies\n"); - } wcd937x_dt_parse_micbias_info(dev, wcd937x); @@ -2936,7 +2933,6 @@ static int wcd937x_probe(struct platform_device *pdev) err_disable_regulators: regulator_bulk_disable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies); - regulator_bulk_free(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies); return ret; } @@ -2953,7 +2949,6 @@ static void wcd937x_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(dev); regulator_bulk_disable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies); - regulator_bulk_free(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies); } #if defined(CONFIG_OF) diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index bd5c46d763c0f..ffd4a6ca5f3cb 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -517,7 +517,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ATSi_MASK(index), ASRCTR_ATS(index)); regmap_update_bits(asrc->regmap, REG_ASRCTR, - ASRCTR_USRi_MASK(index), 0); + ASRCTR_IDRi_MASK(index) | ASRCTR_USRi_MASK(index), + ASRCTR_USR(index)); /* Set the input and output clock sources */ regmap_update_bits(asrc->regmap, REG_ASRCSR, diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index c5efbceb06d1f..25d4b27f5b766 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -771,13 +771,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir) * anymore. Add software reset to fix this issue. * This is a hardware bug, and will be fix in the * next sai version. + * + * In consumer mode, this can happen even after a + * single open/close, especially if both tx and rx + * are running concurrently. 
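+	 * The side being disabled is reprogrammed from scratch on its
+	 * next use, so the reset no longer needs to be gated on
+	 * producer/consumer mode.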
*/ - if (!sai->is_consumer_mode[tx]) { - /* Software Reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); - /* Clear SR bit to finish the reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); - } + /* Software Reset */ + regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); + /* Clear SR bit to finish the reset */ + regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); } static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, diff --git a/sound/soc/intel/avs/debugfs.c b/sound/soc/intel/avs/debugfs.c index 1767ded4d9830..c9978fb9c74e2 100644 --- a/sound/soc/intel/avs/debugfs.c +++ b/sound/soc/intel/avs/debugfs.c @@ -372,7 +372,10 @@ static ssize_t trace_control_write(struct file *file, const char __user *from, s return ret; num_elems = *array; - resource_mask = array[1]; + if (!num_elems) { + ret = -EINVAL; + goto free_array; + } /* * Disable if just resource mask is provided - no log priority flags. @@ -380,6 +383,7 @@ static ssize_t trace_control_write(struct file *file, const char __user *from, s * Enable input format: mask, prio1, .., prioN * Where 'N' equals number of bits set in the 'mask'. */ + resource_mask = array[1]; if (num_elems == 1) { ret = disable_logs(adev, resource_mask); } else { diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c index 4fba46e77c470..eff1d46040da6 100644 --- a/sound/soc/intel/avs/ipc.c +++ b/sound/soc/intel/avs/ipc.c @@ -169,7 +169,9 @@ static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg dev_crit(adev->dev, "communication severed, rebooting dsp..\n"); - cancel_delayed_work_sync(&ipc->d0ix_work); + /* Avoid deadlock as the exception may be the response to SET_D0IX. */ + if (current_work() != &ipc->d0ix_work.work) + cancel_delayed_work_sync(&ipc->d0ix_work); ipc->in_d0ix = false; /* Re-enabled on recovery completion. 
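Keeping runtime PM disabled until then stops autosuspend from
racing with the recovery work.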
*/ pm_runtime_disable(adev->dev); diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig index cc10ae58b0c7e..8dee46abf346d 100644 --- a/sound/soc/intel/boards/Kconfig +++ b/sound/soc/intel/boards/Kconfig @@ -42,6 +42,7 @@ config SND_SOC_INTEL_SOF_NUVOTON_COMMON tristate config SND_SOC_INTEL_SOF_BOARD_HELPERS + select SND_SOC_ACPI_INTEL_MATCH tristate if SND_SOC_INTEL_CATPT diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile index 91e146e2487da..a9a740e249698 100644 --- a/sound/soc/intel/common/Makefile +++ b/sound/soc/intel/common/Makefile @@ -14,7 +14,7 @@ snd-soc-acpi-intel-match-y := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-matc soc-acpi-intel-lnl-match.o \ soc-acpi-intel-ptl-match.o \ soc-acpi-intel-hda-match.o \ - soc-acpi-intel-sdw-mockup-match.o + soc-acpi-intel-sdw-mockup-match.o sof-function-topology-lib.o snd-soc-acpi-intel-match-y += soc-acpi-intel-ssp-common.o diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c index 24d850df77ca8..1ad704ca2c5f2 100644 --- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c @@ -8,6 +8,7 @@ #include #include #include +#include "sof-function-topology-lib.h" static const struct snd_soc_acpi_endpoint single_endpoint = { .num = 0, @@ -138,7 +139,7 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_r1_adr[] = { }, }; -static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = { +static const struct snd_soc_acpi_adr_device cs35l56_3_l3_adr[] = { { .adr = 0x00033301fa355601ull, .num_endpoints = 1, @@ -147,6 +148,24 @@ static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = { }, }; +static const struct snd_soc_acpi_adr_device cs35l56_2_r3_adr[] = { + { + .adr = 0x00023301fa355601ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, +}; + +static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = { + { + .adr = 0x00033101fa355601ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "AMP1" + }, +}; + static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = { { /* Jack Playback Endpoint */ .num = 0, @@ -304,6 +323,25 @@ static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_2_l23[] = { .num_adr = ARRAY_SIZE(cs35l56_2_r1_adr), .adr_d = cs35l56_2_r1_adr, }, + { + .mask = BIT(3), + .num_adr = ARRAY_SIZE(cs35l56_3_l3_adr), + .adr_d = cs35l56_3_l3_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_3_l23[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs42l43_0_adr), + .adr_d = cs42l43_0_adr, + }, + { + .mask = BIT(2), + .num_adr = ARRAY_SIZE(cs35l56_2_r3_adr), + .adr_d = cs35l56_2_r3_adr, + }, { .mask = BIT(3), .num_adr = ARRAY_SIZE(cs35l56_3_l1_adr), @@ -399,36 +437,49 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = { .links = arl_cs42l43_l0_cs35l56_l23, .drv_name = "sof_sdw", .sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, }, { .link_mask = BIT(0) | BIT(2) | BIT(3), .links = arl_cs42l43_l0_cs35l56_2_l23, .drv_name = "sof_sdw", .sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, + }, + { + .link_mask = BIT(0) | BIT(2) | BIT(3), + .links = arl_cs42l43_l0_cs35l56_3_l23, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, }, 
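	/*
	 * Entries are matched in table order, so a superset link_mask must
	 * stay ahead of its subsets or the narrower config would win.
	 */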
{ .link_mask = BIT(0) | BIT(2), .links = arl_cs42l43_l0_cs35l56_l2, .drv_name = "sof_sdw", .sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l2.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, }, { .link_mask = BIT(0), .links = arl_cs42l43_l0, .drv_name = "sof_sdw", .sof_tplg_filename = "sof-arl-cs42l43-l0.tplg", - }, - { - .link_mask = BIT(2), - .links = arl_cs42l43_l2, - .drv_name = "sof_sdw", - .sof_tplg_filename = "sof-arl-cs42l43-l2.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, }, { .link_mask = BIT(2) | BIT(3), .links = arl_cs42l43_l2_cs35l56_l3, .drv_name = "sof_sdw", .sof_tplg_filename = "sof-arl-cs42l43-l2-cs35l56-l3.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, + }, + { + .link_mask = BIT(2), + .links = arl_cs42l43_l2, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-arl-cs42l43-l2.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, }, { .link_mask = 0x1, /* link0 required */ @@ -447,6 +498,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = { .links = arl_rt722_l0_rt1320_l2, .drv_name = "sof_sdw", .sof_tplg_filename = "sof-arl-rt722-l0_rt1320-l2.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, }, {}, }; diff --git a/sound/soc/intel/common/sof-function-topology-lib.c b/sound/soc/intel/common/sof-function-topology-lib.c new file mode 100644 index 0000000000000..3cc81dcf047e3 --- /dev/null +++ b/sound/soc/intel/common/sof-function-topology-lib.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +// +// This file is provided under a dual BSD/GPLv2 license. When using or +// redistributing this file, you may do so under either license. +// +// Copyright(c) 2025 Intel Corporation. +// + +#include +#include +#include +#include +#include +#include "sof-function-topology-lib.h" + +enum tplg_device_id { + TPLG_DEVICE_SDCA_JACK, + TPLG_DEVICE_SDCA_AMP, + TPLG_DEVICE_SDCA_MIC, + TPLG_DEVICE_INTEL_PCH_DMIC, + TPLG_DEVICE_HDMI, + TPLG_DEVICE_MAX +}; + +#define SDCA_DEVICE_MASK (BIT(TPLG_DEVICE_SDCA_JACK) | BIT(TPLG_DEVICE_SDCA_AMP) | \ + BIT(TPLG_DEVICE_SDCA_MIC)) + +#define SOF_INTEL_PLATFORM_NAME_MAX 4 + +int sof_sdw_get_tplg_files(struct snd_soc_card *card, const struct snd_soc_acpi_mach *mach, + const char *prefix, const char ***tplg_files) +{ + struct snd_soc_acpi_mach_params mach_params = mach->mach_params; + struct snd_soc_dai_link *dai_link; + const struct firmware *fw; + char platform[SOF_INTEL_PLATFORM_NAME_MAX]; + unsigned long tplg_mask = 0; + int tplg_num = 0; + int tplg_dev; + int ret; + int i; + + ret = sscanf(mach->sof_tplg_filename, "sof-%3s-*.tplg", platform); + if (ret != 1) { + dev_err(card->dev, "Invalid platform name %s of tplg %s\n", + platform, mach->sof_tplg_filename); + return -EINVAL; + } + + for_each_card_prelinks(card, i, dai_link) { + char *tplg_dev_name; + + dev_dbg(card->dev, "dai_link %s id %d\n", dai_link->name, dai_link->id); + if (strstr(dai_link->name, "SimpleJack")) { + tplg_dev = TPLG_DEVICE_SDCA_JACK; + tplg_dev_name = "sdca-jack"; + } else if (strstr(dai_link->name, "SmartAmp")) { + tplg_dev = TPLG_DEVICE_SDCA_AMP; + tplg_dev_name = devm_kasprintf(card->dev, GFP_KERNEL, + "sdca-%damp", dai_link->num_cpus); + if (!tplg_dev_name) + return -ENOMEM; + } else if (strstr(dai_link->name, "SmartMic")) { + tplg_dev = TPLG_DEVICE_SDCA_MIC; + tplg_dev_name = "sdca-mic"; + } else if (strstr(dai_link->name, "dmic")) { + switch (mach_params.dmic_num) { + case 2: + tplg_dev_name = "dmic-2ch"; + break; + case 4: + tplg_dev_name = "dmic-4ch"; + break; + default: + 
dev_warn(card->dev, + "unsupported number of dmics: %d\n", + mach_params.dmic_num); + continue; + } + tplg_dev = TPLG_DEVICE_INTEL_PCH_DMIC; + } else if (strstr(dai_link->name, "iDisp")) { + tplg_dev = TPLG_DEVICE_HDMI; + tplg_dev_name = "hdmi-pcm5"; + + } else { + /* The dai link is not supported by separated tplg yet */ + dev_dbg(card->dev, + "dai_link %s is not supported by separated tplg yet\n", + dai_link->name); + return 0; + } + if (tplg_mask & BIT(tplg_dev)) + continue; + + tplg_mask |= BIT(tplg_dev); + + /* + * The tplg file naming rule is sof-<platform>-<function>-id<link_id>.tplg + * where <platform> is only required for the DMIC function as the nhlt blob + * is platform dependent. + */ + switch (tplg_dev) { + case TPLG_DEVICE_INTEL_PCH_DMIC: + (*tplg_files)[tplg_num] = devm_kasprintf(card->dev, GFP_KERNEL, + "%s/sof-%s-%s-id%d.tplg", + prefix, platform, + tplg_dev_name, dai_link->id); + break; + default: + (*tplg_files)[tplg_num] = devm_kasprintf(card->dev, GFP_KERNEL, + "%s/sof-%s-id%d.tplg", + prefix, tplg_dev_name, + dai_link->id); + break; + } + if (!(*tplg_files)[tplg_num]) + return -ENOMEM; + tplg_num++; + } + + dev_dbg(card->dev, "tplg_mask %#lx tplg_num %d\n", tplg_mask, tplg_num); + + /* Check presence of sub-topologies */ + for (i = 0; i < tplg_num; i++) { + ret = firmware_request_nowarn(&fw, (*tplg_files)[i], card->dev); + if (!ret) { + release_firmware(fw); + } else { + dev_dbg(card->dev, "Failed to open topology file: %s\n", (*tplg_files)[i]); + return 0; + } + } + + return tplg_num; +} + diff --git a/sound/soc/intel/common/sof-function-topology-lib.h b/sound/soc/intel/common/sof-function-topology-lib.h new file mode 100644 index 0000000000000..e7d0c39d07883 --- /dev/null +++ b/sound/soc/intel/common/sof-function-topology-lib.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * soc-acpi-intel-get-tplg.h - get-tplg-files ops + * + * Copyright (c) 2025, Intel Corporation. + * + */ + +#ifndef _SND_SOC_ACPI_INTEL_GET_TPLG_H +#define _SND_SOC_ACPI_INTEL_GET_TPLG_H + +int sof_sdw_get_tplg_files(struct snd_soc_card *card, const struct snd_soc_acpi_mach *mach, + const char *prefix, const char ***tplg_files); + +#endif diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c index 8ebf6c7502aa3..400cec09c3a3c 100644 --- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c +++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c @@ -822,12 +822,12 @@ SND_SOC_DAILINK_DEFS(ETDM1_IN_BE, SND_SOC_DAILINK_DEFS(ETDM2_IN_BE, DAILINK_COMP_ARRAY(COMP_CPU("ETDM2_IN")), - DAILINK_COMP_ARRAY(COMP_EMPTY()), + DAILINK_COMP_ARRAY(COMP_DUMMY()), DAILINK_COMP_ARRAY(COMP_EMPTY())); SND_SOC_DAILINK_DEFS(ETDM1_OUT_BE, DAILINK_COMP_ARRAY(COMP_CPU("ETDM1_OUT")), - DAILINK_COMP_ARRAY(COMP_EMPTY()), + DAILINK_COMP_ARRAY(COMP_DUMMY()), DAILINK_COMP_ARRAY(COMP_EMPTY())); SND_SOC_DAILINK_DEFS(ETDM2_OUT_BE, diff --git a/sound/soc/meson/meson-card-utils.c b/sound/soc/meson/meson-card-utils.c index 1a4ef124e4e25..ad38c74166a46 100644 --- a/sound/soc/meson/meson-card-utils.c +++ b/sound/soc/meson/meson-card-utils.c @@ -231,7 +231,7 @@ static int meson_card_parse_of_optional(struct snd_soc_card *card, const char *p)) { /* If property is not provided, don't fail ... */ - if (!of_property_read_bool(card->dev->of_node, propname)) + if (!of_property_present(card->dev->of_node, propname)) return 0; /* ...
but do fail if it is provided and the parsing fails */ diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index a479d7e5b7fbd..314ff68506d9f 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -91,6 +91,10 @@ static int sdm845_slim_snd_hw_params(struct snd_pcm_substream *substream, else ret = snd_soc_dai_set_channel_map(cpu_dai, tx_ch_cnt, tx_ch, 0, NULL); + if (ret != 0 && ret != -ENOTSUPP) { + dev_err(rtd->dev, "failed to set cpu chan map, err:%d\n", ret); + return ret; + } } return 0; diff --git a/sound/soc/sdw_utils/soc_sdw_rt_amp.c b/sound/soc/sdw_utils/soc_sdw_rt_amp.c index 6951dfb565263..b3d6ca2499734 100644 --- a/sound/soc/sdw_utils/soc_sdw_rt_amp.c +++ b/sound/soc/sdw_utils/soc_sdw_rt_amp.c @@ -190,7 +190,7 @@ int asoc_sdw_rt_amp_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc const struct snd_soc_dapm_route *rt_amp_map; char codec_name[CODEC_NAME_SIZE]; struct snd_soc_dai *codec_dai; - int ret; + int ret = -EINVAL; int i; rt_amp_map = get_codec_name_and_route(dai, codec_name); diff --git a/sound/soc/sof/amd/pci-acp70.c b/sound/soc/sof/amd/pci-acp70.c index a5d8b6a95a222..fe2ad0395f5d3 100644 --- a/sound/soc/sof/amd/pci-acp70.c +++ b/sound/soc/sof/amd/pci-acp70.c @@ -34,6 +34,7 @@ static const struct sof_amd_acp_desc acp70_chip_info = { .ext_intr_cntl = ACP70_EXTERNAL_INTR_CNTL, .ext_intr_stat = ACP70_EXT_INTR_STAT, .ext_intr_stat1 = ACP70_EXT_INTR_STAT1, + .acp_error_stat = ACP70_ERROR_STATUS, .dsp_intr_base = ACP70_DSP_SW_INTR_BASE, .acp_sw0_i2s_err_reason = ACP7X_SW0_I2S_ERROR_REASON, .sram_pte_offset = ACP70_SRAM_PTE_OFFSET, diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 9c8f79e55ec5d..624598c9e2df8 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1209,11 +1209,11 @@ static int check_tplg_quirk_mask(struct snd_soc_acpi_mach *mach) return 0; } -static char *remove_file_ext(const char *tplg_filename) +static char *remove_file_ext(struct device *dev, const char *tplg_filename) { char *filename, *tmp; - filename = kstrdup(tplg_filename, GFP_KERNEL); + filename = devm_kstrdup(dev, tplg_filename, GFP_KERNEL); if (!filename) return NULL; @@ -1297,7 +1297,7 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev) */ if (!sof_pdata->tplg_filename) { /* remove file extension if it exists */ - tplg_filename = remove_file_ext(mach->sof_tplg_filename); + tplg_filename = remove_file_ext(sdev->dev, mach->sof_tplg_filename); if (!tplg_filename) return NULL; diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c index 2fe4969cdc3b4..9db2cdb321282 100644 --- a/sound/soc/sof/ipc4-pcm.c +++ b/sound/soc/sof/ipc4-pcm.c @@ -780,7 +780,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm /* allocate memory for max number of pipeline IDs */ pipeline_list->pipelines = kcalloc(ipc4_data->max_num_pipelines, - sizeof(struct snd_sof_widget *), GFP_KERNEL); + sizeof(*pipeline_list->pipelines), + GFP_KERNEL); if (!pipeline_list->pipelines) { sof_ipc4_pcm_free(sdev, spcm); return -ENOMEM; diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c index 1920b996e9aad..51043e556b3e9 100644 --- a/sound/soc/tegra/tegra210_ahub.c +++ b/sound/soc/tegra/tegra210_ahub.c @@ -1359,6 +1359,8 @@ static int tegra_ahub_probe(struct platform_device *pdev) return -ENOMEM; ahub->soc_data = of_device_get_match_data(&pdev->dev); + if (!ahub->soc_data) + return -ENODEV; platform_set_drvdata(pdev, ahub); diff --git a/sound/soc/ti/omap-hdmi.c 
b/sound/soc/ti/omap-hdmi.c index cf43ac19c4a6d..55e7cb96858fc 100644 --- a/sound/soc/ti/omap-hdmi.c +++ b/sound/soc/ti/omap-hdmi.c @@ -361,17 +361,20 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev) if (!card->dai_link) return -ENOMEM; - compnent = devm_kzalloc(dev, sizeof(*compnent), GFP_KERNEL); + compnent = devm_kzalloc(dev, 2 * sizeof(*compnent), GFP_KERNEL); if (!compnent) return -ENOMEM; - card->dai_link->cpus = compnent; + card->dai_link->cpus = &compnent[0]; card->dai_link->num_cpus = 1; card->dai_link->codecs = &snd_soc_dummy_dlc; card->dai_link->num_codecs = 1; + card->dai_link->platforms = &compnent[1]; + card->dai_link->num_platforms = 1; card->dai_link->name = card->name; card->dai_link->stream_name = card->name; card->dai_link->cpus->dai_name = dev_name(ad->dssdev); + card->dai_link->platforms->name = dev_name(ad->dssdev); card->num_links = 1; card->dev = dev; diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c index 4727043fd7458..77f06da93151e 100644 --- a/sound/usb/implicit.c +++ b/sound/usb/implicit.c @@ -57,6 +57,7 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = { IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */ IMPLICIT_FB_FIXED_DEV(0x0499, 0x172f, 0x81, 2), /* Steinberg UR22C */ IMPLICIT_FB_FIXED_DEV(0x0d9a, 0x00df, 0x81, 2), /* RTX6001 */ + IMPLICIT_FB_FIXED_DEV(0x19f7, 0x000a, 0x84, 3), /* RODE AI-1 */ IMPLICIT_FB_FIXED_DEV(0x22f0, 0x0006, 0x81, 3), /* Allen&Heath Qu-16 */ IMPLICIT_FB_FIXED_DEV(0x1686, 0xf029, 0x82, 2), /* Zoom UAC-2 */ IMPLICIT_FB_FIXED_DEV(0x2466, 0x8003, 0x86, 2), /* Fractal Audio Axe-Fx II */ diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 0e9b5431a47f2..faac7df1fbcf0 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -383,6 +383,13 @@ static const struct usbmix_name_map ms_usb_link_map[] = { { 0 } /* terminator */ }; +/* KTMicro USB */ +static struct usbmix_name_map s31b2_0022_map[] = { + { 23, "Speaker Playback" }, + { 18, "Headphone Playback" }, + { 0 } +}; + /* ASUS ROG Zenith II with Realtek ALC1220-VB */ static const struct usbmix_name_map asus_zenith_ii_map[] = { { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ @@ -692,6 +699,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x045e, 0x083c), .map = ms_usb_link_map, }, + { + /* KTMicro USB */ + .id = USB_ID(0X31b2, 0x0022), + .map = s31b2_0022_map, + }, { 0 } /* terminator */ }; diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index c7387081577cd..0da4ee9757c01 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -2282,6 +2282,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_DISABLE_AUTOSUSPEND), DEVICE_FLG(0x17aa, 0x104d, /* Lenovo ThinkStation P620 Internal Speaker + Front Headset */ QUIRK_FLAG_DISABLE_AUTOSUSPEND), + DEVICE_FLG(0x17ef, 0x3083, /* Lenovo TBT3 dock */ + QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1852, 0x5062, /* Luxman D-08u */ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY), DEVICE_FLG(0x1852, 0x5065, /* Luxman DA-06 */ diff --git a/sound/usb/stream.c b/sound/usb/stream.c index c1ea8844a46fc..aa91d63749f2c 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -987,6 +987,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, * and request Cluster Descriptor */ wLength = le16_to_cpu(hc_header.wLength); + if (wLength < sizeof(cluster)) + return NULL; cluster = kzalloc(wLength, GFP_KERNEL); if (!cluster) return ERR_PTR(-ENOMEM); diff --git 
a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 3ae84c3b8e6db..3deb6c11f1344 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -612,6 +612,7 @@ #define MSR_AMD64_OSVW_STATUS 0xc0010141 #define MSR_AMD_PPIN_CTL 0xc00102f0 #define MSR_AMD_PPIN 0xc00102f1 +#define MSR_AMD64_CPUID_FN_7 0xc0011002 #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c index 1b25c0a95d3f9..40a9e59c2fd56 100644 --- a/tools/arch/x86/kcpuid/kcpuid.c +++ b/tools/arch/x86/kcpuid/kcpuid.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE -#include +#include +#include #include +#include #include #include -#include #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define min(a, b) (((a) < (b)) ? (a) : (b)) @@ -145,14 +146,14 @@ static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf, if (!func->leafs) { func->leafs = malloc(sizeof(struct subleaf)); if (!func->leafs) - perror("malloc func leaf"); + err(EXIT_FAILURE, NULL); func->nr = 1; } else { s = func->nr; func->leafs = realloc(func->leafs, (s + 1) * sizeof(*leaf)); if (!func->leafs) - perror("realloc f->leafs"); + err(EXIT_FAILURE, NULL); func->nr++; } @@ -211,7 +212,7 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax) range = malloc(sizeof(struct cpuid_range)); if (!range) - perror("malloc range"); + err(EXIT_FAILURE, NULL); if (input_eax & 0x80000000) range->is_ext = true; @@ -220,7 +221,7 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax) range->funcs = malloc(sizeof(struct cpuid_func) * idx_func); if (!range->funcs) - perror("malloc range->funcs"); + err(EXIT_FAILURE, NULL); range->nr = idx_func; memset(range->funcs, 0, sizeof(struct cpuid_func) * idx_func); @@ -395,8 +396,8 @@ static int parse_line(char *line) return 0; err_exit: - printf("Warning: wrong line format:\n"); - printf("\tline[%d]: %s\n", flines, line); + warnx("Wrong line format:\n" + "\tline[%d]: %s", flines, line); return -1; } @@ -418,10 +419,8 @@ static void parse_text(void) file = fopen("./cpuid.csv", "r"); } - if (!file) { - printf("Fail to open '%s'\n", filename); - return; - } + if (!file) + err(EXIT_FAILURE, "%s", filename); while (1) { ret = getline(&line, &len, file); @@ -530,7 +529,7 @@ static inline struct cpuid_func *index_to_func(u32 index) func_idx = index & 0xffff; if ((func_idx + 1) > (u32)range->nr) { - printf("ERR: invalid input index (0x%x)\n", index); + warnx("Invalid input index (0x%x)", index); return NULL; } return &range->funcs[func_idx]; @@ -562,7 +561,7 @@ static void show_info(void) return; } - printf("ERR: invalid input subleaf (0x%x)\n", user_sub); + warnx("Invalid input subleaf (0x%x)", user_sub); } show_func(func); @@ -593,15 +592,15 @@ static void setup_platform_cpuid(void) static void usage(void) { - printf("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n" - "\t-a|--all Show both bit flags and complex bit fields info\n" - "\t-b|--bitflags Show boolean flags only\n" - "\t-d|--detail Show details of the flag/fields (default)\n" - "\t-f|--flags Specify the cpuid csv file\n" - "\t-h|--help Show usage info\n" - "\t-l|--leaf=index Specify the leaf you want to check\n" - "\t-r|--raw Show raw cpuid data\n" - "\t-s|--subleaf=sub Specify the subleaf you want to check\n" + warnx("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n" + "\t-a|--all Show both bit flags and complex bit fields info\n" + "\t-b|--bitflags Show 
boolean flags only\n" + "\t-d|--detail Show details of the flag/fields (default)\n" + "\t-f|--flags Specify the CPUID CSV file\n" + "\t-h|--help Show usage info\n" + "\t-l|--leaf=index Specify the leaf you want to check\n" + "\t-r|--raw Show raw CPUID data\n" + "\t-s|--subleaf=sub Specify the subleaf you want to check" ); } @@ -652,7 +651,7 @@ static int parse_options(int argc, char *argv[]) user_sub = strtoul(optarg, NULL, 0); break; default: - printf("%s: Invalid option '%c'\n", argv[0], optopt); + warnx("Invalid option '%c'", optopt); return -1; } diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt index f5dd84eb55dcd..cd3fd5155f6ec 100644 --- a/tools/arch/x86/lib/x86-opcode-map.txt +++ b/tools/arch/x86/lib/x86-opcode-map.txt @@ -35,7 +35,7 @@ # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) # - (66&F2): Both 0x66 and 0xF2 prefixes are specified. # -# REX2 Prefix +# REX2 Prefix Superscripts # - (!REX2): REX2 is not allowed # - (REX2): REX2 variant e.g. JMPABS @@ -286,10 +286,10 @@ df: ESC # Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix # in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation # to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD. -e0: LOOPNE/LOOPNZ Jb (f64) (!REX2) -e1: LOOPE/LOOPZ Jb (f64) (!REX2) -e2: LOOP Jb (f64) (!REX2) -e3: JrCXZ Jb (f64) (!REX2) +e0: LOOPNE/LOOPNZ Jb (f64),(!REX2) +e1: LOOPE/LOOPZ Jb (f64),(!REX2) +e2: LOOP Jb (f64),(!REX2) +e3: JrCXZ Jb (f64),(!REX2) e4: IN AL,Ib (!REX2) e5: IN eAX,Ib (!REX2) e6: OUT Ib,AL (!REX2) @@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2) # in "near" jumps and calls is 16-bit. For CALL, # push of return address is 16-bit wide, RSP is decremented by 2 # but is not truncated to 16 bits, unlike RIP. -e8: CALL Jz (f64) (!REX2) -e9: JMP-near Jz (f64) (!REX2) -ea: JMP-far Ap (i64) (!REX2) -eb: JMP-short Jb (f64) (!REX2) +e8: CALL Jz (f64),(!REX2) +e9: JMP-near Jz (f64),(!REX2) +ea: JMP-far Ap (i64),(!REX2) +eb: JMP-short Jb (f64),(!REX2) ec: IN AL,DX (!REX2) ed: IN eAX,DX (!REX2) ee: OUT DX,AL (!REX2) @@ -478,22 +478,22 @@ AVXcode: 1 7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev) # 0x0f 0x80-0x8f # Note: "forced64" is Intel CPU behavior (see comment about CALL insn). 
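One note on the x86-opcode-map.txt edits here and below: judging by the "REX2 Prefix Superscripts" wording added to the file header, all superscripts of one opcode are meant to form a single comma-joined parenthesized group, so "(f64) (!REX2)" reaches the table generator (arch/x86/tools/gen-insn-attr-x86.awk, which splits fields on whitespace) as two separate fields and the second attribute can be dropped. A toy tokenizer, not the real awk parser, showing the difference the comma makes:

    #include <stdio.h>
    #include <string.h>

    /* Split an opcode-map line on whitespace, the way an awk script sees
     * fields. With "(f64) (!REX2)" the attributes arrive as two fields;
     * with "(f64),(!REX2)" they are one field that a comma-aware attribute
     * parser can walk completely. */
    static void show_fields(const char *line)
    {
            char buf[128];
            char *tok;

            snprintf(buf, sizeof(buf), "%s", line);
            printf("%s\n", line);
            for (tok = strtok(buf, " \t"); tok; tok = strtok(NULL, " \t"))
                    printf("  field: %s\n", tok);
    }

    int main(void)
    {
            show_fields("e8: CALL Jz (f64) (!REX2)"); /* old: two attribute fields */
            show_fields("e8: CALL Jz (f64),(!REX2)"); /* new: one combined field */
            return 0;
    }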
-80: JO Jz (f64) (!REX2) -81: JNO Jz (f64) (!REX2) -82: JB/JC/JNAE Jz (f64) (!REX2) -83: JAE/JNB/JNC Jz (f64) (!REX2) -84: JE/JZ Jz (f64) (!REX2) -85: JNE/JNZ Jz (f64) (!REX2) -86: JBE/JNA Jz (f64) (!REX2) -87: JA/JNBE Jz (f64) (!REX2) -88: JS Jz (f64) (!REX2) -89: JNS Jz (f64) (!REX2) -8a: JP/JPE Jz (f64) (!REX2) -8b: JNP/JPO Jz (f64) (!REX2) -8c: JL/JNGE Jz (f64) (!REX2) -8d: JNL/JGE Jz (f64) (!REX2) -8e: JLE/JNG Jz (f64) (!REX2) -8f: JNLE/JG Jz (f64) (!REX2) +80: JO Jz (f64),(!REX2) +81: JNO Jz (f64),(!REX2) +82: JB/JC/JNAE Jz (f64),(!REX2) +83: JAE/JNB/JNC Jz (f64),(!REX2) +84: JE/JZ Jz (f64),(!REX2) +85: JNE/JNZ Jz (f64),(!REX2) +86: JBE/JNA Jz (f64),(!REX2) +87: JA/JNBE Jz (f64),(!REX2) +88: JS Jz (f64),(!REX2) +89: JNS Jz (f64),(!REX2) +8a: JP/JPE Jz (f64),(!REX2) +8b: JNP/JPO Jz (f64),(!REX2) +8c: JL/JNGE Jz (f64),(!REX2) +8d: JNL/JGE Jz (f64),(!REX2) +8e: JLE/JNG Jz (f64),(!REX2) +8f: JNLE/JG Jz (f64),(!REX2) # 0x0f 0x90-0x9f 90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66) 91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66) diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index 9af426d432993..4189c9d74fb06 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -221,7 +221,7 @@ static int cgroup_has_attached_progs(int cgroup_fd) for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) { int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]); - if (count < 0) + if (count < 0 && errno != EINVAL) return -1; if (count > 0) { @@ -318,11 +318,11 @@ static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type, static int do_show(int argc, char **argv) { - enum bpf_attach_type type; int has_attached_progs; const char *path; int cgroup_fd; int ret = -1; + unsigned int i; query_flags = 0; @@ -370,14 +370,14 @@ static int do_show(int argc, char **argv) "AttachFlags", "Name"); btf_vmlinux = libbpf_find_kernel_btf(); - for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { + for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) { /* * Not all attach types may be supported, so it's expected, * that some requests will fail. * If we were able to get the show for at least one * attach type, let's return 0. */ - if (show_bpf_progs(cgroup_fd, type, 0) == 0) + if (show_bpf_progs(cgroup_fd, cgroup_attach_types[i], 0) == 0) ret = 0; } @@ -400,9 +400,9 @@ static int do_show(int argc, char **argv) static int do_show_tree_fn(const char *fpath, const struct stat *sb, int typeflag, struct FTW *ftw) { - enum bpf_attach_type type; int has_attached_progs; int cgroup_fd; + unsigned int i; if (typeflag != FTW_D) return 0; @@ -434,8 +434,8 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb, } btf_vmlinux = libbpf_find_kernel_btf(); - for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) - show_bpf_progs(cgroup_fd, type, ftw->level); + for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) + show_bpf_progs(cgroup_fd, cgroup_attach_types[i], ftw->level); if (errno == EINVAL) /* Last attach type does not support query. diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index 4b8079f294f65..b0072e64b0102 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -19,7 +19,7 @@ endif # Overrides for the prepare step libraries. 
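Returning to the bpftool cgroup.c hunk above: walking the curated cgroup_attach_types[] table instead of every enum bpf_attach_type value avoids querying attach types that are not cgroup attach types at all, which the kernel may reject with EINVAL (the relaxed errno check added to cgroup_has_attached_progs() tolerates the same case). A minimal sketch of the pattern, with stand-in names rather than the actual bpftool internals:

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    enum fake_attach_type {
            CGROUP_INET_INGRESS, CGROUP_INET_EGRESS, CGROUP_SOCK_CREATE,
            TRACING, /* not a cgroup attach type */
            __MAX_ATTACH_TYPE
    };

    /* Only the attach types that make sense for a cgroup query. */
    static const enum fake_attach_type cgroup_attach_types[] = {
            CGROUP_INET_INGRESS, CGROUP_INET_EGRESS, CGROUP_SOCK_CREATE,
    };

    static void show_attached_progs(enum fake_attach_type type)
    {
            printf("query attach type %d\n", (int)type);
    }

    int main(void)
    {
            unsigned int i;

            /* old: for (type = 0; type < __MAX_ATTACH_TYPE; type++)
             *      also probed TRACING, which a query would reject */
            for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++)
                    show_attached_progs(cgroup_attach_types[i]);
            return 0;
    }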
HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \ - CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)" + CROSS_COMPILE="" CLANG_CROSS_FLAGS="" EXTRA_CFLAGS="$(HOSTCFLAGS)" RM ?= rm HOSTCC ?= gcc diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h index 5a37ccbec54fb..f61a01dd7eb7c 100644 --- a/tools/include/linux/kallsyms.h +++ b/tools/include/linux/kallsyms.h @@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr, return NULL; } +#ifdef HAVE_BACKTRACE_SUPPORT #include #include static inline void print_ip_sym(const char *loglvl, unsigned long ip) @@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip) free(name); } +#else +static inline void print_ip_sym(const char *loglvl, unsigned long ip) {} +#endif #endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 552fd633f8200..5a5cdb4539358 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2035,6 +2035,7 @@ union bpf_attr { * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates * the checksum is to be computed against a pseudo-header. + * Flag **BPF_F_IPV6** should be set for IPv6 packets. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more @@ -6049,6 +6050,7 @@ enum { BPF_F_PSEUDO_HDR = (1ULL << 4), BPF_F_MARK_MANGLED_0 = (1ULL << 5), BPF_F_MARK_ENFORCE = (1ULL << 6), + BPF_F_IPV6 = (1ULL << 7), }; /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h index c0e13cdf96607..b997c68bd9453 100644 --- a/tools/lib/bpf/bpf_core_read.h +++ b/tools/lib/bpf/bpf_core_read.h @@ -388,7 +388,13 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak; #define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j #define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__) +#if defined(__clang__) && (__clang_major__ >= 19) +#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__)) +#elif defined(__GNUC__) && (__GNUC__ >= 14) +#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__)) +#else #define ___type(...) 
typeof(___arrow(__VA_ARGS__)) +#endif #define ___read(read_fn, dst, src_type, src, accessor) \ read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 27e7bfae953bd..b770702dab372 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -995,7 +995,7 @@ static struct btf *btf_new_empty(struct btf *base_btf) if (base_btf) { btf->base_btf = base_btf; btf->start_id = btf__type_cnt(base_btf); - btf->start_str_off = base_btf->hdr->str_len; + btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off; btf->swapped_endian = base_btf->swapped_endian; } @@ -4176,6 +4176,19 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id return true; } +static bool btf_dedup_identical_ptrs(struct btf_dedup *d, __u32 id1, __u32 id2) +{ + struct btf_type *t1, *t2; + + t1 = btf_type_by_id(d->btf, id1); + t2 = btf_type_by_id(d->btf, id2); + + if (!btf_is_ptr(t1) || !btf_is_ptr(t2)) + return false; + + return t1->type == t2->type; +} + /* * Check equivalence of BTF type graph formed by candidate struct/union (we'll * call it "candidate graph" in this description for brevity) to a type graph @@ -4308,6 +4321,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, */ if (btf_dedup_identical_structs(d, hypot_type_id, cand_id)) return 1; + /* A similar case is again observed for PTRs. */ + if (btf_dedup_identical_ptrs(d, hypot_type_id, cand_id)) + return 1; return 0; } diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 46cce18c83086..12306b5de3efb 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -225,6 +225,9 @@ static void btf_dump_free_names(struct hashmap *map) size_t bkt; struct hashmap_entry *cur; + if (!map) + return; + hashmap__for_each_entry(map, cur, bkt) free((void *)cur->pkey); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6e4d417604fa0..36e341b4b77bf 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -60,6 +60,8 @@ #define BPF_FS_MAGIC 0xcafe4a11 #endif +#define MAX_EVENT_NAME_LEN 64 + #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf" #define BPF_INSN_SZ (sizeof(struct bpf_insn)) @@ -283,7 +285,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...) 
old_errno = errno; va_start(args, format); - __libbpf_pr(level, format, args); + print_fn(level, format, args); va_end(args); errno = old_errno; @@ -594,7 +596,7 @@ struct extern_desc { int sym_idx; int btf_id; int sec_btf_id; - const char *name; + char *name; char *essent_name; bool is_set; bool is_weak; @@ -887,7 +889,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, return -LIBBPF_ERRNO__FORMAT; } - if (sec_off + prog_sz > sec_sz) { + if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) { pr_warn("sec '%s': program at offset %zu crosses section boundary\n", sec_name, sec_off); return -LIBBPF_ERRNO__FORMAT; @@ -4221,7 +4223,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj) return ext->btf_id; } t = btf__type_by_id(obj->btf, ext->btf_id); - ext->name = btf__name_by_offset(obj->btf, t->name_off); + ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off)); + if (!ext->name) + return -ENOMEM; ext->sym_idx = i; ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; @@ -9060,8 +9064,10 @@ void bpf_object__close(struct bpf_object *obj) zfree(&obj->btf_custom_path); zfree(&obj->kconfig); - for (i = 0; i < obj->nr_extern; i++) + for (i = 0; i < obj->nr_extern; i++) { + zfree(&obj->externs[i].name); zfree(&obj->externs[i].essent_name); + } zfree(&obj->externs); obj->nr_extern = 0; @@ -11039,16 +11045,16 @@ static const char *tracefs_available_filter_functions_addrs(void) : TRACEFS"/available_filter_functions_addrs"; } -static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, - const char *kfunc_name, size_t offset) +static void gen_probe_legacy_event_name(char *buf, size_t buf_sz, + const char *name, size_t offset) { static int index = 0; int i; - snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset, - __sync_fetch_and_add(&index, 1)); + snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(), + __sync_fetch_and_add(&index, 1), name, offset); - /* sanitize binary_path in the probe name */ + /* sanitize name in the probe name */ for (i = 0; buf[i]; i++) { if (!isalnum(buf[i])) buf[i] = '_'; @@ -11174,9 +11180,9 @@ int probe_kern_syscall_wrapper(int token_fd) return pfd >= 0 ? 
1 : 0; } else { /* legacy mode */ - char probe_name[128]; + char probe_name[MAX_EVENT_NAME_LEN]; - gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) return 0; @@ -11233,10 +11239,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, func_name, offset, -1 /* pid */, 0 /* ref_ctr_off */); } else { - char probe_name[256]; + char probe_name[MAX_EVENT_NAME_LEN]; - gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), - func_name, offset); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), + func_name, offset); legacy_probe = strdup(probe_name); if (!legacy_probe) @@ -11744,20 +11750,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru return ret; } -static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, - const char *binary_path, uint64_t offset) -{ - int i; - - snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); - - /* sanitize binary_path in the probe name */ - for (i = 0; buf[i]; i++) { - if (!isalnum(buf[i])) - buf[i] = '_'; - } -} - static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, const char *binary_path, size_t offset) { @@ -12173,13 +12165,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, func_offset, pid, ref_ctr_off); } else { - char probe_name[PATH_MAX + 64]; + char probe_name[MAX_EVENT_NAME_LEN]; if (ref_ctr_off) return libbpf_err_ptr(-EINVAL); - gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name), - binary_path, func_offset); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), + strrchr(binary_path, '/') ? 
: binary_path, + func_offset); legacy_probe = strdup(probe_name); if (!legacy_probe) @@ -13256,7 +13249,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; - attr.sample_period = sample_period; attr.wakeup_events = sample_period; p.attr = &attr; @@ -13983,6 +13975,12 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) } link = map_skel->link; + if (!link) { + pr_warn("map '%s': BPF map skeleton link is uninitialized\n", + bpf_map__name(map)); + continue; + } + if (*link) continue; diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 179f6b31cbd6f..d4ab9315afe71 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -1220,7 +1220,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj } else { if (!secs_match(dst_sec, src_sec)) { pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name); - return -1; + return -EINVAL; } /* "license" and "version" sections are deduped */ @@ -2067,7 +2067,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob } } else if (!secs_match(dst_sec, src_sec)) { pr_warn("sections %s are not compatible\n", src_sec->sec_name); - return -1; + return -EINVAL; } /* shdr->sh_link points to SYMTAB */ diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c index 975e265eab3bf..06663f9ea581f 100644 --- a/tools/lib/bpf/nlattr.c +++ b/tools/lib/bpf/nlattr.c @@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype, minlen = nla_attr_minlen[pt->type]; if (libbpf_nla_len(nla) < minlen) - return -1; + return -EINVAL; if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen) - return -1; + return -EINVAL; if (pt->type == LIBBPF_NLA_STRING) { char *data = libbpf_nla_data(nla); if (data[libbpf_nla_len(nla) - 1] != '\0') - return -1; + return -EINVAL; } return 0; @@ -118,19 +118,18 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) - goto errout; + return err; } - if (tb[type]) + if (tb[type]) { pr_warn("Attribute of type %#x found multiple times in message, " "previous attribute is being ignored.\n", type); + } tb[type] = nla; } - err = 0; -errout: - return err; + return 0; } /** diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 4fce0074076f3..a737286de7592 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -222,7 +222,8 @@ static bool is_rust_noreturn(const struct symbol *func) str_ends_with(func->name, "_7___rustc17rust_begin_unwind") || strstr(func->name, "_4core9panicking13assert_failed") || strstr(func->name, "_4core9panicking11panic_const24panic_const_") || - (strstr(func->name, "_4core5slice5index24slice_") && + (strstr(func->name, "_4core5slice5index") && + strstr(func->name, "slice_") && str_ends_with(func->name, "_fail")); } diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index b102a4c525e4b..a2034fa183254 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -569,6 +569,8 @@ ifndef NO_LIBELF ifeq ($(feature-libdebuginfod), 1) CFLAGS += -DHAVE_DEBUGINFOD_SUPPORT EXTLIBS += -ldebuginfod + else + $(warning No elfutils/debuginfod.h found, no debuginfo server support, please install libdebuginfod-dev/elfutils-debuginfod-client-devel or equivalent) endif endif diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 8ee59ecb14110..b61c355fbdeed 
100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -1143,7 +1143,8 @@ install-tests: all install-gtk $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \ $(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \ - $(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \ + $(INSTALL) tests/shell/base_report/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \ + $(INSTALL) tests/shell/base_report/*.txt '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' ; \ $(INSTALL) tests/shell/coresight/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' $(Q)$(MAKE) -C tests/shell/coresight install-tests diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index adbaf80b398c1..ab9035573a15e 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -3471,7 +3471,7 @@ static struct option __record_options[] = { "sample selected machine registers on interrupt," " use '-I?' to list register names", parse_intr_regs), OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register", - "sample selected machine registers on interrupt," + "sample selected machine registers in user space," " use '--user-regs=?' to list register names", parse_user_regs), OPT_BOOLEAN(0, "running-time", &record.opts.running_time, "Record running/enabled time of read (:S) events"), diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ecd26e058baf6..f77e4f4b6f03e 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -1327,7 +1327,7 @@ static const struct syscall_fmt syscall_fmts[] = { .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ }, [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, }, - { .name = "rseq", .errpid = true, + { .name = "rseq", .arg = { [0] = { .from_user = true /* rseq */, }, }, }, { .name = "rt_sigaction", .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, @@ -1351,7 +1351,7 @@ static const struct syscall_fmt syscall_fmts[] = { { .name = "sendto", .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, [4] = SCA_SOCKADDR_FROM_USER(addr), }, }, - { .name = "set_robust_list", .errpid = true, + { .name = "set_robust_list", .arg = { [0] = { .from_user = true /* head */, }, }, }, { .name = "set_tid_address", .errpid = true, }, { .name = "setitimer", @@ -2873,8 +2873,8 @@ errno_print: { else if (sc->fmt->errpid) { struct thread *child = machine__find_thread(trace->host, ret, ret); + fprintf(trace->output, "%ld", ret); if (child != NULL) { - fprintf(trace->output, "%ld", ret); if (thread__comm_set(child)) fprintf(trace->output, " (%s)", thread__comm_str(child)); thread__put(child); @@ -3986,10 +3986,13 @@ static int trace__set_filter_loop_pids(struct trace *trace) if (!strcmp(thread__comm_str(parent), "sshd") || strstarts(thread__comm_str(parent), "gnome-terminal")) { pids[nr++] = thread__tid(parent); + thread__put(parent); break; } + thread__put(thread); thread = parent; } + thread__put(thread); err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); if (!err && trace->filter_pids.map) diff --git a/tools/perf/scripts/python/exported-sql-viewer.py 
b/tools/perf/scripts/python/exported-sql-viewer.py index 121cf61ba1b34..e0b2e7268ef68 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py @@ -680,7 +680,10 @@ def FindSelect(self, value, pattern, query): s = value.replace("%", "\\%") s = s.replace("_", "\\_") # Translate * and ? into SQL LIKE pattern characters % and _ - trans = string.maketrans("*?", "%_") + if sys.version_info[0] == 3: + trans = str.maketrans("*?", "%_") + else: + trans = string.maketrans("*?", "%_") match = " LIKE '" + str(s).translate(trans) + "'" else: match = " GLOB '" + str(value) + "'" diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c index 5cab17a1942e6..ee43d8fa2ed67 100644 --- a/tools/perf/tests/switch-tracking.c +++ b/tools/perf/tests/switch-tracking.c @@ -258,7 +258,7 @@ static int compar(const void *a, const void *b) const struct event_node *nodeb = b; s64 cmp = nodea->event_time - nodeb->event_time; - return cmp; + return cmp < 0 ? -1 : (cmp > 0 ? 1 : 0); } static int process_events(struct evlist *evlist, diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c index ed114b0442936..b6986d50dde6c 100644 --- a/tools/perf/tests/tests-scripts.c +++ b/tools/perf/tests/tests-scripts.c @@ -255,6 +255,7 @@ static void append_scripts_in_dir(int dir_fd, continue; /* Skip scripts that have a separate driver. */ fd = openat(dir_fd, ent->d_name, O_PATH); append_scripts_in_dir(fd, result, result_sz); + close(fd); } for (i = 0; i < n_dirs; i++) /* Clean up */ zfree(&entlist[i]); diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 49ba82bf33918..3283b6313bab8 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -3267,10 +3267,10 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h /* * No need to set actions->dso here since * it's just to remove the current filter. - * Ditto for thread below. 
*/ do_zoom_dso(browser, actions); } else if (top == &browser->hists->thread_filter) { + actions->thread = thread; do_zoom_thread(browser, actions); } else if (top == &browser->hists->socket_filter) { do_zoom_socket(browser, actions); diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index fd2597613f3dc..61f10578e1212 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -127,6 +127,7 @@ struct intel_pt { bool single_pebs; bool sample_pebs; + int pebs_data_src_fmt; struct evsel *pebs_evsel; u64 evt_sample_type; @@ -175,6 +176,7 @@ enum switch_state { struct intel_pt_pebs_event { struct evsel *evsel; u64 id; + int data_src_fmt; }; struct intel_pt_queue { @@ -2232,7 +2234,146 @@ static void intel_pt_add_lbrs(struct branch_stack *br_stack, } } -static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id) +#define P(a, b) PERF_MEM_S(a, b) +#define OP_LH (P(OP, LOAD) | P(LVL, HIT)) +#define LEVEL(x) P(LVLNUM, x) +#define REM P(REMOTE, REMOTE) +#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS)) + +#define PERF_PEBS_DATA_SOURCE_GRT_MAX 0x10 +#define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1) + +/* Based on kernel __intel_pmu_pebs_data_source_grt() and pebs_data_source */ +static const u64 pebs_data_source_grt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = { + P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* L3 miss|SNP N/A */ + OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* L1 hit|SNP None */ + OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* LFB/MAB hit|SNP None */ + OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* L2 hit|SNP None */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* L3 hit|SNP None */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* L3 hit|SNP Hit */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP HitM */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP HitM */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* L3 hit|SNP Fwd */ + OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* Remote L3 hit|SNP HitM */ + OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* RAM hit|SNP Hit */ + OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* Remote L3 hit|SNP Hit */ + OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* RAM hit|SNP None or Miss */ + OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* Remote RAM hit|SNP None or Miss */ + OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* I/O hit|SNP None */ + OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* Uncached hit|SNP None */ +}; + +/* Based on kernel __intel_pmu_pebs_data_source_cmt() and pebs_data_source */ +static const u64 pebs_data_source_cmt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = { + P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* L3 miss|SNP N/A */ + OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* L1 hit|SNP None */ + OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* LFB/MAB hit|SNP None */ + OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* L2 hit|SNP None */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* L3 hit|SNP None */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* L3 hit|SNP Hit */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* L3 hit|SNP HitM */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* L3 hit|SNP HitM */ + OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP Fwd */ + OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* Remote L3 hit|SNP HitM 
*/ + OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE), /* RAM hit|SNP Hit */ + OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE), /* Remote L3 hit|SNP Hit */ + OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD), /* RAM hit|SNP None or Miss */ + OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM), /* Remote RAM hit|SNP None or Miss */ + OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* I/O hit|SNP None */ + OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* Uncached hit|SNP None */ +}; + +/* Based on kernel pebs_set_tlb_lock() */ +static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock) +{ + /* + * TLB access + * 0 = did not miss 2nd level TLB + * 1 = missed 2nd level TLB + */ + if (tlb) + *val |= P(TLB, MISS) | P(TLB, L2); + else + *val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2); + + /* locked prefix */ + if (lock) + *val |= P(LOCK, LOCKED); +} + +/* Based on kernel __grt_latency_data() */ +static u64 intel_pt_grt_latency_data(u8 dse, bool tlb, bool lock, bool blk, + const u64 *pebs_data_source) +{ + u64 val; + + dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK; + val = pebs_data_source[dse]; + + pebs_set_tlb_lock(&val, tlb, lock); + + if (blk) + val |= P(BLK, DATA); + else + val |= P(BLK, NA); + + return val; +} + +/* Default value for data source */ +#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ + PERF_MEM_S(LVL, NA) |\ + PERF_MEM_S(SNOOP, NA) |\ + PERF_MEM_S(LOCK, NA) |\ + PERF_MEM_S(TLB, NA) |\ + PERF_MEM_S(LVLNUM, NA)) + +enum DATA_SRC_FORMAT { + DATA_SRC_FORMAT_ERR = -1, + DATA_SRC_FORMAT_NA = 0, + DATA_SRC_FORMAT_GRT = 1, + DATA_SRC_FORMAT_CMT = 2, +}; + +/* Based on kernel grt_latency_data() and cmt_latency_data */ +static u64 intel_pt_get_data_src(u64 mem_aux_info, int data_src_fmt) +{ + switch (data_src_fmt) { + case DATA_SRC_FORMAT_GRT: { + union { + u64 val; + struct { + unsigned int dse:4; + unsigned int locked:1; + unsigned int stlb_miss:1; + unsigned int fwd_blk:1; + unsigned int reserved:25; + }; + } x = {.val = mem_aux_info}; + return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk, + pebs_data_source_grt); + } + case DATA_SRC_FORMAT_CMT: { + union { + u64 val; + struct { + unsigned int dse:5; + unsigned int locked:1; + unsigned int stlb_miss:1; + unsigned int fwd_blk:1; + unsigned int reserved:24; + }; + } x = {.val = mem_aux_info}; + return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk, + pebs_data_source_cmt); + } + default: + return PERF_MEM_NA; + } +} + +static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, + u64 id, int data_src_fmt) { const struct intel_pt_blk_items *items = &ptq->state->items; struct perf_sample sample = { .ip = 0, }; @@ -2350,6 +2491,18 @@ static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evse } } + if (sample_type & PERF_SAMPLE_DATA_SRC) { + if (items->has_mem_aux_info && data_src_fmt) { + if (data_src_fmt < 0) { + pr_err("Intel PT missing data_src info\n"); + return -1; + } + sample.data_src = intel_pt_get_data_src(items->mem_aux_info, data_src_fmt); + } else { + sample.data_src = PERF_MEM_NA; + } + } + if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) { u64 ax = items->has_rax ? 
items->rax : 0; /* Refer kernel's intel_hsw_transaction() */ @@ -2368,9 +2521,10 @@ static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq) { struct intel_pt *pt = ptq->pt; struct evsel *evsel = pt->pebs_evsel; + int data_src_fmt = pt->pebs_data_src_fmt; u64 id = evsel->core.id[0]; - return intel_pt_do_synth_pebs_sample(ptq, evsel, id); + return intel_pt_do_synth_pebs_sample(ptq, evsel, id, data_src_fmt); } static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) @@ -2395,7 +2549,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) hw_id); return intel_pt_synth_single_pebs_sample(ptq); } - err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id); + err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id, pe->data_src_fmt); if (err) return err; } @@ -3355,6 +3509,49 @@ static int intel_pt_process_itrace_start(struct intel_pt *pt, event->itrace_start.tid); } +/* + * Events with data_src are identified by L1_Hit_Indication + * refer https://github.com/intel/perfmon + */ +static int intel_pt_data_src_fmt(struct intel_pt *pt, struct evsel *evsel) +{ + struct perf_env *env = pt->machine->env; + int fmt = DATA_SRC_FORMAT_NA; + + if (!env->cpuid) + return DATA_SRC_FORMAT_ERR; + + /* + * PEBS-via-PT is only supported on E-core non-hybrid. Of those only + * Gracemont and Crestmont have data_src. Check for: + * Alderlake N (Gracemont) + * Sierra Forest (Crestmont) + * Grand Ridge (Crestmont) + */ + + if (!strncmp(env->cpuid, "GenuineIntel,6,190,", 19)) + fmt = DATA_SRC_FORMAT_GRT; + + if (!strncmp(env->cpuid, "GenuineIntel,6,175,", 19) || + !strncmp(env->cpuid, "GenuineIntel,6,182,", 19)) + fmt = DATA_SRC_FORMAT_CMT; + + if (fmt == DATA_SRC_FORMAT_NA) + return fmt; + + /* + * Only data_src events are: + * mem-loads event=0xd0,umask=0x5 + * mem-stores event=0xd0,umask=0x6 + */ + if (evsel->core.attr.type == PERF_TYPE_RAW && + ((evsel->core.attr.config & 0xffff) == 0x5d0 || + (evsel->core.attr.config & 0xffff) == 0x6d0)) + return fmt; + + return DATA_SRC_FORMAT_NA; +} + static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt, union perf_event *event, struct perf_sample *sample) @@ -3375,6 +3572,7 @@ static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt, ptq->pebs[hw_id].evsel = evsel; ptq->pebs[hw_id].id = sample->id; + ptq->pebs[hw_id].data_src_fmt = intel_pt_data_src_fmt(pt, evsel); return 0; } @@ -3924,6 +4122,7 @@ static void intel_pt_setup_pebs_events(struct intel_pt *pt) } pt->single_pebs = true; pt->sample_pebs = true; + pt->pebs_data_src_fmt = intel_pt_data_src_fmt(pt, evsel); pt->pebs_evsel = evsel; } } diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 9be2f4479f525..20fd742984e3c 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1974,7 +1974,7 @@ static void ip__resolve_ams(struct thread *thread, * Thus, we have to try consecutively until we find a match * or else, the symbol is unknown */ - thread__find_cpumode_addr_location(thread, ip, &al); + thread__find_cpumode_addr_location(thread, ip, /*symbols=*/true, &al); ams->addr = ip; ams->al_addr = al.addr; @@ -2076,7 +2076,7 @@ static int add_callchain_ip(struct thread *thread, al.sym = NULL; al.srcline = NULL; if (!cpumode) { - thread__find_cpumode_addr_location(thread, ip, &al); + thread__find_cpumode_addr_location(thread, ip, symbols, &al); } else { if (ip >= PERF_CONTEXT_MAX) { switch (ip) { @@ -2104,6 +2104,8 @@ static int add_callchain_ip(struct thread *thread, } if (symbols) thread__find_symbol(thread, *cpumode, ip, &al); 
+ else + thread__find_map(thread, *cpumode, ip, &al); + } if (al.sym != NULL) { diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c index 81e0135cddf01..a1c71d9793bd8 100644 --- a/tools/perf/util/print-events.c +++ b/tools/perf/util/print-events.c @@ -282,6 +282,7 @@ bool is_event_supported(u8 type, u64 config) ret = evsel__open(evsel, NULL, tmap) >= 0; } + evsel__close(evsel); evsel__delete(evsel); } diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index c6f369b5d893f..36c1d3090689f 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -90,11 +90,23 @@ int filename__read_build_id(const char *filename, struct build_id *bid) { FILE *fp; int ret = -1; - bool need_swap = false; + bool need_swap = false, elf32; u8 e_ident[EI_NIDENT]; - size_t buf_size; - void *buf; int i; + union { + struct { + Elf32_Ehdr ehdr32; + Elf32_Phdr *phdr32; + }; + struct { + Elf64_Ehdr ehdr64; + Elf64_Phdr *phdr64; + }; + } hdrs; + void *phdr; + size_t phdr_size; + void *buf = NULL; + size_t buf_size = 0; fp = fopen(filename, "r"); if (fp == NULL) @@ -108,117 +120,79 @@ int filename__read_build_id(const char *filename, struct build_id *bid) goto out; need_swap = check_need_swap(e_ident[EI_DATA]); + elf32 = e_ident[EI_CLASS] == ELFCLASS32; - /* for simplicity */ - fseek(fp, 0, SEEK_SET); - - if (e_ident[EI_CLASS] == ELFCLASS32) { - Elf32_Ehdr ehdr; - Elf32_Phdr *phdr; - - if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) - goto out; + if (fread(elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64, + elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64), + 1, fp) != 1) + goto out; - if (need_swap) { - ehdr.e_phoff = bswap_32(ehdr.e_phoff); - ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); - ehdr.e_phnum = bswap_16(ehdr.e_phnum); + if (need_swap) { + if (elf32) { + hdrs.ehdr32.e_phoff = bswap_32(hdrs.ehdr32.e_phoff); + hdrs.ehdr32.e_phentsize = bswap_16(hdrs.ehdr32.e_phentsize); + hdrs.ehdr32.e_phnum = bswap_16(hdrs.ehdr32.e_phnum); + } else { + hdrs.ehdr64.e_phoff = bswap_64(hdrs.ehdr64.e_phoff); + hdrs.ehdr64.e_phentsize = bswap_16(hdrs.ehdr64.e_phentsize); + hdrs.ehdr64.e_phnum = bswap_16(hdrs.ehdr64.e_phnum); } + } + phdr_size = elf32 ? hdrs.ehdr32.e_phentsize * hdrs.ehdr32.e_phnum + : hdrs.ehdr64.e_phentsize * hdrs.ehdr64.e_phnum; + phdr = malloc(phdr_size); + if (phdr == NULL) + goto out; - buf_size = ehdr.e_phentsize * ehdr.e_phnum; - buf = malloc(buf_size); - if (buf == NULL) - goto out; - - fseek(fp, ehdr.e_phoff, SEEK_SET); - if (fread(buf, buf_size, 1, fp) != 1) - goto out_free; - - for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { - void *tmp; - long offset; - - if (need_swap) { - phdr->p_type = bswap_32(phdr->p_type); - phdr->p_offset = bswap_32(phdr->p_offset); - phdr->p_filesz = bswap_32(phdr->p_filesz); - } - - if (phdr->p_type != PT_NOTE) - continue; - - buf_size = phdr->p_filesz; - offset = phdr->p_offset; - tmp = realloc(buf, buf_size); - if (tmp == NULL) - goto out_free; - - buf = tmp; - fseek(fp, offset, SEEK_SET); - if (fread(buf, buf_size, 1, fp) != 1) - goto out_free; + fseek(fp, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET); + if (fread(phdr, phdr_size, 1, fp) != 1) + goto out_free; - ret = read_build_id(buf, buf_size, bid, need_swap); - if (ret == 0) { - ret = bid->size; - break; - } - } - } else { - Elf64_Ehdr ehdr; - Elf64_Phdr *phdr; + if (elf32) + hdrs.phdr32 = phdr; + else + hdrs.phdr64 = phdr; - if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) - goto out; + for (i = 0; i < (elf32 ?
hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum); i++) { + size_t p_filesz; if (need_swap) { - ehdr.e_phoff = bswap_64(ehdr.e_phoff); - ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); - ehdr.e_phnum = bswap_16(ehdr.e_phnum); + if (elf32) { + hdrs.phdr32[i].p_type = bswap_32(hdrs.phdr32[i].p_type); + hdrs.phdr32[i].p_offset = bswap_32(hdrs.phdr32[i].p_offset); + hdrs.phdr32[i].p_filesz = bswap_32(hdrs.phdr32[i].p_filesz); + } else { + hdrs.phdr64[i].p_type = bswap_32(hdrs.phdr64[i].p_type); + hdrs.phdr64[i].p_offset = bswap_64(hdrs.phdr64[i].p_offset); + hdrs.phdr64[i].p_filesz = bswap_64(hdrs.phdr64[i].p_filesz); + } } + if ((elf32 ? hdrs.phdr32[i].p_type : hdrs.phdr64[i].p_type) != PT_NOTE) + continue; - buf_size = ehdr.e_phentsize * ehdr.e_phnum; - buf = malloc(buf_size); - if (buf == NULL) - goto out; - - fseek(fp, ehdr.e_phoff, SEEK_SET); - if (fread(buf, buf_size, 1, fp) != 1) - goto out_free; - - for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { + p_filesz = elf32 ? hdrs.phdr32[i].p_filesz : hdrs.phdr64[i].p_filesz; + if (p_filesz > buf_size) { void *tmp; - long offset; - - if (need_swap) { - phdr->p_type = bswap_32(phdr->p_type); - phdr->p_offset = bswap_64(phdr->p_offset); - phdr->p_filesz = bswap_64(phdr->p_filesz); - } - if (phdr->p_type != PT_NOTE) - continue; - - buf_size = phdr->p_filesz; - offset = phdr->p_offset; + buf_size = p_filesz; tmp = realloc(buf, buf_size); if (tmp == NULL) goto out_free; - buf = tmp; - fseek(fp, offset, SEEK_SET); - if (fread(buf, buf_size, 1, fp) != 1) - goto out_free; + } + fseek(fp, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET); + if (fread(buf, p_filesz, 1, fp) != 1) + goto out_free; - ret = read_build_id(buf, buf_size, bid, need_swap); - if (ret == 0) { - ret = bid->size; - break; - } + ret = read_build_id(buf, p_filesz, bid, need_swap); + if (ret == 0) { + ret = bid->size; + break; } } out_free: free(buf); + free(phdr); out: fclose(fp); return ret; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 0ffdd52d86d70..309d573eac9a9 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -406,7 +406,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bo } void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, - struct addr_location *al) + bool symbols, struct addr_location *al) { size_t i; const u8 cpumodes[] = { @@ -417,7 +417,11 @@ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, }; for (i = 0; i < ARRAY_SIZE(cpumodes); i++) { - thread__find_symbol(thread, cpumodes[i], addr, al); + if (symbols) + thread__find_symbol(thread, cpumodes[i], addr, al); + else + thread__find_map(thread, cpumodes[i], addr, al); + if (al->map) break; } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 6cbf6eb2812e0..1fb32e7d62a4d 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -122,7 +122,7 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode, u64 addr, struct addr_location *al); void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, - struct addr_location *al); + bool symbols, struct addr_location *al); int thread__memcpy(struct thread *thread, struct machine *machine, void *buf, u64 ip, int len, bool *is64bit); diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 12424bf08551d..4c322586730d4 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4491,6 +4491,38 @@
unsigned long pmt_read_counter(struct pmt_counter *ppmt, unsigned int domain_id) return (value & value_mask) >> value_shift; } + +/* Rapl domain enumeration helpers */ +static inline int get_rapl_num_domains(void) +{ + int num_packages = topo.max_package_id + 1; + int num_cores_per_package; + int num_cores; + + if (!platform->has_per_core_rapl) + return num_packages; + + num_cores_per_package = topo.max_core_id + 1; + num_cores = num_cores_per_package * num_packages; + + return num_cores; +} + +static inline int get_rapl_domain_id(int cpu) +{ + int nr_cores_per_package = topo.max_core_id + 1; + int rapl_core_id; + + if (!platform->has_per_core_rapl) + return cpus[cpu].physical_package_id; + + /* Compute the system-wide unique core-id for @cpu */ + rapl_core_id = cpus[cpu].physical_core_id; + rapl_core_id += cpus[cpu].physical_package_id * nr_cores_per_package; + + return rapl_core_id; +} + /* * get_counters(...) * migrate to cpu @@ -4544,7 +4576,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) goto done; if (platform->has_per_core_rapl) { - status = get_rapl_counters(cpu, c->core_id, c, p); + status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p); if (status != 0) return status; } @@ -4610,7 +4642,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) p->sys_lpi = cpuidle_cur_sys_lpi_us; if (!platform->has_per_core_rapl) { - status = get_rapl_counters(cpu, p->package_id, c, p); + status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p); if (status != 0) return status; } @@ -7570,7 +7602,7 @@ void linux_perf_init(void) void rapl_perf_init(void) { - const unsigned int num_domains = (platform->has_per_core_rapl ? topo.max_core_id : topo.max_package_id) + 1; + const unsigned int num_domains = get_rapl_num_domains(); bool *domain_visited = calloc(num_domains, sizeof(bool)); rapl_counter_info_perdomain = calloc(num_domains, sizeof(*rapl_counter_info_perdomain)); @@ -7611,8 +7643,7 @@ void rapl_perf_init(void) continue; /* Skip already seen and handled RAPL domains */ - next_domain = - platform->has_per_core_rapl ? 
cpus[cpu].physical_core_id : cpus[cpu].physical_package_id; + next_domain = get_rapl_domain_id(cpu); assert(next_domain < num_domains); diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py index e975c4331a7c2..2019550a1b692 100644 --- a/tools/testing/kunit/qemu_configs/sparc.py +++ b/tools/testing/kunit/qemu_configs/sparc.py @@ -2,8 +2,11 @@ QEMU_ARCH = QemuArchParams(linux_arch='sparc', kconfig=''' -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y''', +CONFIG_KUNIT_FAULT_TEST=n +CONFIG_SPARC32=y +CONFIG_SERIAL_SUNZILOG=y +CONFIG_SERIAL_SUNZILOG_CONSOLE=y +''', qemu_arch='sparc', kernel_path='arch/sparc/boot/zImage', kernel_command_line='console=ttyS0 mem=256M', diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 9cf769d415687..85c5f39131d34 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -196,7 +196,7 @@ export KHDR_INCLUDES all: @ret=1; \ - for TARGET in $(TARGETS); do \ + for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \ BUILD_TARGET=$$BUILD/$$TARGET; \ mkdir $$BUILD_TARGET -p; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET \ diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index a4a1f93878d40..fad98f01e2c06 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -63,6 +63,12 @@ static void test_bpf_nf_ct(int mode) .repeat = 1, ); + if (SYS_NOFAIL("iptables-legacy --version")) { + fprintf(stdout, "Missing required iptables-legacy tool\n"); + test__skip(); + return; + } + skel = test_bpf_nf__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) return; diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c index a3f220ba7025b..ee65bad0436d0 100644 --- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c +++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c @@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last"); int percpu_arr[1] SEC(".data.percpu_arr"); +/* at least one extern is included, to ensure that a specific + * regression is tested whereby resizing resulted in a use-after-free + * bug after type information is invalidated by the resize operation. + * + * There isn't a particularly good API to test for this specific condition, + * but by having externs for the resizing tests it will cover this path. 
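+ * (LINUX_KERNEL_VERSION below is just a convenient, always-available + * __kconfig extern; the version_sink reads in the programs keep the + * extern from being optimized away.)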
+ */ +extern int LINUX_KERNEL_VERSION __kconfig; +long version_sink; + SEC("tp/syscalls/sys_enter_getpid") int bss_array_sum(void *ctx) { @@ -44,6 +54,9 @@ int bss_array_sum(void *ctx) for (size_t i = 0; i < bss_array_len; ++i) sum += array[i]; + /* see above; ensure this is not optimized out */ + version_sink = LINUX_KERNEL_VERSION; + return 0; } @@ -59,6 +72,9 @@ int data_array_sum(void *ctx) for (size_t i = 0; i < data_array_len; ++i) sum += my_array[i]; + /* see above; ensure this is not optimized out */ + version_sink = LINUX_KERNEL_VERSION; + return 0; } diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 3e9b009580d4e..7f69d7b5bd4d4 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -970,6 +970,14 @@ void run_subtest(struct test_loader *tester, emit_verifier_log(tester->log_buf, false /*force*/); validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log); + /* Restore capabilities because the kernel will silently ignore requests + * for program info (such as xlated program text) if we are not + * bpf-capable. Also, for some reason test_verifier executes programs + * with all capabilities restored. Do the same here. + */ + if (restore_capabilities(&caps)) + goto tobj_cleanup; + if (subspec->expect_xlated.cnt) { err = get_xlated_program_text(bpf_program__fd(tprog), tester->log_buf, tester->log_buf_sz); @@ -995,12 +1003,6 @@ void run_subtest(struct test_loader *tester, } if (should_do_test_run(spec, subspec)) { - /* For some reason test_verifier executes programs - * with all capabilities restored. Do the same here. - */ - if (restore_capabilities(&caps)) - goto tobj_cleanup; - /* Do bpf_map__attach_struct_ops() for each struct_ops map. * This should trigger bpf_struct_ops->reg callback on kernel side. */ diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index fda7589c50236..0921939532c6c 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -138,6 +138,18 @@ static int sched_next_online(int pid, int *next_to_try) return ret; } +/* Derive target_free from map_size, same as bpf_common_lru_populate */ +static unsigned int __tgt_size(unsigned int map_size) +{ + return (map_size / nr_cpus) / 2; +} + +/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */ +static unsigned int __map_size(unsigned int tgt_free) +{ + return tgt_free * nr_cpus * 2; +} + /* Size of the LRU map is 2 * Add key=1 (+1 key) * Add key=2 (+1 key) @@ -231,11 +243,11 @@ static void test_lru_sanity0(int map_type, int map_flags) printf("Pass\n"); } -/* Size of the LRU map is 1.5*tgt_free - * Insert 1 to tgt_free (+tgt_free keys) - * Lookup 1 to tgt_free/2 - * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys) - * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU +/* Verify that unreferenced elements are recycled before referenced ones. + * Insert elements. + * Reference a subset of these. + * Insert more, enough to trigger recycling. + * Verify that unreferenced are recycled. 
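+ * E.g. with tgt_free=128 on a 4-CPU system: map_size = 128*4*2 + 64 = 1088, + * so the second round of 1024 insertions must recycle unreferenced keys + * while the 64 referenced ones survive.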
*/ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) { @@ -257,7 +269,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) batch_size = tgt_free / 2; assert(batch_size * 2 == tgt_free); - map_size = tgt_free + batch_size; + map_size = __map_size(tgt_free) + batch_size; lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); @@ -266,13 +278,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1 to tgt_free (+tgt_free keys) */ - end_key = 1 + tgt_free; + /* Insert map_size - batch_size keys */ + end_key = 1 + __map_size(tgt_free); for (key = 1; key < end_key; key++) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - /* Lookup 1 to tgt_free/2 */ + /* Lookup 1 to batch_size */ end_key = 1 + batch_size; for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); @@ -280,12 +292,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) BPF_NOEXIST)); } - /* Insert 1+tgt_free to 2*tgt_free - * => 1+tgt_free/2 to LOCALFREE_TARGET will be + /* Insert another map_size - batch_size keys + * Map will contain 1 to batch_size plus these latest, i.e., + * => previous 1+batch_size to map_size - batch_size will have been * removed by LRU */ - key = 1 + tgt_free; - end_key = key + tgt_free; + key = 1 + __map_size(tgt_free); + end_key = key + __map_size(tgt_free); for (; key < end_key; key++) { assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); @@ -301,17 +314,8 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) printf("Pass\n"); } -/* Size of the LRU map 1.5 * tgt_free - * Insert 1 to tgt_free (+tgt_free keys) - * Update 1 to tgt_free/2 - * => The original 1 to tgt_free/2 will be removed due to - * the LRU shrink process - * Re-insert 1 to tgt_free/2 again and do a lookup immeidately - * Insert 1+tgt_free to tgt_free*3/2 - * Insert 1+tgt_free*3/2 to tgt_free*5/2 - * => Key 1+tgt_free to tgt_free*3/2 - * will be removed from LRU because it has never - * been lookup and ref bit is not set +/* Verify that insertions exceeding map size will recycle the oldest. + * Verify that unreferenced elements are recycled before referenced. */ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) { @@ -334,7 +338,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) batch_size = tgt_free / 2; assert(batch_size * 2 == tgt_free); - map_size = tgt_free + batch_size; + map_size = __map_size(tgt_free) + batch_size; lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); @@ -343,8 +347,8 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1 to tgt_free (+tgt_free keys) */ - end_key = 1 + tgt_free; + /* Insert map_size - batch_size keys */ + end_key = 1 + __map_size(tgt_free); for (key = 1; key < end_key; key++) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); @@ -357,8 +361,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) * shrink the inactive list to get tgt_free * number of free nodes. * - * Hence, the oldest key 1 to tgt_free/2 - * are removed from the LRU list. + * Hence, the oldest key is removed from the LRU list. 
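+ * (One shrink reclaims up to target_free = map_size / nr_cpus / 2 nodes; + * see __tgt_size() above.)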
*/ key = 1; if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { @@ -370,8 +373,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) BPF_EXIST)); } - /* Re-insert 1 to tgt_free/2 again and do a lookup - * immeidately. + /* Re-insert 1 to batch_size again and do a lookup immediately. */ end_key = 1 + batch_size; value[0] = 4321; @@ -387,17 +389,18 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1+tgt_free to tgt_free*3/2 */ - end_key = 1 + tgt_free + batch_size; - for (key = 1 + tgt_free; key < end_key; key++) + /* Insert batch_size new elements */ + key = 1 + __map_size(tgt_free); + end_key = key + batch_size; + for (; key < end_key; key++) /* These newly added but not referenced keys will be * gone during the next LRU shrink. */ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */ - end_key = key + tgt_free; + /* Insert map_size - batch_size elements */ + end_key += __map_size(tgt_free); for (; key < end_key; key++) { assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); @@ -413,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) printf("Pass\n"); } -/* Size of the LRU map is 2*tgt_free - * It is to test the active/inactive list rotation - * Insert 1 to 2*tgt_free (+2*tgt_free keys) - * Lookup key 1 to tgt_free*3/2 - * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys) - * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU +/* Test the active/inactive list rotation + * + * Fill the whole map, deplete the free list. + * Reference all except the last lru->target_free elements. + * Insert lru->target_free new elements. This triggers one shrink. + * Verify that the non-referenced elements are replaced. 
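+ * (lru->target_free is the per-map value derived from the map size at + * init; __tgt_size() above mirrors that derivation.)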
*/ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) { @@ -437,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) assert(sched_next_online(0, &next_cpu) != -1); - batch_size = tgt_free / 2; - assert(batch_size * 2 == tgt_free); + batch_size = __tgt_size(tgt_free); map_size = tgt_free * 2; lru_map_fd = create_map(map_type, map_flags, map_size); @@ -449,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) value[0] = 1234; - /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */ - end_key = 1 + (2 * tgt_free); + /* Fill the map */ + end_key = 1 + map_size; for (key = 1; key < end_key; key++) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); - /* Lookup key 1 to tgt_free*3/2 */ - end_key = tgt_free + batch_size; + /* Reference all but the last batch_size */ + end_key = 1 + map_size - batch_size; for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, BPF_NOEXIST)); } - /* Add 1+2*tgt_free to tgt_free*5/2 - * (+tgt_free/2 keys) - */ + /* Insert new batch_size: replaces the non-referenced elements */ key = 2 * tgt_free + 1; end_key = key + batch_size; for (; key < end_key; key++) { @@ -500,7 +500,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free * nr_cpus); else - lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free); + lru_map_fd = create_map(map_type, map_flags, + 3 * __map_size(tgt_free)); assert(lru_map_fd != -1); expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, diff --git a/tools/testing/selftests/cpufreq/cpufreq.sh b/tools/testing/selftests/cpufreq/cpufreq.sh index e350c521b4675..3aad9db921b53 100755 --- a/tools/testing/selftests/cpufreq/cpufreq.sh +++ b/tools/testing/selftests/cpufreq/cpufreq.sh @@ -244,9 +244,10 @@ do_suspend() printf "Failed to suspend using RTC wake alarm\n" return 1 fi + else + echo $filename > $SYSFS/power/state fi - echo $filename > $SYSFS/power/state printf "Came out of $1\n" printf "Do basic tests after finishing $1 to verify cpufreq state\n\n" diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 06f252733660a..a81c22d520070 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1748,6 +1748,7 @@ FIXTURE_VARIANT(iommufd_dirty_tracking) FIXTURE_SETUP(iommufd_dirty_tracking) { + size_t mmap_buffer_size; unsigned long size; int mmap_flags; void *vrc; @@ -1762,22 +1763,33 @@ FIXTURE_SETUP(iommufd_dirty_tracking) self->fd = open("/dev/iommu", O_RDWR); ASSERT_NE(-1, self->fd); - rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size); - if (rc || !self->buffer) { - SKIP(return, "Skipping buffer_size=%lu due to errno=%d", - variant->buffer_size, rc); - } - mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED; + mmap_buffer_size = variant->buffer_size; if (variant->hugepages) { /* * MAP_POPULATE will cause the kernel to fail mmap if THPs are * not available. */ mmap_flags |= MAP_HUGETLB | MAP_POPULATE; + + /* + * Allocation must be aligned to the HUGEPAGE_SIZE, because the + * following mmap() will automatically align the length to be a + * multiple of the underlying huge page size. Failing to do the + * same at this allocation will result in a memory overwrite by + * the mmap(). 
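+ * (For example, with 2MiB huge pages a 4KiB buffer_size would be + * mapped as a full 2MiB region, trampling whatever follows the + * smaller allocation.)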
+ */ + if (mmap_buffer_size < HUGEPAGE_SIZE) + mmap_buffer_size = HUGEPAGE_SIZE; + } + + rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size); + if (rc || !self->buffer) { + SKIP(return, "Skipping buffer_size=%lu due to errno=%d", + mmap_buffer_size, rc); } assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0); - vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE, + vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); assert(vrc == self->buffer); @@ -1806,8 +1818,8 @@ FIXTURE_SETUP(iommufd_dirty_tracking) FIXTURE_TEARDOWN(iommufd_dirty_tracking) { - munmap(self->buffer, variant->buffer_size); - munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE)); + free(self->buffer); + free(self->bitmap); teardown_iommufd(self->fd, _metadata); } diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index c992e385159c0..195360082d949 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -48,7 +48,6 @@ declare -A NETIFS=( : "${WAIT_TIME:=5}" # Whether to pause on, respectively, after a failure and before cleanup. -: "${PAUSE_ON_FAIL:=no}" : "${PAUSE_ON_CLEANUP:=no}" # Whether to create virtual interfaces, and what netdevice type they should be. @@ -446,22 +445,6 @@ done ############################################################################## # Helpers -# Exit status to return at the end. Set in case one of the tests fails. -EXIT_STATUS=0 -# Per-test return value. Clear at the beginning of each test. -RET=0 - -ret_set_ksft_status() -{ - local ksft_status=$1; shift - local msg=$1; shift - - RET=$(ksft_status_merge $RET $ksft_status) - if (( $? )); then - retmsg=$msg - fi -} - # Whether FAILs should be interpreted as XFAILs. Internal. 
FAIL_TO_XFAIL= @@ -535,102 +518,6 @@ xfail_on_veth() fi } -log_test_result() -{ - local test_name=$1; shift - local opt_str=$1; shift - local result=$1; shift - local retmsg=$1; shift - - printf "TEST: %-60s [%s]\n" "$test_name $opt_str" "$result" - if [[ $retmsg ]]; then - printf "\t%s\n" "$retmsg" - fi -} - -pause_on_fail() -{ - if [[ $PAUSE_ON_FAIL == yes ]]; then - echo "Hit enter to continue, 'q' to quit" - read a - [[ $a == q ]] && exit 1 - fi -} - -handle_test_result_pass() -{ - local test_name=$1; shift - local opt_str=$1; shift - - log_test_result "$test_name" "$opt_str" " OK " -} - -handle_test_result_fail() -{ - local test_name=$1; shift - local opt_str=$1; shift - - log_test_result "$test_name" "$opt_str" FAIL "$retmsg" - pause_on_fail -} - -handle_test_result_xfail() -{ - local test_name=$1; shift - local opt_str=$1; shift - - log_test_result "$test_name" "$opt_str" XFAIL "$retmsg" - pause_on_fail -} - -handle_test_result_skip() -{ - local test_name=$1; shift - local opt_str=$1; shift - - log_test_result "$test_name" "$opt_str" SKIP "$retmsg" -} - -log_test() -{ - local test_name=$1 - local opt_str=$2 - - if [[ $# -eq 2 ]]; then - opt_str="($opt_str)" - fi - - if ((RET == ksft_pass)); then - handle_test_result_pass "$test_name" "$opt_str" - elif ((RET == ksft_xfail)); then - handle_test_result_xfail "$test_name" "$opt_str" - elif ((RET == ksft_skip)); then - handle_test_result_skip "$test_name" "$opt_str" - else - handle_test_result_fail "$test_name" "$opt_str" - fi - - EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET) - return $RET -} - -log_test_skip() -{ - RET=$ksft_skip retmsg= log_test "$@" -} - -log_test_xfail() -{ - RET=$ksft_xfail retmsg= log_test "$@" -} - -log_info() -{ - local msg=$1 - - echo "INFO: $msg" -} - not() { "$@" diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index be8707bfb46e5..bb4d2f8d50d67 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -6,6 +6,9 @@ : "${WAIT_TIMEOUT:=20}" +# Whether to pause after a failure. +: "${PAUSE_ON_FAIL:=no}" + BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms # Kselftest framework constants. @@ -17,6 +20,11 @@ ksft_skip=4 # namespace list created by setup_ns NS_LIST=() +# Exit status to return at the end. Set in case one of the tests fails. +EXIT_STATUS=0 +# Per-test return value. Clear at the beginning of each test. +RET=0 + ############################################################################## # Helpers @@ -233,3 +241,110 @@ tc_rule_handle_stats_get() | jq ".[] | select(.options.handle == $handle) | \ .options.actions[0].stats$selector" } + +ret_set_ksft_status() +{ + local ksft_status=$1; shift + local msg=$1; shift + + RET=$(ksft_status_merge $RET $ksft_status) + if (( $? 
)); then + retmsg=$msg + fi +} + +log_test_result() +{ + local test_name=$1; shift + local opt_str=$1; shift + local result=$1; shift + local retmsg=$1 + + printf "TEST: %-60s [%s]\n" "$test_name $opt_str" "$result" + if [[ $retmsg ]]; then + printf "\t%s\n" "$retmsg" + fi +} + +pause_on_fail() +{ + if [[ $PAUSE_ON_FAIL == yes ]]; then + echo "Hit enter to continue, 'q' to quit" + read a + [[ $a == q ]] && exit 1 + fi +} + +handle_test_result_pass() +{ + local test_name=$1; shift + local opt_str=$1; shift + + log_test_result "$test_name" "$opt_str" " OK " +} + +handle_test_result_fail() +{ + local test_name=$1; shift + local opt_str=$1; shift + + log_test_result "$test_name" "$opt_str" FAIL "$retmsg" + pause_on_fail +} + +handle_test_result_xfail() +{ + local test_name=$1; shift + local opt_str=$1; shift + + log_test_result "$test_name" "$opt_str" XFAIL "$retmsg" + pause_on_fail +} + +handle_test_result_skip() +{ + local test_name=$1; shift + local opt_str=$1; shift + + log_test_result "$test_name" "$opt_str" SKIP "$retmsg" +} + +log_test() +{ + local test_name=$1 + local opt_str=$2 + + if [[ $# -eq 2 ]]; then + opt_str="($opt_str)" + fi + + if ((RET == ksft_pass)); then + handle_test_result_pass "$test_name" "$opt_str" + elif ((RET == ksft_xfail)); then + handle_test_result_xfail "$test_name" "$opt_str" + elif ((RET == ksft_skip)); then + handle_test_result_skip "$test_name" "$opt_str" + else + handle_test_result_fail "$test_name" "$opt_str" + fi + + EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET) + return $RET +} + +log_test_skip() +{ + RET=$ksft_skip retmsg= log_test "$@" +} + +log_test_xfail() +{ + RET=$ksft_xfail retmsg= log_test "$@" +} + +log_info() +{ + local msg=$1 + + echo "INFO: $msg" +} diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 8c3a73461475b..60c84d935a2b0 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1618,14 +1618,8 @@ void teardown_trace_fixture(struct __test_metadata *_metadata, { if (tracer) { int status; - /* - * Extract the exit code from the other process and - * adopt it for ourselves in case its asserts failed. - */ ASSERT_EQ(0, kill(tracer, SIGUSR1)); ASSERT_EQ(tracer, waitpid(tracer, &status, 0)); - if (WEXITSTATUS(status)) - _metadata->exit_code = KSFT_FAIL; } } @@ -3155,12 +3149,15 @@ TEST(syscall_restart) ret = get_syscall(_metadata, child_pid); #if defined(__arm__) /* - * FIXME: * - native ARM registers do NOT expose true syscall. * - compat ARM registers on ARM64 DO expose true syscall. + * - values of utsbuf.machine include 'armv8l' or 'armv8b' + * for ARM64 running in compat mode. 
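+ * Exclude those, so that only a genuinely native 32-bit ARM machine + * (e.g. 'armv7l') keeps the nanosleep expectation below.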
*/ ASSERT_EQ(0, uname(&utsbuf)); - if (strncmp(utsbuf.machine, "arm", 3) == 0) { + if ((strncmp(utsbuf.machine, "arm", 3) == 0) && + (strncmp(utsbuf.machine, "armv8l", 6) != 0) && + (strncmp(utsbuf.machine, "armv8b", 6) != 0)) { EXPECT_EQ(__NR_nanosleep, ret); } else #endif diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index d51249f14e2fe..5656e58a53803 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -12,7 +12,7 @@ CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh "$(CC)" trivial_program.c -no-pie) TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ check_initial_reg_state sigreturn iopl ioperm \ - test_vsyscall mov_ss_trap \ + test_vsyscall mov_ss_trap sigtrap_loop \ syscall_arg_fault fsgsbase_restore sigaltstack TARGETS_C_BOTHBITS += nx_stack TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \ diff --git a/tools/testing/selftests/x86/sigtrap_loop.c b/tools/testing/selftests/x86/sigtrap_loop.c new file mode 100644 index 0000000000000..9d065479e89f9 --- /dev/null +++ b/tools/testing/selftests/x86/sigtrap_loop.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Intel Corporation + */ +#define _GNU_SOURCE + +#include <err.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <ucontext.h> + +#ifdef __x86_64__ +# define REG_IP REG_RIP +#else +# define REG_IP REG_EIP +#endif + +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) +{ + struct sigaction sa; + + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = handler; + sa.sa_flags = SA_SIGINFO | flags; + sigemptyset(&sa.sa_mask); + + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); + + return; +} + +static void sigtrap(int sig, siginfo_t *info, void *ctx_void) +{ + ucontext_t *ctx = (ucontext_t *)ctx_void; + static unsigned int loop_count_on_same_ip; + static unsigned long last_trap_ip; + + if (last_trap_ip == ctx->uc_mcontext.gregs[REG_IP]) { + printf("\tTrapped at %016lx\n", last_trap_ip); + + /* + * If the same IP is hit more than 10 times in a row, it is + * _considered_ an infinite loop. + */ + if (++loop_count_on_same_ip > 10) { + printf("[FAIL]\tDetected SIGTRAP infinite loop\n"); + exit(1); + } + + return; + } + + loop_count_on_same_ip = 0; + last_trap_ip = ctx->uc_mcontext.gregs[REG_IP]; + printf("\tTrapped at %016lx\n", last_trap_ip); +} + +int main(int argc, char *argv[]) +{ + sethandler(SIGTRAP, sigtrap, 0); + + /* + * Set the Trap Flag (TF) to single-step the test code, therefore to + * trigger a SIGTRAP signal after each instruction until the TF is + * cleared. + * + * Because the arithmetic flags are not significant here, the TF is + * set by pushing 0x302 onto the stack and then popping it into the + * flags register. + * + * Four instructions in the following asm code are executed with the + * TF set, thus the SIGTRAP handler is expected to run four times. + */ + printf("[RUN]\tSIGTRAP infinite loop detection\n"); + asm volatile( +#ifdef __x86_64__ + /* + * Avoid clobbering the redzone + * + * Equivalent to "sub $128, %rsp", however -128 can be encoded + * in a single byte immediate while 128 uses 4 bytes. 
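+ * + * (The red zone is the 128-byte area below %rsp that the x86-64 SysV + * ABI reserves for leaf functions; the signal frame pushed on each + * SIGTRAP would otherwise clobber it.)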
+ */ + "add $-128, %rsp\n\t" +#endif + "push $0x302\n\t" + "popf\n\t" + "nop\n\t" + "nop\n\t" + "push $0x202\n\t" + "popf\n\t" +#ifdef __x86_64__ + "sub $-128, %rsp\n\t" +#endif + ); + + printf("[OK]\tNo SIGTRAP infinite loop detected\n"); + return 0; +} diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index c5b9da034511c..1d5bbc8464f18 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -735,6 +735,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, (void)adjust_next; } +static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {} + static inline void vma_iter_free(struct vma_iterator *vmi) { mas_destroy(&vmi->mas); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b99de3b5ffbc0..aba4078ae2250 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2557,6 +2557,8 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT); if (r) goto out_unlock; + + cond_resched(); } kvm_handle_gfn_range(kvm, &pre_set_range); @@ -2565,6 +2567,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, r = xa_err(xa_store(&kvm->mem_attr_array, i, entry, GFP_KERNEL_ACCOUNT)); KVM_BUG_ON(r, kvm); + cond_resched(); } kvm_handle_gfn_range(kvm, &post_set_range);