author     David S. Miller <davem@davemloft.net>  2019-08-19 11:54:03 -0700
committer  David S. Miller <davem@davemloft.net>  2019-08-19 11:54:03 -0700
commit     446bf64b613c4433dac4b15f4eaf326beaad3c8e
tree       b20b9457234f6f9a5d7464213ef6d10f95f3eed3
parent     20e79a0a2cfd15b6cfb18119f2e108396be56716
parent     06821504fd47a5e5b641aeeb638a0ae10a216ef8

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Merge conflict of mlx5 resolved using instructions in merge
commit 9566e650bf7fdf58384bb06df634f7531ca3a97e.
Signed-off-by: David S. Miller <davem@davemloft.net>
549 files changed, 4711 insertions, 2903 deletions
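
Note: the mlx5 conflict resolution mentioned in the message above is documented in the earlier merge commit it references. As a minimal sketch (assuming a local clone of a tree containing commit 9566e650bf7f), those instructions can be read back with standard git commands:

    # Print only the commit message that records the resolution steps
    git log -1 --format=%B 9566e650bf7fdf58384bb06df634f7531ca3a97e

    # Or inspect that merge commit together with its combined diff
    git show 9566e650bf7fdf58384bb06df634f7531ca3a97e
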
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst index a7d44e71019d..287b98708a40 100644 --- a/Documentation/admin-guide/sysctl/net.rst +++ b/Documentation/admin-guide/sysctl/net.rst @@ -39,7 +39,6 @@ Table : Subdirectories in /proc/sys/net 802 E802 protocol ax25 AX25 ethernet Ethernet protocol rose X.25 PLP layer ipv4 IP version 4 x25 X.25 protocol - ipx IPX token-ring IBM token ring bridge Bridging decnet DEC net ipv6 IP version 6 tipc TIPC ========= =================== = ========== ================== @@ -401,33 +400,7 @@ interface. (network) that the route leads to, the router (may be directly connected), the route flags, and the device the route is using. - -5. IPX ------- - -The IPX protocol has no tunable values in proc/sys/net. - -The IPX protocol does, however, provide proc/net/ipx. This lists each IPX -socket giving the local and remote addresses in Novell format (that is -network:node:port). In accordance with the strange Novell tradition, -everything but the port is in hex. Not_Connected is displayed for sockets that -are not tied to a specific remote address. The Tx and Rx queue sizes indicate -the number of bytes pending for transmission and reception. The state -indicates the state the socket is in and the uid is the owning uid of the -socket. - -The /proc/net/ipx_interface file lists all IPX interfaces. For each interface -it gives the network number, the node number, and indicates if the network is -the primary network. It also indicates which device it is bound to (or -Internal for internal networks) and the Frame Type if appropriate. Linux -supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for -IPX. - -The /proc/net/ipx_route table holds a list of IPX routes. For each route it -gives the destination network, the router node (or Directly) and the network -address of the router (or Connected) for internal networks. - -6. TIPC +5. TIPC ------- tipc_rmem diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile index 6b0dfd5c17ba..5138a2f6232a 100644 --- a/Documentation/devicetree/bindings/Makefile +++ b/Documentation/devicetree/bindings/Makefile @@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA $@ DT_DOCS = $(shell \ cd $(srctree)/$(src) && \ - find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \ + find * \( -name '*.yaml' ! \ + -name $(DT_TMP_SCHEMA) ! \ + -name '*.example.dt.yaml' \) \ ) DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index 2d41fb96ce0a..5b88fae0307d 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -7,18 +7,6 @@ Required properties: - phy-mode : See ethernet.txt file in the same directory Optional properties: -- phy-reset-gpios : Should specify the gpio for phy reset -- phy-reset-duration : Reset duration in milliseconds. Should present - only if property "phy-reset-gpios" is available. Missing the property - will have the duration be 1 millisecond. Numbers greater than 1000 are - invalid and 1 millisecond will be used instead. -- phy-reset-active-high : If present then the reset sequence using the GPIO - specified in the "phy-reset-gpios" property is reversed (H=reset state, - L=operation state). -- phy-reset-post-delay : Post reset delay in milliseconds. 
If present then - a delay of phy-reset-post-delay milliseconds will be observed after the - phy-reset-gpios has been toggled. Can be omitted thus no delay is - observed. Delay is in range of 1ms to 1000ms. Other delays are invalid. - phy-supply : regulator that powers the Ethernet PHY. - phy-handle : phandle to the PHY device connected to this device. - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. @@ -47,11 +35,27 @@ Optional properties: For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse per second interrupt associated with 1588 precision time protocol(PTP). - Optional subnodes: - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes according to phy.txt in the same directory +Deprecated optional properties: + To avoid these, create a phy node according to phy.txt in the same + directory, and point the fec's "phy-handle" property to it. Then use + the phy's reset binding, again described by phy.txt. +- phy-reset-gpios : Should specify the gpio for phy reset +- phy-reset-duration : Reset duration in milliseconds. Should present + only if property "phy-reset-gpios" is available. Missing the property + will have the duration be 1 millisecond. Numbers greater than 1000 are + invalid and 1 millisecond will be used instead. +- phy-reset-active-high : If present then the reset sequence using the GPIO + specified in the "phy-reset-gpios" property is reversed (H=reset state, + L=operation state). +- phy-reset-post-delay : Post reset delay in milliseconds. If present then + a delay of phy-reset-post-delay milliseconds will be observed after the + phy-reset-gpios has been toggled. Can be omitted thus no delay is + observed. Delay is in range of 1ms to 1000ms. Other delays are invalid. + Example: ethernet@83fec000 { diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml index 91d3e78b3395..400df2da018a 100644 --- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml @@ -37,7 +37,8 @@ properties: hwlocks: true st,syscfg: - $ref: "/schemas/types.yaml#/definitions/phandle-array" + allOf: + - $ref: "/schemas/types.yaml#/definitions/phandle-array" description: Should be phandle/offset/mask items: - description: Phandle to the syscon node which includes IRQ mux selection. diff --git a/Documentation/devicetree/bindings/riscv/cpus.txt b/Documentation/devicetree/bindings/riscv/cpus.txt deleted file mode 100644 index adf7b7af5dc3..000000000000 --- a/Documentation/devicetree/bindings/riscv/cpus.txt +++ /dev/null @@ -1,162 +0,0 @@ -=================== -RISC-V CPU Bindings -=================== - -The device tree allows to describe the layout of CPUs in a system through -the "cpus" node, which in turn contains a number of subnodes (ie "cpu") -defining properties for every cpu. - -Bindings for CPU nodes follow the Devicetree Specification, available from: - -https://www.devicetree.org/specifications/ - -with updates for 32-bit and 64-bit RISC-V systems provided in this document. - -=========== -Terminology -=========== - -This document uses some terminology common to the RISC-V community that is not -widely used, the definitions of which are listed here: - -* hart: A hardware execution context, which contains all the state mandated by - the RISC-V ISA: a PC and some registers. 
This terminology is designed to - disambiguate software's view of execution contexts from any particular - microarchitectural implementation strategy. For example, my Intel laptop is - described as having one socket with two cores, each of which has two hyper - threads. Therefore this system has four harts. - -===================================== -cpus and cpu node bindings definition -===================================== - -The RISC-V architecture, in accordance with the Devicetree Specification, -requires the cpus and cpu nodes to be present and contain the properties -described below. - -- cpus node - - Description: Container of cpu nodes - - The node name must be "cpus". - - A cpus node must define the following properties: - - - #address-cells - Usage: required - Value type: <u32> - Definition: must be set to 1 - - #size-cells - Usage: required - Value type: <u32> - Definition: must be set to 0 - -- cpu node - - Description: Describes a hart context - - PROPERTIES - - - device_type - Usage: required - Value type: <string> - Definition: must be "cpu" - - reg - Usage: required - Value type: <u32> - Definition: The hart ID of this CPU node - - compatible: - Usage: required - Value type: <stringlist> - Definition: must contain "riscv", may contain one of - "sifive,rocket0" - - mmu-type: - Usage: optional - Value type: <string> - Definition: Specifies the CPU's MMU type. Possible values are - "riscv,sv32" - "riscv,sv39" - "riscv,sv48" - - riscv,isa: - Usage: required - Value type: <string> - Definition: Contains the RISC-V ISA string of this hart. These - ISA strings are defined by the RISC-V ISA manual. - -Example: SiFive Freedom U540G Development Kit ---------------------------------------------- - -This system contains two harts: a hart marked as disabled that's used for -low-level system tasks and should be ignored by Linux, and a second hart that -Linux is allowed to run on. - - cpus { - #address-cells = <1>; - #size-cells = <0>; - timebase-frequency = <1000000>; - cpu@0 { - clock-frequency = <1600000000>; - compatible = "sifive,rocket0", "riscv"; - device_type = "cpu"; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <16384>; - next-level-cache = <&L15 &L0>; - reg = <0>; - riscv,isa = "rv64imac"; - status = "disabled"; - L10: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; - }; - }; - cpu@1 { - clock-frequency = <1600000000>; - compatible = "sifive,rocket0", "riscv"; - d-cache-block-size = <64>; - d-cache-sets = <64>; - d-cache-size = <32768>; - d-tlb-sets = <1>; - d-tlb-size = <32>; - device_type = "cpu"; - i-cache-block-size = <64>; - i-cache-sets = <64>; - i-cache-size = <32768>; - i-tlb-sets = <1>; - i-tlb-size = <32>; - mmu-type = "riscv,sv39"; - next-level-cache = <&L15 &L0>; - reg = <1>; - riscv,isa = "rv64imafdc"; - status = "okay"; - tlb-split; - L13: interrupt-controller { - #interrupt-cells = <1>; - compatible = "riscv,cpu-intc"; - interrupt-controller; - }; - }; - }; - -Example: Spike ISA Simulator with 1 Hart ----------------------------------------- - -This device tree matches the Spike ISA golden model as run with `spike -p1`. 
- - cpus { - cpu@0 { - device_type = "cpu"; - reg = <0x00000000>; - status = "okay"; - compatible = "riscv"; - riscv,isa = "rv64imafdc"; - mmu-type = "riscv,sv48"; - clock-frequency = <0x3b9aca00>; - interrupt-controller { - #interrupt-cells = <0x00000001>; - interrupt-controller; - compatible = "riscv,cpu-intc"; - } - } - } diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml index c899111aa5e3..b261a3015f84 100644 --- a/Documentation/devicetree/bindings/riscv/cpus.yaml +++ b/Documentation/devicetree/bindings/riscv/cpus.yaml @@ -10,6 +10,18 @@ maintainers: - Paul Walmsley <paul.walmsley@sifive.com> - Palmer Dabbelt <palmer@sifive.com> +description: | + This document uses some terminology common to the RISC-V community + that is not widely used, the definitions of which are listed here: + + hart: A hardware execution context, which contains all the state + mandated by the RISC-V ISA: a PC and some registers. This + terminology is designed to disambiguate software's view of execution + contexts from any particular microarchitectural implementation + strategy. For example, an Intel laptop containing one socket with + two cores, each of which has two hyperthreads, could be described as + having four harts. + properties: compatible: items: @@ -50,6 +62,10 @@ properties: User-Level ISA document, available from https://riscv.org/specifications/ + While the isa strings in ISA specification are case + insensitive, letters in the riscv,isa string must be all + lowercase to simplify parsing. + timebase-frequency: type: integer minimum: 1 diff --git a/Documentation/devicetree/bindings/riscv/sifive.yaml b/Documentation/devicetree/bindings/riscv/sifive.yaml index 9d17dc2f3f84..3ab532713dc1 100644 --- a/Documentation/devicetree/bindings/riscv/sifive.yaml +++ b/Documentation/devicetree/bindings/riscv/sifive.yaml @@ -19,7 +19,7 @@ properties: compatible: items: - enum: - - sifive,freedom-unleashed-a00 + - sifive,hifive-unleashed-a00 - const: sifive,fu540-c000 - const: sifive,fu540 ... diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO index 9267f3fb131f..edbbccda1942 100644 --- a/Documentation/filesystems/cifs/TODO +++ b/Documentation/filesystems/cifs/TODO @@ -13,7 +13,8 @@ a) SMB3 (and SMB3.1.1) missing optional features: - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl currently the only two server side copy mechanisms supported) -b) improved sparse file support +b) improved sparse file support (fiemap and SEEK_HOLE are implemented +but additional features would be supportable by the protocol). c) Directory entry caching relies on a 1 second timer, rather than using Directory Leases, currently only the root file handle is cached longer @@ -21,9 +22,13 @@ using Directory Leases, currently only the root file handle is cached longer d) quota support (needs minor kernel change since quota calls to make it to network filesystems or deviceless filesystems) -e) Additional use cases where we use "compoounding" (e.g. open/query/close -and open/setinfo/close) to reduce the number of roundtrips, and also -open to reduce redundant opens (using deferred close and reference counts more). +e) Additional use cases can be optimized to use "compounding" +(e.g. open/query/close and open/setinfo/close) to reduce the number +of roundtrips to the server and improve performance. Various cases +(stat, statfs, create, unlink, mkdir) already have been improved by +using compounding but more can be done. 
In addition we could significantly +reduce redundant opens by using deferred close (with handle caching leases) +and better using reference counters on file handles. f) Finish inotify support so kde and gnome file list windows will autorefresh (partially complete by Asser). Needs minor kernel @@ -43,18 +48,17 @@ mount or a per server basis to client UIDs or nobody if no mapping exists. Also better integration with winbind for resolving SID owners k) Add tools to take advantage of more smb3 specific ioctls and features -(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server -is in progress, and a passthrough query_info call is already implemented -in cifs.ko to allow smb3 info levels queries to be sent from userspace) +(passthrough ioctl/fsctl is now implemented in cifs.ko to allow sending +various SMB3 fsctls and query info and set info calls directly from user space) +Add tools to make setting various non-POSIX metadata attributes easier +from tools (e.g. extending what was done in smb-info tool). l) encrypted file support m) improved stats gathering tools (perhaps integration with nfsometer?) to extend and make easier to use what is currently in /proc/fs/cifs/Stats -n) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed -file attribute via chflags) and improve user space tools for managing and -viewing them. +n) Add support for claims based ACLs ("DAC") o) mount helper GUI (to simplify the various configuration options on mount) @@ -82,6 +86,8 @@ so far). w) Add support for additional strong encryption types, and additional spnego authentication mechanisms (see MS-SMB2) +x) Finish support for SMB3.1.1 compression + KNOWN BUGS ==================================== See http://bugzilla.samba.org - search on product "CifsVFS" for diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst index b70b70dc4524..0dd3f748239f 100644 --- a/Documentation/networking/tls-offload.rst +++ b/Documentation/networking/tls-offload.rst @@ -506,21 +506,3 @@ Drivers should ignore the changes to TLS the device feature flags. These flags will be acted upon accordingly by the core ``ktls`` code. TLS device feature flags only control adding of new TLS connection offloads, old connections will remain active after flags are cleared. - -Known bugs -========== - -skb_orphan() leaks clear text ------------------------------ - -Currently drivers depend on the :c:member:`sk` member of -:c:type:`struct sk_buff <sk_buff>` to identify segments requiring -encryption. Any operation which removes or does not preserve the socket -association such as :c:func:`skb_orphan` or :c:func:`skb_clone` -will cause the driver to miss the packets and lead to clear text leaks. - -Redirects leak clear text -------------------------- - -In the RX direction, if segment has already been decrypted by the device -and it gets redirected or mirrored - clear text will be transmitted out. diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt index 949d5dcdd9a3..0104830d5075 100644 --- a/Documentation/networking/tuntap.txt +++ b/Documentation/networking/tuntap.txt @@ -204,8 +204,8 @@ Ethernet device, which instead of receiving packets from a physical media, receives them from user space program and instead of sending packets via physical media sends them to the user space program. 
-Let's say that you configured IPX on the tap0, then whenever -the kernel sends an IPX packet to tap0, it is passed to the application +Let's say that you configured IPv6 on the tap0, then whenever +the kernel sends an IPv6 packet to tap0, it is passed to the application (VTun for example). The application encrypts, compresses and sends it to the other side over TCP or UDP. The application on the other side decompresses and decrypts the data received and writes the packet to the TAP device, diff --git a/MAINTAINERS b/MAINTAINERS index 96d3e60697f5..a406947b369e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -183,7 +183,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com> M: Heiner Kallweit <hkallweit1@gmail.com> L: netdev@vger.kernel.org S: Maintained -F: drivers/net/ethernet/realtek/r8169.c +F: drivers/net/ethernet/realtek/r8169* 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> @@ -6078,7 +6078,7 @@ M: Florian Fainelli <f.fainelli@gmail.com> M: Heiner Kallweit <hkallweit1@gmail.com> L: netdev@vger.kernel.org S: Maintained -F: Documentation/ABI/testing/sysfs-bus-mdio +F: Documentation/ABI/testing/sysfs-class-net-phydev F: Documentation/devicetree/bindings/net/ethernet-phy.yaml F: Documentation/devicetree/bindings/net/mdio* F: Documentation/networking/phy.rst @@ -6357,7 +6357,7 @@ FPGA MANAGER FRAMEWORK M: Moritz Fischer <mdf@kernel.org> L: linux-fpga@vger.kernel.org S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git Q: http://patchwork.kernel.org/project/linux-fpga/list/ F: Documentation/fpga/ F: Documentation/driver-api/fpga/ @@ -6390,7 +6390,7 @@ FRAMEBUFFER LAYER M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> L: dri-devel@lists.freedesktop.org L: linux-fbdev@vger.kernel.org -T: git git://github.com/bzolnier/linux.git +T: git git://anongit.freedesktop.org/drm/drm-misc Q: http://patchwork.kernel.org/project/linux-fbdev/list/ S: Maintained F: Documentation/fb/ @@ -6454,6 +6454,14 @@ S: Maintained F: drivers/perf/fsl_imx8_ddr_perf.c F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt +FREESCALE IMX I2C DRIVER +M: Oleksij Rempel <o.rempel@pengutronix.de> +R: Pengutronix Kernel Team <kernel@pengutronix.de> +L: linux-i2c@vger.kernel.org +S: Maintained +F: drivers/i2c/busses/i2c-imx.c +F: Documentation/devicetree/bindings/i2c/i2c-imx.txt + FREESCALE IMX LPI2C DRIVER M: Dong Aisheng <aisheng.dong@nxp.com> L: linux-i2c@vger.kernel.org @@ -7465,7 +7473,7 @@ F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c F: drivers/video/fbdev/hyperv_fb.c -F: drivers/iommu/hyperv_iommu.c +F: drivers/iommu/hyperv-iommu.c F: net/vmw_vsock/hyperv_transport.c F: include/clocksource/hyperv_timer.h F: include/linux/hyperv.h @@ -8055,6 +8063,7 @@ S: Maintained F: drivers/video/fbdev/i810/ INTEL ASoC DRIVERS +M: Cezary Rojewski <cezary.rojewski@intel.com> M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com> M: Liam Girdwood <liam.r.girdwood@linux.intel.com> M: Jie Yang <yang.jie@linux.intel.com> @@ -8076,6 +8085,13 @@ T: git git://git.code.sf.net/p/intel-sas/isci S: Supported F: drivers/scsi/isci/ +INTEL CPU family model numbers +M: Tony Luck <tony.luck@intel.com> +M: x86@kernel.org +L: linux-kernel@vger.kernel.org +S: Supported +F: arch/x86/include/asm/intel-family.h + INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) M: Jani Nikula <jani.nikula@linux.intel.com> M: Joonas 
Lahtinen <joonas.lahtinen@linux.intel.com> @@ -8427,7 +8443,6 @@ L: linux-xfs@vger.kernel.org L: linux-fsdevel@vger.kernel.org T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git S: Supported -F: fs/iomap.c F: fs/iomap/ F: include/linux/iomap.h @@ -16097,7 +16112,7 @@ S: Maintained F: drivers/net/ethernet/ti/netcp* TI PCM3060 ASoC CODEC DRIVER -M: Kirill Marinushkin <kmarinushkin@birdec.tech> +M: Kirill Marinushkin <kmarinushkin@birdec.com> L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/sound/pcm3060.txt @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc5 NAME = Bobtail Squid # *DOCUMENTATION* @@ -419,6 +419,7 @@ NM = $(CROSS_COMPILE)nm STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump +OBJSIZE = $(CROSS_COMPILE)size PAHOLE = pahole LEX = flex YACC = bison @@ -475,9 +476,9 @@ GCC_PLUGINS_CFLAGS := CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS -export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE -export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL +export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX +export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE @@ -845,7 +846,7 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) KBUILD_CFLAGS += -Wdeclaration-after-statement # Warn about unmarked fall-throughs in switch statement. -KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=3,) +KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,) # Variable Length Arrays (VLAs) should not be used anywhere in the kernel KBUILD_CFLAGS += -Wvla @@ -1002,6 +1003,8 @@ endif PHONY += prepare0 +export MODORDER := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order + ifeq ($(KBUILD_EXTMOD),) core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ @@ -1771,13 +1774,22 @@ build-dir = $(patsubst %/,%,$(dir $(build-target))) $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) %.symtypes: prepare FORCE $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +ifeq ($(KBUILD_EXTMOD),) +# For the single build of an in-tree module, use a temporary file to avoid +# the situation of modules_install installing an invalid modules.order. 
+%.ko: MODORDER := .modules.tmp +endif +%.ko: prepare FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(build-target:.ko=.mod) + $(Q)echo $(build-target) > $(MODORDER) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost # Modules PHONY += / /: ./ %/: prepare FORCE - $(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir) + $(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir) need-modorder=1 # FIXME Should go into a make.lib or something # =========================================================================== diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index af8b8e15f589..b0c195e3a06d 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -544,6 +544,7 @@ static int arch_build_bp_info(struct perf_event *bp, if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) && max_watchpoint_len >= 8) break; + /* Else, fall through */ default: return -EINVAL; } @@ -608,10 +609,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, /* Allow halfword watchpoints and breakpoints. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; + /* Else, fall through */ case 3: /* Allow single byte watchpoint. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; + /* Else, fall through */ default: ret = -EINVAL; goto out; @@ -861,6 +864,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, break; case ARM_ENTRY_ASYNC_WATCHPOINT: WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); + /* Fall through */ case ARM_ENTRY_SYNC_WATCHPOINT: watchpoint_handler(addr, fsr, regs); break; @@ -909,6 +913,7 @@ static bool core_has_os_save_restore(void) ARM_DBG_READ(c1, c1, 4, oslsr); if (oslsr & ARM_OSLSR_OSLM0) return true; + /* Else, fall through */ default: return false; } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 09f6fdd41974..ab2568996ddb 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -596,6 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall) switch (retval) { case -ERESTART_RESTARTBLOCK: restart -= 2; + /* Fall through */ case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index d2806bcff8bb..07745ee022a1 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -651,13 +651,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) } static void reset_coproc_regs(struct kvm_vcpu *vcpu, - const struct coproc_reg *table, size_t num) + const struct coproc_reg *table, size_t num, + unsigned long *bmap) { unsigned long i; for (i = 0; i < num; i++) - if (table[i].reset) + if (table[i].reset) { + int reg = table[i].reg; + table[i].reset(vcpu, &table[i]); + if (reg > 0 && reg < NR_CP15_REGS) { + set_bit(reg, bmap); + if (table[i].is_64bit) + set_bit(reg + 1, bmap); + } + } } static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) @@ -1432,17 +1441,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) { size_t num; const struct coproc_reg *table; - - /* Catch someone adding a register without putting in reset entry. */ - memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15)); + DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, }; /* Generic chip reset first (so target could override). 
*/ - reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); + reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap); table = get_target_table(vcpu->arch.target, &num); - reset_coproc_regs(vcpu, table, num); + reset_coproc_regs(vcpu, table, num, bmap); for (num = 1; num < NR_CP15_REGS; num++) - WARN(vcpu_cp15(vcpu, num) == 0x42424242, + WARN(!test_bit(num, bmap), "Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c index 1c9a4be8b503..1c05c5bf7e5c 100644 --- a/arch/arm/mach-ep93xx/crunch.c +++ b/arch/arm/mach-ep93xx/crunch.c @@ -49,6 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. */ + /* Fall through */ case THREAD_NOTIFY_EXIT: crunch_task_release(thread); diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c index 5a67a71f80cc..76a65df42d10 100644 --- a/arch/arm/mach-tegra/reset.c +++ b/arch/arm/mach-tegra/reset.c @@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void) switch (err) { case -ENOSYS: tegra_cpu_reset_handler_set(reset_address); - /* pass-through */ + /* fall through */ case 0: is_enabled = true; break; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 8cdb78642e93..04b36436cbc0 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -695,7 +695,7 @@ thumb2arm(u16 tinstr) return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] | (tinstr & 255); /* register_list */ } - /* Else fall through for illegal instruction case */ + /* Else, fall through - for illegal instruction case */ default: return BAD_INSTR; @@ -751,6 +751,8 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs, case 0xe8e0: case 0xe9e0: poffset->un = (tinst2 & 0xff) << 2; + /* Fall through */ + case 0xe940: case 0xe9c0: return do_alignment_ldrdstrd; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6774b03aa405..d42557ee69c2 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) { - if (!dev_is_dma_coherent(dev)) - return __get_dma_pgprot(attrs, prot); - return prot; + return __get_dma_pgprot(attrs, prot); } void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index 79f43acf9acb..08c99413d02c 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -388,17 +388,15 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) /* * not supported by current hardware on OMAP1 * w |= (0x03 << 7); - * fall through */ + /* fall through */ case OMAP_DMA_DATA_BURST_16: if (dma_omap2plus()) { burst = 0x3; break; } - /* - * OMAP1 don't support burst 16 - * fall through - */ + /* OMAP1 don't support burst 16 */ + /* fall through */ default: BUG(); } @@ -474,10 +472,8 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) burst = 0x3; break; } - /* - * OMAP1 don't support burst 16 - * fall through - */ + /* OMAP1 don't support burst 16 */ + /* fall through */ default: printk(KERN_ERR "Invalid DMA burst mode\n"); BUG(); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index a8b205e5c4a8..ddf9d762ac62 100644 --- 
a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -316,9 +316,10 @@ #define kvm_arm_exception_class \ ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \ - ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(CP14_64), ECN(SVC64), \ - ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(IMP_DEF), ECN(IABT_LOW), \ - ECN(IABT_CUR), ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \ + ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \ + ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \ + ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \ + ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \ ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \ ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \ ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 5fdcfe237338..e09760ece844 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -209,7 +209,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) static inline pte_t pte_mkdevmap(pte_t pte) { - return set_pte_bit(pte, __pgprot(PTE_DEVMAP)); + return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); } static inline void set_pte(pte_t *ptep, pte_t pte) @@ -396,7 +396,10 @@ static inline int pmd_protnone(pmd_t pmd) #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) #endif -#define pmd_mkdevmap(pmd) pte_pmd(pte_mkdevmap(pmd_pte(pmd))) +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP))); +} #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d19d14ba9ae4..b1fdc486aed8 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + /* + * We already refuse to boot CPUs that don't support our configured + * page size, so we can only detect mismatches for a page size other + * than the one we're currently using. Unfortunately, SoCs like this + * exist in the wild so, even though we don't like it, we'll have to go + * along with it and treat them as non-strict. 
+ */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), /* Linux shouldn't care about secure memory */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 1285c7b2947f..171773257974 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (offset < -SZ_128M || offset >= SZ_128M) { #ifdef CONFIG_ARM64_MODULE_PLTS - struct plt_entry trampoline; + struct plt_entry trampoline, *dst; struct module *mod; /* @@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * to check if the actual opcodes are in fact identical, * regardless of the offset in memory so use memcmp() instead. */ - trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); - if (memcmp(mod->arch.ftrace_trampoline, &trampoline, - sizeof(trampoline))) { - if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { + dst = mod->arch.ftrace_trampoline; + trampoline = get_plt_entry(addr, dst); + if (memcmp(dst, &trampoline, sizeof(trampoline))) { + if (plt_entry_is_initialized(dst)) { pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); return -EINVAL; } /* point the trampoline to our ftrace entry point */ module_disable_ro(mod); - *mod->arch.ftrace_trampoline = trampoline; + *dst = trampoline; module_enable_ro(mod, true); - /* update trampoline before patching in the branch */ - smp_wmb(); + /* + * Ensure updated trampoline is visible to instruction + * fetch before we patch in the branch. 
+ */ + __flush_icache_range((unsigned long)&dst[0], + (unsigned long)&dst[1]); } - addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; + addr = (unsigned long)dst; #else /* CONFIG_ARM64_MODULE_PLTS */ return -EINVAL; #endif /* CONFIG_ARM64_MODULE_PLTS */ diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index d3313797cca9..32893b3d9164 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -733,6 +733,7 @@ static const char *esr_class_str[] = { [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC", [ESR_ELx_EC_FP_ASIMD] = "ASIMD", [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS", + [ESR_ELx_EC_PAC] = "PAC", [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC", [ESR_ELx_EC_ILL] = "PSTATE.IL", [ESR_ELx_EC_SVC32] = "SVC (AArch32)", diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 26781da3ad3e..0fc9872a1467 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -18,40 +18,70 @@ #define save_debug(ptr,reg,nr) \ switch (nr) { \ case 15: ptr[15] = read_debug(reg, 15); \ + /* Fall through */ \ case 14: ptr[14] = read_debug(reg, 14); \ + /* Fall through */ \ case 13: ptr[13] = read_debug(reg, 13); \ + /* Fall through */ \ case 12: ptr[12] = read_debug(reg, 12); \ + /* Fall through */ \ case 11: ptr[11] = read_debug(reg, 11); \ + /* Fall through */ \ case 10: ptr[10] = read_debug(reg, 10); \ + /* Fall through */ \ case 9: ptr[9] = read_debug(reg, 9); \ + /* Fall through */ \ case 8: ptr[8] = read_debug(reg, 8); \ + /* Fall through */ \ case 7: ptr[7] = read_debug(reg, 7); \ + /* Fall through */ \ case 6: ptr[6] = read_debug(reg, 6); \ + /* Fall through */ \ case 5: ptr[5] = read_debug(reg, 5); \ + /* Fall through */ \ case 4: ptr[4] = read_debug(reg, 4); \ + /* Fall through */ \ case 3: ptr[3] = read_debug(reg, 3); \ + /* Fall through */ \ case 2: ptr[2] = read_debug(reg, 2); \ + /* Fall through */ \ case 1: ptr[1] = read_debug(reg, 1); \ + /* Fall through */ \ default: ptr[0] = read_debug(reg, 0); \ } #define restore_debug(ptr,reg,nr) \ switch (nr) { \ case 15: write_debug(ptr[15], reg, 15); \ + /* Fall through */ \ case 14: write_debug(ptr[14], reg, 14); \ + /* Fall through */ \ case 13: write_debug(ptr[13], reg, 13); \ + /* Fall through */ \ case 12: write_debug(ptr[12], reg, 12); \ + /* Fall through */ \ case 11: write_debug(ptr[11], reg, 11); \ + /* Fall through */ \ case 10: write_debug(ptr[10], reg, 10); \ + /* Fall through */ \ case 9: write_debug(ptr[9], reg, 9); \ + /* Fall through */ \ case 8: write_debug(ptr[8], reg, 8); \ + /* Fall through */ \ case 7: write_debug(ptr[7], reg, 7); \ + /* Fall through */ \ case 6: write_debug(ptr[6], reg, 6); \ + /* Fall through */ \ case 5: write_debug(ptr[5], reg, 5); \ + /* Fall through */ \ case 4: write_debug(ptr[4], reg, 4); \ + /* Fall through */ \ case 3: write_debug(ptr[3], reg, 3); \ + /* Fall through */ \ case 2: write_debug(ptr[2], reg, 2); \ + /* Fall through */ \ case 1: write_debug(ptr[1], reg, 1); \ + /* Fall through */ \ default: write_debug(ptr[0], reg, 0); \ } diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c index 0d60e4f0af66..a900181e3867 100644 --- a/arch/arm64/kvm/regmap.c +++ b/arch/arm64/kvm/regmap.c @@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v) switch (spsr_idx) { case KVM_SPSR_SVC: write_sysreg_el1(v, SYS_SPSR); + break; case KVM_SPSR_ABT: write_sysreg(v, spsr_abt); + break; case KVM_SPSR_UND: write_sysreg(v, spsr_und); + break; case KVM_SPSR_IRQ: write_sysreg(v, spsr_irq); + break; case KVM_SPSR_FIQ: 
write_sysreg(v, spsr_fiq); + break; } } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index f26e181d881c..2071260a275b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -632,7 +632,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) */ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); - __vcpu_sys_reg(vcpu, PMCR_EL0) = val; + __vcpu_sys_reg(vcpu, r->reg) = val; } static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) @@ -981,13 +981,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ - trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ + trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \ { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ - trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ + trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \ { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ - trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ + trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \ { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ - trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } + trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr } /* Macro to expand the PMEVCNTRn_EL0 register */ #define PMU_PMEVCNTR_EL0(n) \ @@ -1540,7 +1540,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, { SYS_DESC(SYS_CTR_EL0), access_ctr }, - { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, + { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 }, { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 }, { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 }, @@ -2254,13 +2254,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, } static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *table, size_t num) + const struct sys_reg_desc *table, size_t num, + unsigned long *bmap) { unsigned long i; for (i = 0; i < num; i++) - if (table[i].reset) + if (table[i].reset) { + int reg = table[i].reg; + table[i].reset(vcpu, &table[i]); + if (reg > 0 && reg < NR_SYS_REGS) + set_bit(reg, bmap); + } } /** @@ -2774,18 +2780,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) { size_t num; const struct sys_reg_desc *table; - - /* Catch someone adding a register without putting in reset entry. */ - memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); + DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, }; /* Generic chip reset first (so target could override). 
*/ - reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); + reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap); table = get_target_table(vcpu->arch.target, true, &num); - reset_sys_reg_descs(vcpu, table, num); + reset_sys_reg_descs(vcpu, table, num, bmap); for (num = 1; num < NR_SYS_REGS; num++) { - if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, + if (WARN(!test_bit(num, bmap), "Didn't reset __vcpu_sys_reg(%zi)\n", num)) break; } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 1d3f0b5a9940..bd2b039f43a6 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -14,9 +14,7 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) { - if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE)) - return pgprot_writecombine(prot); - return prot; + return pgprot_writecombine(prot); } void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 2cfe839f0b3a..1109924560d8 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -150,16 +150,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return 0; } -bool kvm_arch_has_vcpu_debugfs(void) -{ - return false; -} - -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) -{ - return 0; -} - void kvm_mips_free_vcpus(struct kvm *kvm) { unsigned int i; diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile index 55c1396580a4..3747a0cbd3b8 100644 --- a/arch/parisc/math-emu/Makefile +++ b/arch/parisc/math-emu/Makefile @@ -18,4 +18,4 @@ obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \ # other very old or stripped-down PA-RISC CPUs -- not currently supported obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o -CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough=3 +CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 77f6ebf97113..d8dcd8820369 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -121,7 +121,6 @@ config PPC select ARCH_32BIT_OFF_T if PPC32 select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED - select ARCH_HAS_DMA_MMAP_PGPROT select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index b3388d95f451..45e3137ccd71 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -107,22 +107,22 @@ extern void _set_L3CR(unsigned long); static inline void dcbz(void *addr) { - __asm__ __volatile__ ("dcbz %y0" : : "Z"(*(u8 *)addr) : "memory"); + __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory"); } static inline void dcbi(void *addr) { - __asm__ __volatile__ ("dcbi %y0" : : "Z"(*(u8 *)addr) : "memory"); + __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory"); } static inline void dcbf(void *addr) { - __asm__ __volatile__ ("dcbf %y0" : : "Z"(*(u8 *)addr) : "memory"); + __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory"); } static inline void dcbst(void *addr) { - __asm__ __volatile__ ("dcbst %y0" : : "Z"(*(u8 *)addr) : "memory"); + __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory"); } #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index ea0c69236789..56dfa7a2a6f2 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ 
-49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \ signal.o sysfs.o cacheinfo.o time.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ - of_platform.o prom_parse.o \ - dma-common.o + of_platform.o prom_parse.o obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ signal_64.o ptrace32.o \ paca.o nvram_64.o firmware.o diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c deleted file mode 100644 index dc7ef6b17b69..000000000000 --- a/arch/powerpc/kernel/dma-common.c +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Contains common dma routines for all powerpc platforms. - * - * Copyright (C) 2019 Shawn Anastasio. - */ - -#include <linux/mm.h> -#include <linux/dma-noncoherent.h> - -pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, - unsigned long attrs) -{ - if (!dev_is_dma_coherent(dev)) - return pgprot_noncached(prot); - return prot; -} diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 0dba7eb24f92..3e566c2e6066 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) return !!(v->arch.pending_exceptions) || kvm_request_pending(v); } +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) +{ + return kvm_arch_vcpu_runnable(vcpu); +} + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { return false; @@ -452,16 +457,6 @@ err_out: return -EINVAL; } -bool kvm_arch_has_vcpu_debugfs(void) -{ - return false; -} - -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) -{ - return 0; -} - void kvm_arch_destroy_vm(struct kvm *kvm) { unsigned int i; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 93205c0bf71d..3efff552a261 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -54,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_EARLYCON_RISCV_SBI=y CONFIG_HVC_RISCV_SBI=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y CONFIG_SPI=y CONFIG_SPI_SIFIVE=y # CONFIG_PTP_1588_CLOCK is not set diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig index d5449ef805a3..7da93e494445 100644 --- a/arch/riscv/configs/rv32_defconfig +++ b/arch/riscv/configs/rv32_defconfig @@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PCIE_XILINX=y CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_LOOP=y CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_SD=y @@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_EARLYCON_RISCV_SBI=y CONFIG_HVC_RISCV_SBI=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_DRM=y CONFIG_DRM_RADEON=y diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 853b65ef656d..f0227bdce0f0 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from); static inline void __fstate_clean(struct pt_regs *regs) { - regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN; + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN; +} + +static inline void fstate_off(struct task_struct *task, + struct pt_regs *regs) +{ + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF; } static inline void fstate_save(struct task_struct *task, diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 687dd19735a7..4d9bbe8438bf 100644 --- 
a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start, } #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1) -#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0) + #define flush_tlb_range(vma, start, end) \ remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start)) -#define flush_tlb_mm(mm) \ + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long addr) +{ + flush_tlb_range(vma, addr, addr + PAGE_SIZE); +} + +#define flush_tlb_mm(mm) \ remote_sfence_vma(mm_cpumask(mm), 0, -1) #endif /* CONFIG_SMP */ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index f23794bd1e90..fb3a082362eb 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { regs->sstatus = SR_SPIE; - if (has_fpu) + if (has_fpu) { regs->sstatus |= SR_FS_INITIAL; + /* + * Restore the initial value to the FP register + * before starting the user program. + */ + fstate_restore(current, regs); + } regs->sepc = pc; regs->sp = sp; set_fs(USER_DS); @@ -75,10 +81,11 @@ void flush_thread(void) { #ifdef CONFIG_FPU /* - * Reset FPU context + * Reset FPU state and context * frm: round to nearest, ties to even (IEEE default) * fflags: accrued exceptions cleared */ + fstate_off(current, task_pt_regs(current)); memset(¤t->thread.fstate, 0, sizeof(current->thread.fstate)); #endif } diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 8e364ebf37de..267feaa10f6a 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -5,5 +5,3 @@ lib-y += memset.o lib-y += uaccess.o lib-$(CONFIG_64BIT) += tishift.o - -lib-$(CONFIG_32BIT) += udivdi3.o diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c index 87ff89e88f2c..f51c9a03bca1 100644 --- a/arch/riscv/lib/delay.c +++ b/arch/riscv/lib/delay.c @@ -81,9 +81,13 @@ EXPORT_SYMBOL(__delay); void udelay(unsigned long usecs) { u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT; + u64 n; if (unlikely(usecs > MAX_UDELAY_US)) { - __delay((u64)usecs * riscv_timebase / 1000000ULL); + n = (u64)usecs * riscv_timebase; + do_div(n, 1000000); + + __delay(n); return; } diff --git a/arch/riscv/lib/udivdi3.S b/arch/riscv/lib/udivdi3.S deleted file mode 100644 index 3f07476a91a9..000000000000 --- a/arch/riscv/lib/udivdi3.S +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2016-2017 Free Software Foundation, Inc. 
- */ - -#include <linux/linkage.h> - -ENTRY(__udivdi3) - mv a2, a1 - mv a1, a0 - li a0, -1 - beqz a2, .L5 - li a3, 1 - bgeu a2, a1, .L2 -.L1: - blez a2, .L2 - slli a2, a2, 1 - slli a3, a3, 1 - bgtu a1, a2, .L1 -.L2: - li a0, 0 -.L3: - bltu a1, a2, .L4 - sub a1, a1, a2 - or a0, a0, a3 -.L4: - srli a3, a3, 1 - srli a2, a2, 1 - bnez a3, .L3 -.L5: - ret -ENDPROC(__udivdi3) diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index 3c49bde8aa5e..b8aa6a9f937b 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -48,9 +48,7 @@ void store_ipl_parmblock(void) { int rc; - uv_set_shared(__pa(&ipl_block)); rc = __diag308(DIAG308_STORE, &ipl_block); - uv_remove_shared(__pa(&ipl_block)); if (rc == DIAG308_RC_OK && ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION) ipl_block_valid = 1; diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index ac06c3949ab3..34bdc60c0b11 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -114,12 +114,8 @@ recursion_check: * If it comes up a second time then there's something wrong going on: * just break out and report an unknown stack type. */ - if (*visit_mask & (1UL << info->type)) { - printk_deferred_once(KERN_WARNING - "WARNING: stack recursion on stack type %d\n", - info->type); + if (*visit_mask & (1UL << info->type)) goto unknown; - } *visit_mask |= 1UL << info->type; return 0; unknown: diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 5aea1a527443..f384a18e6c26 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -60,12 +60,5 @@ ENTRY(startup_continue) .align 16 .LPG1: -.Lpcmsk:.quad 0x0000000180000000 -.L4malign:.quad 0xffffffffffc00000 -.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 -.Lnop: .long 0x07000700 -.Lparmaddr: - .quad PARMAREA - .align 64 .Ldw: .quad 0x0002000180000000,0x0000000000000000 .Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 2c0a515428d6..6837affc19e8 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -31,7 +31,6 @@ #include <asm/os_info.h> #include <asm/sections.h> #include <asm/boot_data.h> -#include <asm/uv.h> #include "entry.h" #define IPL_PARM_BLOCK_VERSION 0 @@ -892,21 +891,15 @@ static void __reipl_run(void *unused) { switch (reipl_type) { case IPL_TYPE_CCW: - uv_set_shared(__pa(reipl_block_ccw)); diag308(DIAG308_SET, reipl_block_ccw); - uv_remove_shared(__pa(reipl_block_ccw)); diag308(DIAG308_LOAD_CLEAR, NULL); break; case IPL_TYPE_FCP: - uv_set_shared(__pa(reipl_block_fcp)); diag308(DIAG308_SET, reipl_block_fcp); - uv_remove_shared(__pa(reipl_block_fcp)); diag308(DIAG308_LOAD_CLEAR, NULL); break; case IPL_TYPE_NSS: - uv_set_shared(__pa(reipl_block_nss)); diag308(DIAG308_SET, reipl_block_nss); - uv_remove_shared(__pa(reipl_block_nss)); diag308(DIAG308_LOAD_CLEAR, NULL); break; case IPL_TYPE_UNKNOWN: @@ -1176,9 +1169,7 @@ static struct kset *dump_kset; static void diag308_dump(void *dump_block) { - uv_set_shared(__pa(dump_block)); diag308(DIAG308_SET, dump_block); - uv_remove_shared(__pa(dump_block)); while (1) { if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302) break; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 2b94b0ad3588..253177900950 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1114,8 +1114,7 @@ void __init setup_arch(char **cmdline_p) ROOT_DEV = Root_RAM0; - /* Is init_mm really needed? 
*/ - init_mm.start_code = PAGE_OFFSET; + init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 243d8b1185bf..c6bc190f3c28 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -216,11 +216,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (!vdso_enabled) return 0; - /* - * Only map the vdso for dynamically linked elf binaries. - */ - if (!uses_interp) - return 0; vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT_VDSO diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 49d55327de0b..7e0eb4020917 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -32,10 +32,9 @@ PHDRS { SECTIONS { . = 0x100000; - _stext = .; /* Start of text section */ .text : { - /* Text and read-only data */ - _text = .; + _stext = .; /* Start of text section */ + _text = .; /* Text and read-only data */ HEAD_TEXT TEXT_TEXT SCHED_TEXT @@ -47,11 +46,10 @@ SECTIONS *(.text.*_indirect_*) *(.fixup) *(.gnu.warning) + . = ALIGN(PAGE_SIZE); + _etext = .; /* End of text section */ } :text = 0x0700 - . = ALIGN(PAGE_SIZE); - _etext = .; /* End of text section */ - NOTES :text :note .dummy : { *(.dummy) } :data diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3f520cd837fb..f329dcb3f44c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2516,16 +2516,6 @@ out_err: return rc; } -bool kvm_arch_has_vcpu_debugfs(void) -{ - return false; -} - -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) -{ - return 0; -} - void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 3, "%s", "free cpu"); diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 3b93ba0b5d8d..5d67b81c704a 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -161,9 +161,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, } #endif - for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) { + pmd = pmd_offset(pud, addr); + for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) { st->current_address = addr; - pmd = pmd_offset(pud, addr); if (!pmd_none(*pmd)) { if (pmd_large(*pmd)) { prot = pmd_val(*pmd) & @@ -192,9 +192,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, } #endif - for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) { + pud = pud_offset(p4d, addr); + for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) { st->current_address = addr; - pud = pud_offset(p4d, addr); if (!pud_none(*pud)) if (pud_large(*pud)) { prot = pud_val(*pud) & @@ -222,9 +222,9 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, } #endif - for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) { + p4d = p4d_offset(pgd, addr); + for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) { st->current_address = addr; - p4d = p4d_offset(pgd, addr); if (!p4d_none(*p4d)) walk_pud_level(m, st, p4d, addr); else diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss index 884a9caff5fb..f4f4c2c6dee9 100644 --- a/arch/s390/scripts/Makefile.chkbss +++ b/arch/s390/scripts/Makefile.chkbss @@ -11,8 +11,7 @@ chkbss: $(addprefix $(obj)/, $(chkbss-files)) quiet_cmd_chkbss = CHKBSS $< cmd_chkbss = \ - if $(OBJDUMP) -h $< | grep -q "\.bss" && \ - ! 
$(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \ + if ! $(OBJSIZE) --common $< | $(AWK) 'END { if ($$3) exit 1 }'; then \ echo "error: $< .bss section is not empty" >&2; exit 1; \ fi; \ touch $@; diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c index defebf1a9c8a..845543780cc5 100644 --- a/arch/sh/kernel/disassemble.c +++ b/arch/sh/kernel/disassemble.c @@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn) printk("dbr"); break; case FD_REG_N: - if (0) - goto d_reg_n; case F_REG_N: printk("fr%d", rn); break; @@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) printk("xd%d", rn & ~1); break; } - d_reg_n: + /* else, fall through */ case D_REG_N: printk("dr%d", rn); break; @@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) printk("xd%d", rm & ~1); break; } + /* else, fall through */ case D_REG_M: printk("dr%d", rm); break; diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index 3bd010b4c55f..f10d64311127 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type, switch (sh_type) { case SH_BREAKPOINT_READ: *gen_type = HW_BREAKPOINT_R; + break; case SH_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c index 401e30ca0a75..8272a4492844 100644 --- a/arch/x86/boot/string.c +++ b/arch/x86/boot/string.c @@ -37,6 +37,14 @@ int memcmp(const void *s1, const void *s2, size_t len) return diff; } +/* + * Clang may lower `memcmp == 0` to `bcmp == 0`. + */ +int bcmp(const void *s1, const void *s2, size_t len) +{ + return memcmp(s1, s2, len); +} + int strcmp(const char *str1, const char *str2) { const unsigned char *s1 = (const unsigned char *)str1; diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 101eb944f13c..f5e90a849bca 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -18,6 +18,20 @@ * Note: efi_info is commonly left uninitialized, but that field has a * private magic, so it is better to leave it unchanged. */ + +#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); }) + +#define BOOT_PARAM_PRESERVE(struct_member) \ + { \ + .start = offsetof(struct boot_params, struct_member), \ + .len = sizeof_mbr(struct boot_params, struct_member), \ + } + +struct boot_params_to_save { + unsigned int start; + unsigned int len; +}; + static void sanitize_boot_params(struct boot_params *boot_params) { /* @@ -35,21 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params) * problems again. 
*/ if (boot_params->sentinel) { - /* fields in boot_params are left uninitialized, clear them */ - boot_params->acpi_rsdp_addr = 0; - memset(&boot_params->ext_ramdisk_image, 0, - (char *)&boot_params->efi_info - - (char *)&boot_params->ext_ramdisk_image); - memset(&boot_params->kbd_status, 0, - (char *)&boot_params->hdr - - (char *)&boot_params->kbd_status); - memset(&boot_params->_pad7[0], 0, - (char *)&boot_params->edd_mbr_sig_buffer[0] - - (char *)&boot_params->_pad7[0]); - memset(&boot_params->_pad8[0], 0, - (char *)&boot_params->eddbuf[0] - - (char *)&boot_params->_pad8[0]); - memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9)); + static struct boot_params scratch; + char *bp_base = (char *)boot_params; + char *save_base = (char *)&scratch; + int i; + + const struct boot_params_to_save to_save[] = { + BOOT_PARAM_PRESERVE(screen_info), + BOOT_PARAM_PRESERVE(apm_bios_info), + BOOT_PARAM_PRESERVE(tboot_addr), + BOOT_PARAM_PRESERVE(ist_info), + BOOT_PARAM_PRESERVE(acpi_rsdp_addr), + BOOT_PARAM_PRESERVE(hd0_info), + BOOT_PARAM_PRESERVE(hd1_info), + BOOT_PARAM_PRESERVE(sys_desc_table), + BOOT_PARAM_PRESERVE(olpc_ofw_header), + BOOT_PARAM_PRESERVE(efi_info), + BOOT_PARAM_PRESERVE(alt_mem_k), + BOOT_PARAM_PRESERVE(scratch), + BOOT_PARAM_PRESERVE(e820_entries), + BOOT_PARAM_PRESERVE(eddbuf_entries), + BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), + BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), + BOOT_PARAM_PRESERVE(e820_table), + BOOT_PARAM_PRESERVE(eddbuf), + }; + + memset(&scratch, 0, sizeof(scratch)); + + for (i = 0; i < ARRAY_SIZE(to_save); i++) { + memcpy(save_base + to_save[i].start, + bp_base + to_save[i].start, to_save[i].len); + } + + memcpy(boot_params, save_base, sizeof(*boot_params)); } } diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7b0a4ee77313..74e88e5edd9c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -35,6 +35,8 @@ #include <asm/kvm_vcpu_regs.h> #include <asm/hyperv-tlfs.h> +#define __KVM_HAVE_ARCH_VCPU_DEBUGFS + #define KVM_MAX_VCPUS 288 #define KVM_SOFT_MAX_VCPUS 240 #define KVM_MAX_VCPU_ID 1023 @@ -1175,6 +1177,7 @@ struct kvm_x86_ops { int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); + bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu); int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, bool *expired); diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 1492799b8f43..ee2d91e382f1 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -184,7 +184,8 @@ void __init default_setup_apic_routing(void) def_to_bigsmp = 0; break; } - /* If P4 and above fall through */ + /* P4 and above */ + /* fall through */ case X86_VENDOR_HYGON: case X86_VENDOR_AMD: def_to_bigsmp = 1; diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index 4296c702a3f7..72182809b333 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c @@ -98,6 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) case 7: if (size < 0x40) break; + /* Else, fall through */ case 6: case 5: case 4: diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c index 6a204e7336c1..32b4dc9030aa 100644 --- a/arch/x86/kernel/cpu/umwait.c +++ b/arch/x86/kernel/cpu/umwait.c @@ -18,6 +18,12 @@ static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, 
UMWAIT_C02_ENABLE); /* + * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by + * hardware or BIOS before kernel boot. + */ +static u32 orig_umwait_control_cached __ro_after_init; + +/* * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in * the sysfs write functions. */ @@ -53,6 +59,23 @@ static int umwait_cpu_online(unsigned int cpu) } /* + * The CPU hotplug callback sets the control MSR to the original control + * value. + */ +static int umwait_cpu_offline(unsigned int cpu) +{ + /* + * This code is protected by the CPU hotplug already and + * orig_umwait_control_cached is never changed after it caches + * the original control MSR value in umwait_init(). So there + * is no race condition here. + */ + wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0); + + return 0; +} + +/* * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which * is the only active CPU at this time. The MSR is set up on the APs via the * CPU hotplug callback. @@ -185,8 +208,22 @@ static int __init umwait_init(void) if (!boot_cpu_has(X86_FEATURE_WAITPKG)) return -ENODEV; + /* + * Cache the original control MSR value before the control MSR is + * changed. This is the only place where orig_umwait_control_cached + * is modified. + */ + rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online", - umwait_cpu_online, NULL); + umwait_cpu_online, umwait_cpu_offline); + if (ret < 0) { + /* + * On failure, the control MSR on all CPUs has the + * original control value. + */ + return ret; + } register_syscore_ops(&umwait_syscore_ops); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index b7f34fe2171e..4ab377c9fffe 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -308,9 +308,6 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val) static void kvm_guest_cpu_init(void) { - if (!kvm_para_available()) - return; - if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); @@ -625,9 +622,6 @@ static void __init kvm_guest_init(void) { int i; - if (!kvm_para_available()) - return; - paravirt_ops_setup(); register_reboot_notifier(&kvm_pv_reboot_nb); for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) @@ -848,8 +842,6 @@ asm( */ void __init kvm_spinlock_init(void) { - if (!kvm_para_available()) - return; /* Does host kernel support KVM_FEATURE_PV_UNHALT? 
*/ if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) return; diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 0fdbe89d0754..3c5bbe8e4120 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -201,6 +201,7 @@ static int set_segment_reg(struct task_struct *task, case offsetof(struct user_regs_struct, ss): if (unlikely(value == 0)) return -EIO; + /* Else, fall through */ default: *pt_regs_access(task_pt_regs(task), offset) = value; diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c index 329361b69d5e..018aebce33ff 100644 --- a/arch/x86/kvm/debugfs.c +++ b/arch/x86/kvm/debugfs.c @@ -8,11 +8,6 @@ #include <linux/debugfs.h> #include "lapic.h" -bool kvm_arch_has_vcpu_debugfs(void) -{ - return true; -} - static int vcpu_get_timer_advance_ns(void *data, u64 *val) { struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data; @@ -48,37 +43,22 @@ static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n"); -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) { - struct dentry *ret; - - ret = debugfs_create_file("tsc-offset", 0444, - vcpu->debugfs_dentry, - vcpu, &vcpu_tsc_offset_fops); - if (!ret) - return -ENOMEM; + debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu, + &vcpu_tsc_offset_fops); - if (lapic_in_kernel(vcpu)) { - ret = debugfs_create_file("lapic_timer_advance_ns", 0444, - vcpu->debugfs_dentry, - vcpu, &vcpu_timer_advance_ns_fops); - if (!ret) - return -ENOMEM; - } + if (lapic_in_kernel(vcpu)) + debugfs_create_file("lapic_timer_advance_ns", 0444, + vcpu->debugfs_dentry, vcpu, + &vcpu_timer_advance_ns_fops); if (kvm_has_tsc_control) { - ret = debugfs_create_file("tsc-scaling-ratio", 0444, - vcpu->debugfs_dentry, - vcpu, &vcpu_tsc_scaling_fops); - if (!ret) - return -ENOMEM; - ret = debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444, - vcpu->debugfs_dentry, - vcpu, &vcpu_tsc_scaling_frac_fops); - if (!ret) - return -ENOMEM; - + debugfs_create_file("tsc-scaling-ratio", 0444, + vcpu->debugfs_dentry, vcpu, + &vcpu_tsc_scaling_fops); + debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444, + vcpu->debugfs_dentry, vcpu, + &vcpu_tsc_scaling_frac_fops); } - - return 0; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0aa158657f20..685d17c11461 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1548,7 +1548,6 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic) static void apic_timer_expired(struct kvm_lapic *apic) { struct kvm_vcpu *vcpu = apic->vcpu; - struct swait_queue_head *q = &vcpu->wq; struct kvm_timer *ktimer = &apic->lapic_timer; if (atomic_read(&apic->lapic_timer.pending)) @@ -1566,13 +1565,6 @@ static void apic_timer_expired(struct kvm_lapic *apic) atomic_inc(&apic->lapic_timer.pending); kvm_set_pending_timer(vcpu); - - /* - * For x86, the atomic_inc() is serialized, thus - * using swait_active() is safe. 
- */ - if (swait_active(q)) - swake_up_one(q); } static void start_sw_tscdeadline(struct kvm_lapic *apic) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 7eafc6907861..d685491fce4d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5190,6 +5190,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) kvm_vcpu_wake_up(vcpu); } +static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + return false; +} + static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) { unsigned long flags; @@ -7314,6 +7319,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .pmu_ops = &amd_pmu_ops, .deliver_posted_interrupt = svm_deliver_avic_intr, + .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt, .update_pi_irte = svm_update_pi_irte, .setup_mce = svm_setup_mce, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 074385c86c09..42ed3faa6af8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6117,6 +6117,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) return max_irr; } +static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + return pi_test_on(vcpu_to_pi_desc(vcpu)); +} + static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) { if (!kvm_vcpu_apicv_active(vcpu)) @@ -7726,6 +7731,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, + .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt, .set_tss_addr = vmx_set_tss_addr, .set_identity_map_addr = vmx_set_identity_map_addr, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c6d951cbd76c..93b0bd45ac73 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9698,6 +9698,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); } +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) +{ + if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) + return true; + + if (kvm_test_request(KVM_REQ_NMI, vcpu) || + kvm_test_request(KVM_REQ_SMI, vcpu) || + kvm_test_request(KVM_REQ_EVENT, vcpu)) + return true; + + if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu)) + return true; + + return false; +} + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { return vcpu->arch.preempted_in_kernel; diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c index 04967cdce5d1..7ad68917a51e 100644 --- a/arch/x86/lib/cpu.c +++ b/arch/x86/lib/cpu.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/types.h> #include <linux/export.h> +#include <asm/cpu.h> unsigned int x86_family(unsigned int sig) { diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c index 6b468517ab71..73dc66d887f3 100644 --- a/arch/x86/math-emu/errors.c +++ b/arch/x86/math-emu/errors.c @@ -178,13 +178,15 @@ void FPU_printall(void) for (i = 0; i < 8; i++) { FPU_REG *r = &st(i); u_char tagi = FPU_gettagi(i); + switch (tagi) { case TAG_Empty: continue; - break; case TAG_Zero: case TAG_Special: + /* Update tagi for the printk below */ tagi = FPU_Special(r); + /* fall through */ case TAG_Valid: printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i, getsign(r) ? '-' : '+', @@ -198,7 +200,6 @@ void FPU_printall(void) printk("Whoops! 
Error in errors.c: tag%d is %d ", i, tagi); continue; - break; } printk("%s\n", tag_desc[(int)(unsigned)tagi]); } diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c index 783c509f957a..127ea54122d7 100644 --- a/arch/x86/math-emu/fpu_trig.c +++ b/arch/x86/math-emu/fpu_trig.c @@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag) case TW_Denormal: if (denormal_operand() < 0) return; - + /* fall through */ case TAG_Zero: case TAG_Valid: setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr)); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index eaaed5bfc4a4..991549a1c5f3 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, emit_prologue(&prog, bpf_prog->aux->stack_depth, bpf_prog_was_classic(bpf_prog)); + addrs[0] = prog - temp; - for (i = 0; i < insn_cnt; i++, insn++) { + for (i = 1; i <= insn_cnt; i++, insn++) { const s32 imm32 = insn->imm; u32 dst_reg = insn->dst_reg; u32 src_reg = insn->src_reg; @@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) extra_pass = true; goto skip_init_addrs; } - addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL); + addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); if (!addrs) { prog = orig_prog; goto out_addrs; @@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) * Before first pass, make a rough estimation of addrs[] * each BPF instruction is translated to less than 64 bytes */ - for (proglen = 0, i = 0; i < prog->len; i++) { + for (proglen = 0, i = 0; i <= prog->len; i++) { proglen += 64; addrs[i] = proglen; } @@ -1180,7 +1181,7 @@ out_image: if (!image || !prog->is_func || extra_pass) { if (image) - bpf_prog_fill_jited_linfo(prog, addrs); + bpf_prog_fill_jited_linfo(prog, addrs + 1); out_addrs: kfree(addrs); kfree(jit_data); diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 3cf302b26332..8901a1f89cf5 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string targets += $(purgatory-y) PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) +$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE + $(call if_changed_rule,cc_o_c) + $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE $(call if_changed_rule,cc_o_c) @@ -17,11 +20,34 @@ KCOV_INSTRUMENT := n # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That # in turn leaves some undefined symbols like __fentry__ in purgatory and not -# sure how to relocate those. Like kexec-tools, use custom flags. - -KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large -KBUILD_CFLAGS += -m$(BITS) -KBUILD_CFLAGS += $(call cc-option,-fno-PIE) +# sure how to relocate those. 
+ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE) +endif + +ifdef CONFIG_STACKPROTECTOR +CFLAGS_REMOVE_sha256.o += -fstack-protector +CFLAGS_REMOVE_purgatory.o += -fstack-protector +CFLAGS_REMOVE_string.o += -fstack-protector +CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector +endif + +ifdef CONFIG_STACKPROTECTOR_STRONG +CFLAGS_REMOVE_sha256.o += -fstack-protector-strong +CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong +CFLAGS_REMOVE_string.o += -fstack-protector-strong +CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong +endif + +ifdef CONFIG_RETPOLINE +CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS) +CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS) +CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS) +CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS) +endif $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index 6d8d5a34c377..b607bda786f6 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c @@ -68,3 +68,9 @@ void purgatory(void) } copy_backup_region(); } + +/* + * Defined in order to reuse memcpy() and memset() from + * arch/x86/boot/compressed/string.c + */ +void warn(const char *msg) {} diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c deleted file mode 100644 index 01ad43873ad9..000000000000 --- a/arch/x86/purgatory/string.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Simple string functions. - * - * Copyright (C) 2014 Red Hat Inc. - * - * Author: - * Vivek Goyal <vgoyal@redhat.com> - */ - -#include <linux/types.h> - -#include "../boot/string.c" - -void *memcpy(void *dst, const void *src, size_t len) -{ - return __builtin_memcpy(dst, src, len); -} - -void *memset(void *dst, int c, size_t len) -{ - return __builtin_memset(dst, c, len); -} diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 5cb8a62e091c..7c3106093c75 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -511,6 +511,7 @@ void cpu_reset(void) "add %2, %2, %7\n\t" "addi %0, %0, -1\n\t" "bnez %0, 1b\n\t" + "isync\n\t" /* Jump to identity mapping */ "jx %3\n" "2:\n\t" diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 586fcfe227ea..b33be928d164 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -1924,12 +1924,13 @@ static void bfq_add_request(struct request *rq) * confirmed no later than during the next * I/O-plugging interval for bfqq. 
*/ - if (!bfq_bfqq_has_short_ttime(bfqq) && + if (bfqd->last_completed_rq_bfqq && + !bfq_bfqq_has_short_ttime(bfqq) && ktime_get_ns() - bfqd->last_completion < 200 * NSEC_PER_USEC) { if (bfqd->last_completed_rq_bfqq != bfqq && - bfqd->last_completed_rq_bfqq != - bfqq->waker_bfqq) { + bfqd->last_completed_rq_bfqq != + bfqq->waker_bfqq) { /* * First synchronization detected with * a candidate waker queue, or with a @@ -2250,9 +2251,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, blk_rq_pos(container_of(rb_prev(&req->rb_node), struct request, rb_node))) { struct bfq_queue *bfqq = bfq_init_rq(req); - struct bfq_data *bfqd = bfqq->bfqd; + struct bfq_data *bfqd; struct request *prev, *next_rq; + if (!bfqq) + return; + + bfqd = bfqq->bfqd; + /* Reposition request in its sort_list */ elv_rb_del(&bfqq->sort_list, req); elv_rb_add(&bfqq->sort_list, req); @@ -2299,6 +2305,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, struct bfq_queue *bfqq = bfq_init_rq(rq), *next_bfqq = bfq_init_rq(next); + if (!bfqq) + return; + /* * If next and rq belong to the same bfq_queue and next is older * than rq, then reposition rq in the fifo (by substituting next @@ -4764,6 +4773,8 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) */ void bfq_put_queue(struct bfq_queue *bfqq) { + struct bfq_queue *item; + struct hlist_node *n; #ifdef CONFIG_BFQ_GROUP_IOSCHED struct bfq_group *bfqg = bfqq_group(bfqq); #endif @@ -4808,6 +4819,36 @@ void bfq_put_queue(struct bfq_queue *bfqq) bfqq->bfqd->burst_size--; } + /* + * bfqq does not exist any longer, so it cannot be woken by + * any other queue, and cannot wake any other queue. Then bfqq + * must be removed from the woken list of its possible waker + * queue, and all queues in the woken list of bfqq must stop + * having a waker queue. Strictly speaking, these updates + * should be performed when bfqq remains with no I/O source + * attached to it, which happens before bfqq gets freed. In + * particular, this happens when the last process associated + * with bfqq exits or gets associated with a different + * queue. However, both events lead to bfqq being freed soon, + * and dangling references would come out only after bfqq gets + * freed. So these updates are done here, as a simple and safe + * way to handle all cases. 
+ */ + /* remove bfqq from woken list */ + if (!hlist_unhashed(&bfqq->woken_list_node)) + hlist_del_init(&bfqq->woken_list_node); + + /* reset waker for all queues in woken list */ + hlist_for_each_entry_safe(item, n, &bfqq->woken_list, + woken_list_node) { + item->waker_bfqq = NULL; + bfq_clear_bfqq_has_waker(item); + hlist_del_init(&item->woken_list_node); + } + + if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq) + bfqq->bfqd->last_completed_rq_bfqq = NULL; + kmem_cache_free(bfq_pool, bfqq); #ifdef CONFIG_BFQ_GROUP_IOSCHED bfqg_and_blkg_put(bfqg); @@ -4835,9 +4876,6 @@ static void bfq_put_cooperator(struct bfq_queue *bfqq) static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) { - struct bfq_queue *item; - struct hlist_node *n; - if (bfqq == bfqd->in_service_queue) { __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT); bfq_schedule_dispatch(bfqd); @@ -4847,18 +4885,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_put_cooperator(bfqq); - /* remove bfqq from woken list */ - if (!hlist_unhashed(&bfqq->woken_list_node)) - hlist_del_init(&bfqq->woken_list_node); - - /* reset waker for all queues in woken list */ - hlist_for_each_entry_safe(item, n, &bfqq->woken_list, - woken_list_node) { - item->waker_bfqq = NULL; - bfq_clear_bfqq_has_waker(item); - hlist_del_init(&item->woken_list_node); - } - bfq_put_queue(bfqq); /* release process reference */ } @@ -5436,12 +5462,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, spin_lock_irq(&bfqd->lock); bfqq = bfq_init_rq(rq); - if (at_head || blk_rq_is_passthrough(rq)) { + if (!bfqq || at_head || blk_rq_is_passthrough(rq)) { if (at_head) list_add(&rq->queuelist, &bfqd->dispatch); else list_add_tail(&rq->queuelist, &bfqd->dispatch); - } else { /* bfqq is assumed to be non null here */ + } else { idle_timer_disabled = __bfq_insert_request(bfqd, rq); /* * Update bfqq, because, if a queue merge has occurred diff --git a/block/blk-mq.c b/block/blk-mq.c index f78d3287dd82..0835f4d8d42e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) rq = blk_mq_get_request(q, bio, &data); if (unlikely(!rq)) { rq_qos_cleanup(q, bio); - - cookie = BLK_QC_T_NONE; - if (bio->bi_opf & REQ_NOWAIT_INLINE) - cookie = BLK_QC_T_EAGAIN; - else if (bio->bi_opf & REQ_NOWAIT) + if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); - return cookie; + return BLK_QC_T_NONE; } trace_block_getrq(q, bio, bio->bi_opf); @@ -2666,8 +2662,6 @@ void blk_mq_release(struct request_queue *q) struct blk_mq_hw_ctx *hctx, *next; int i; - cancel_delayed_work_sync(&q->requeue_work); - queue_for_each_hw_ctx(q, hctx, i) WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 977c659dcd18..9bfa3ea4ed63 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work) blk_free_queue_stats(q->stats); + if (queue_is_mq(q)) + cancel_delayed_work_sync(&q->requeue_work); + blk_exit_queue(q); blk_queue_free_zone_bitmaps(q); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 391ac0503dc0..76d0f9de767b 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1786,6 +1786,21 @@ nothing_to_do: return 1; } +static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks) +{ + struct request *rq = scmd->request; + u32 req_blocks; + + if 
(!blk_rq_is_passthrough(rq)) + return true; + + req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; + if (n_blocks > req_blocks) + return false; + + return true; +} + /** * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one * @qc: Storage for translated ATA taskfile @@ -1830,6 +1845,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) scsi_10_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; case READ_6: case WRITE_6: @@ -1844,6 +1861,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) */ if (!n_block) n_block = 256; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; case READ_16: case WRITE_16: @@ -1854,6 +1873,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) scsi_16_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; default: DPRINTK("no-byte command\n"); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 10aa27882142..4f115adb4ee8 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -658,6 +658,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) unsigned int offset; unsigned char *buf; + if (!qc->cursg) { + qc->curbytes = qc->nbytes; + return; + } if (qc->curbytes == qc->nbytes - qc->sect_size) ap->hsm_task_state = HSM_ST_LAST; @@ -683,6 +687,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) if (qc->cursg_ofs == qc->cursg->length) { qc->cursg = sg_next(qc->cursg); + if (!qc->cursg) + ap->hsm_task_state = HSM_ST_LAST; qc->cursg_ofs = 0; } } diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index 7c37f2ff09e4..deae466395de 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -158,7 +158,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) static int rb532_pata_driver_remove(struct platform_device *pdev) { struct ata_host *ah = platform_get_drvdata(pdev); - struct rb532_cf_info *info = ah->private_data; ata_host_detach(ah); diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index dd61fdd400f0..68489d1f00bb 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE choice prompt "Backlight initial state" default CHARLCD_BL_FLASH + ---help--- + Select the initial backlight state on boot or module load. + + Previously, there was no option for this: the backlight flashed + briefly on init. Now you can also turn it off/on. 
config CHARLCD_BL_OFF bool "Off" diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 92745efefb54..bef6b85778b6 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -20,7 +20,7 @@ #include <generated/utsrelease.h> -#include <misc/charlcd.h> +#include "charlcd.h" #define LCD_MINOR 156 diff --git a/include/misc/charlcd.h b/drivers/auxdisplay/charlcd.h index 8cf6c18b0adb..00911ad0f3de 100644 --- a/include/misc/charlcd.h +++ b/drivers/auxdisplay/charlcd.h @@ -6,6 +6,9 @@ * Copyright (C) 2016-2017 Glider bvba */ +#ifndef _CHARLCD_H +#define _CHARLCD_H + struct charlcd { const struct charlcd_ops *ops; const unsigned char *char_conv; /* Optional */ @@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd); int charlcd_unregister(struct charlcd *lcd); void charlcd_poke(struct charlcd *lcd); + +#endif /* CHARLCD_H */ diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index ab15b64707ad..bcbe13092327 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -14,8 +14,7 @@ #include <linux/property.h> #include <linux/slab.h> -#include <misc/charlcd.h> - +#include "charlcd.h" enum hd44780_pin { /* Order does matter due to writing to GPIO array subsets! */ diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index e06de63497cf..85965953683e 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -55,7 +55,7 @@ #include <linux/io.h> #include <linux/uaccess.h> -#include <misc/charlcd.h> +#include "charlcd.h" #define KEYPAD_MINOR 185 @@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port) return; err_lcd_unreg: + if (scan_timer.function) + del_timer_sync(&scan_timer); if (lcd.enabled) charlcd_unregister(lcd.charlcd); err_unreg_device: diff --git a/drivers/base/core.c b/drivers/base/core.c index 636058bbf48a..1669d41fcddc 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1823,12 +1823,63 @@ static inline struct kobject *get_glue_dir(struct device *dev) */ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) { + unsigned int ref; + /* see if we live in a "glue" directory */ if (!live_in_glue_dir(glue_dir, dev)) return; mutex_lock(&gdp_mutex); - if (!kobject_has_children(glue_dir)) + /** + * There is a race condition between removing glue directory + * and adding a new device under the glue directory. + * + * CPU1: CPU2: + * + * device_add() + * get_device_parent() + * class_dir_create_and_add() + * kobject_add_internal() + * create_dir() // create glue_dir + * + * device_add() + * get_device_parent() + * kobject_get() // get glue_dir + * + * device_del() + * cleanup_glue_dir() + * kobject_del(glue_dir) + * + * kobject_add() + * kobject_add_internal() + * create_dir() // in glue_dir + * sysfs_create_dir_ns() + * kernfs_create_dir_ns(sd) + * + * sysfs_remove_dir() // glue_dir->sd=NULL + * sysfs_put() // free glue_dir->sd + * + * // sd is freed + * kernfs_new_node(sd) + * kernfs_get(glue_dir) + * kernfs_add_one() + * kernfs_put() + * + * Before CPU1 remove last child device under glue dir, if CPU2 add + * a new device under glue dir, the glue_dir kobject reference count + * will be increase to 2 in kobject_get(k). And CPU2 has been called + * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir() + * and sysfs_put(). This result in glue_dir->sd is freed. + * + * Then the CPU2 will see a stale "empty" but still potentially used + * glue dir around in kernfs_new_node(). 
+ * + * In order to avoid this happening, we also should make sure that + * kernfs_node for glue_dir is released in CPU1 only when refcount + * for glue_dir kobj is 1. + */ + ref = kref_read(&glue_dir->kref); + if (!kobject_has_children(glue_dir) && !--ref) kobject_del(glue_dir); kobject_put(glue_dir); mutex_unlock(&gdp_mutex); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 506a0175a5a7..ec974ba9c0c4 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -157,8 +157,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) * the device will only expose one IRQ, and this fallback * allows a common code path across either kind of resource. */ - if (num == 0 && has_acpi_companion(&dev->dev)) - return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); + if (num == 0 && has_acpi_companion(&dev->dev)) { + int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); + + /* Our callers expect -ENXIO for missing IRQs. */ + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } return -ENXIO; #endif diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index a4984136c19d..0fd6f97ee523 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -44,7 +44,7 @@ config REGMAP_IRQ config REGMAP_SOUNDWIRE tristate - depends on SOUNDWIRE_BUS + depends on SOUNDWIRE config REGMAP_SCCB tristate diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 5b49f1b33ebe..e2ea2356da06 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -323,10 +323,14 @@ flush(const char __user *str, size_t cnt, int exiting) } flush_scheduled_work(); - /* pass one: without sleeping, do aoedev_downdev */ + /* pass one: do aoedev_downdev, which might sleep */ +restart1: spin_lock_irqsave(&devlist_lock, flags); for (d = devlist; d; d = d->next) { spin_lock(&d->lock); + if (d->flags & DEVFL_TKILL) + goto cont; + if (exiting) { /* unconditionally take each device down */ } else if (specified) { @@ -338,8 +342,11 @@ flush(const char __user *str, size_t cnt, int exiting) || d->ref) goto cont; + spin_unlock(&d->lock); + spin_unlock_irqrestore(&devlist_lock, flags); aoedev_downdev(d); d->flags |= DEVFL_TKILL; + goto restart1; cont: spin_unlock(&d->lock); } @@ -348,7 +355,7 @@ cont: /* pass two: call freedev, which might sleep, * for aoedevs marked with DEVFL_TKILL */ -restart: +restart2: spin_lock_irqsave(&devlist_lock, flags); for (d = devlist; d; d = d->next) { spin_lock(&d->lock); @@ -357,7 +364,7 @@ restart: spin_unlock(&d->lock); spin_unlock_irqrestore(&devlist_lock, flags); freedev(d); - goto restart; + goto restart2; } spin_unlock(&d->lock); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 3036883fc9f8..ab7ca5989097 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -885,7 +885,7 @@ static void loop_unprepare_queue(struct loop_device *lo) static int loop_kthread_worker_fn(void *worker_ptr) { - current->flags |= PF_LESS_THROTTLE; + current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO; return kthread_worker_fn(worker_ptr); } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 3ac6a5d18071..b90dbcd99c03 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) } } + err = -ENOMEM; for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) @@ -987,7 +988,7 @@ 
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn); if (err) { xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn); - return err; + goto fail; } return 0; @@ -1007,8 +1008,7 @@ fail: } kfree(req); } - return -ENOMEM; - + return err; } static int connect_ring(struct backend_info *be) diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 8b33128dccee..0875470a7806 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev) return 0; } +int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + bt_dev_dbg(hdev, "QCA pre shutdown cmd"); + + skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0, + NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd); + static void qca_tlv_check_data(struct rome_config *config, const struct firmware *fw) { @@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config, BT_DBG("Length\t\t : %d bytes", length); config->dnld_mode = ROME_SKIP_EVT_NONE; + config->dnld_type = ROME_SKIP_EVT_NONE; switch (config->type) { case TLV_TYPE_PATCH: @@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev) evt = skb_put(skb, sizeof(*evt)); evt->ncmd = 1; - evt->opcode = QCA_HCI_CC_OPCODE; + evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE); skb_put_u8(skb, QCA_HCI_CC_SUCCESS); @@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev, */ if (config->dnld_type == ROME_SKIP_EVT_VSE_CC || config->dnld_type == ROME_SKIP_EVT_VSE) - return qca_inject_cmd_complete_event(hdev); + ret = qca_inject_cmd_complete_event(hdev); out: release_firmware(fw); @@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, return err; } + /* Give the controller some time to get ready to receive the NVM */ + msleep(10); + /* Download NVM configuration */ config.type = TLV_TYPE_NVM; if (firmware_name) diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 6a291a7a5d96..69c5315a65fd 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -13,6 +13,7 @@ #define EDL_PATCH_TLV_REQ_CMD (0x1E) #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) #define MAX_SIZE_PER_TLV_SEGMENT (243) +#define QCA_PRE_SHUTDOWN_CMD (0xFC08) #define EDL_CMD_REQ_RES_EVT (0x00) #define EDL_PATCH_VER_RES_EVT (0x19) @@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, const char *firmware_name); int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int qca_send_pre_shutdown_cmd(struct hci_dev *hdev); static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) { return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998; @@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) { return false; } + +static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} #endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3876fee6ad13..5cf0734eb31b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) fw_size = fw->size; /* The size of patch header is 30 
bytes, should be skip */ - if (fw_size < 30) + if (fw_size < 30) { + err = -EINVAL; goto err_release_fw; + } fw_size -= 30; fw_ptr += 30; diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 82a0a3691a63..9a970fd1975a 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -705,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu) unsigned long flags; struct qca_data *qca = hu->priv; - BT_DBG("hu %p want to sleep", hu); + BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); spin_lock_irqsave(&qca->hci_ibs_lock, flags); @@ -720,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu) break; case HCI_IBS_RX_ASLEEP: - /* Fall through */ + break; default: /* Any other state is illegal */ @@ -912,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb) if (hdr->evt == HCI_EV_VENDOR) complete(&qca->drop_ev_comp); - kfree(skb); + kfree_skb(skb); return 0; } @@ -1386,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); + /* Perform pre shutdown command */ + qca_send_pre_shutdown_cmd(hdev); + qca_power_shutdown(hu); return 0; } diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 5e6038fbf115..09e031176bc6 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -55,7 +55,7 @@ static u64 riscv_sched_clock(void) return get_cycles64(); } -static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = { +static struct clocksource riscv_clocksource = { .name = "riscv_clocksource", .rating = 300, .mask = CLOCKSOURCE_MASK(64), @@ -92,7 +92,6 @@ void riscv_timer_interrupt(void) static int __init riscv_timer_init_dt(struct device_node *n) { int cpuid, hartid, error; - struct clocksource *cs; hartid = riscv_of_processor_hartid(n); if (hartid < 0) { @@ -112,8 +111,7 @@ static int __init riscv_timer_init_dt(struct device_node *n) pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n", __func__, cpuid, hartid); - cs = per_cpu_ptr(&riscv_clocksource, cpuid); - error = clocksource_register_hz(cs, riscv_timebase); + error = clocksource_register_hz(&riscv_clocksource, riscv_timebase); if (error) { pr_err("RISCV timer register failed [%d] for cpu = [%d]\n", error, cpuid); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8dda62367816..c28ebf2810f1 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state) } ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max); - if (ret) + if (ret < 0) break; } diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index f9fec2ddf56a..94c1ad7eeddf 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -58,6 +58,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { + switch (authsize) { + case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: + break; + default: + return -EINVAL; + } + return 0; } @@ -104,6 +117,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt) memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_AES; + rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm); rctx->cmd.u.aes.type = ctx->u.aes.type; rctx->cmd.u.aes.mode = 
ctx->u.aes.mode; rctx->cmd.u.aes.action = encrypt; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c69ed4bae2eb..9bc3c62157d7 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -622,6 +622,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, unsigned long long *final; unsigned int dm_offset; + unsigned int authsize; unsigned int jobid; unsigned int ilen; bool in_place = true; /* Default value */ @@ -643,6 +644,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, if (!aes->key) /* Gotta have a key SGL */ return -EINVAL; + /* Zero defaults to 16 bytes, the maximum size */ + authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE; + switch (authsize) { + case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: + break; + default: + return -EINVAL; + } + /* First, decompose the source buffer into AAD & PT, * and the destination buffer into AAD, CT & tag, or * the input into CT & tag. @@ -657,7 +673,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); } else { /* Input length for decryption includes tag */ - ilen = aes->src_len - AES_BLOCK_SIZE; + ilen = aes->src_len - authsize; p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); } @@ -766,8 +782,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { - unsigned int nbytes = aes->src_len - % AES_BLOCK_SIZE; + unsigned int nbytes = ilen % AES_BLOCK_SIZE; if (nbytes) { op.eom = 1; @@ -839,19 +854,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, if (aes->action == CCP_AES_ACTION_ENCRYPT) { /* Put the ciphered tag after the ciphertext. */ - ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE); + ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize); } else { /* Does this ciphered tag match the input? */ - ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE, + ret = ccp_init_dm_workarea(&tag, cmd_q, authsize, DMA_BIDIRECTIONAL); if (ret) goto e_tag; - ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE); + ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize); if (ret) goto e_tag; ret = crypto_memneq(tag.address, final_wa.address, - AES_BLOCK_SIZE) ? -EBADMSG : 0; + authsize) ? 
-EBADMSG : 0; ccp_dm_free(&tag); } @@ -859,11 +874,11 @@ e_tag: ccp_dm_free(&final_wa); e_dst: - if (aes->src_len && !in_place) + if (ilen > 0 && !in_place) ccp_free_data(&dst, cmd_q); e_src: - if (aes->src_len) + if (ilen > 0) ccp_free_data(&src, cmd_q); e_aad: diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index ece83a363e11..f22f6fa612b3 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -314,14 +314,17 @@ void cryp_save_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); + /* Fall through */ case CRYP_KEY_SIZE_192: ctx->key_3_l = readl_relaxed(&src_reg->key_3_l); ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); + /* Fall through */ case CRYP_KEY_SIZE_128: ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); + /* Fall through */ default: ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); @@ -361,14 +364,17 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: writel_relaxed(ctx->key_4_l, &reg->key_4_l); writel_relaxed(ctx->key_4_r, &reg->key_4_r); + /* Fall through */ case CRYP_KEY_SIZE_192: writel_relaxed(ctx->key_3_l, &reg->key_3_l); writel_relaxed(ctx->key_3_r, &reg->key_3_r); + /* Fall through */ case CRYP_KEY_SIZE_128: writel_relaxed(ctx->key_2_l, &reg->key_2_l); writel_relaxed(ctx->key_2_r, &reg->key_2_r); + /* Fall through */ default: writel_relaxed(ctx->key_1_l, &reg->key_1_l); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h index b6cc90cbc9dc..4e5f9f6e901b 100644 --- a/drivers/dma/dw-edma/dw-edma-core.h +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -50,7 +50,7 @@ struct dw_edma_burst { struct dw_edma_region { phys_addr_t paddr; - dma_addr_t vaddr; + void __iomem *vaddr; size_t sz; }; diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index 4c96e1c948f2..dc85f55e1bb8 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, chip->id = pdev->devfn; chip->irq = pdev->irq; - dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar]; + dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar]; dw->rg_region.vaddr += pdata->rg_off; dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; dw->rg_region.paddr += pdata->rg_off; dw->rg_region.sz = pdata->rg_sz; - dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar]; + dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar]; dw->ll_region.vaddr += pdata->ll_off; dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; dw->ll_region.paddr += pdata->ll_off; dw->ll_region.sz = pdata->ll_sz; - dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar]; + dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar]; dw->dt_region.vaddr += pdata->dt_off; dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; dw->dt_region.paddr += pdata->dt_off; @@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, pci_dbg(pdev, "Mode:\t%s\n", dw->mode == EDMA_MODE_LEGACY ? 
"Legacy" : "Unroll"); - pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", pdata->rg_bar, pdata->rg_off, pdata->rg_sz, - &dw->rg_region.vaddr, &dw->rg_region.paddr); + dw->rg_region.vaddr, &dw->rg_region.paddr); - pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", pdata->ll_bar, pdata->ll_off, pdata->ll_sz, - &dw->ll_region.vaddr, &dw->ll_region.paddr); + dw->ll_region.vaddr, &dw->ll_region.paddr); - pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", pdata->dt_bar, pdata->dt_off, pdata->dt_sz, - &dw->dt_region.vaddr, &dw->dt_region.paddr); + dw->dt_region.vaddr, &dw->dt_region.paddr); pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c index 8a3180ed49a6..692de47b1670 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.c +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -25,7 +25,7 @@ enum dw_edma_control { static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) { - return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr; + return dw->rg_region.vaddr; } #define SET(dw, name, value) \ @@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) { struct dw_edma_burst *child; - struct dw_edma_v0_lli *lli; - struct dw_edma_v0_llp *llp; + struct dw_edma_v0_lli __iomem *lli; + struct dw_edma_v0_llp __iomem *llp; u32 control = 0, i = 0; - u64 sar, dar, addr; int j; - lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr; + lli = chunk->ll_region.vaddr; if (chunk->cb) control = DW_EDMA_V0_CB; @@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) /* Transfer size */ SET_LL(&lli[i].transfer_size, child->sz); /* SAR - low, high */ - sar = cpu_to_le64(child->sar); - SET_LL(&lli[i].sar_low, lower_32_bits(sar)); - SET_LL(&lli[i].sar_high, upper_32_bits(sar)); + SET_LL(&lli[i].sar_low, lower_32_bits(child->sar)); + SET_LL(&lli[i].sar_high, upper_32_bits(child->sar)); /* DAR - low, high */ - dar = cpu_to_le64(child->dar); - SET_LL(&lli[i].dar_low, lower_32_bits(dar)); - SET_LL(&lli[i].dar_high, upper_32_bits(dar)); + SET_LL(&lli[i].dar_low, lower_32_bits(child->dar)); + SET_LL(&lli[i].dar_high, upper_32_bits(child->dar)); i++; } - llp = (struct dw_edma_v0_llp *)&lli[i]; + llp = (void __iomem *)&lli[i]; control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; if (!chunk->cb) control |= DW_EDMA_V0_CB; @@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) /* Channel control */ SET_LL(&llp->control, control); /* Linked list - low, high */ - addr = cpu_to_le64(chunk->ll_region.paddr); - SET_LL(&llp->llp_low, lower_32_bits(addr)); - SET_LL(&llp->llp_high, upper_32_bits(addr)); + SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr)); + SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr)); } void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) @@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) struct dw_edma_chan *chan = chunk->chan; struct dw_edma *dw = chan->chip->dw; u32 tmp; - u64 llp; dw_edma_v0_core_write_chunk(chunk); @@ 
-262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) SET_CH(dw, chan->dir, chan->id, ch_control1, (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); /* Linked list - low, high */ - llp = cpu_to_le64(chunk->ll_region.paddr); - SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp)); - SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp)); + SET_CH(dw, chan->dir, chan->id, llp_low, + lower_32_bits(chunk->ll_region.paddr)); + SET_CH(dw, chan->dir, chan->id, llp_high, + upper_32_bits(chunk->ll_region.paddr)); } /* Doorbell */ SET_RW(dw, chan->dir, doorbell, diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c index 3226f528cc11..42739508c0d8 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -14,7 +14,7 @@ #include "dw-edma-core.h" #define REGS_ADDR(name) \ - ((dma_addr_t *)&regs->name) + ((void __force *)&regs->name) #define REGISTER(name) \ { #name, REGS_ADDR(name) } @@ -40,36 +40,37 @@ static struct dentry *base_dir; static struct dw_edma *dw; -static struct dw_edma_v0_regs *regs; +static struct dw_edma_v0_regs __iomem *regs; static struct { - void *start; - void *end; + void __iomem *start; + void __iomem *end; } lim[2][EDMA_V0_MAX_NR_CH]; struct debugfs_entries { - char name[24]; + const char *name; dma_addr_t *reg; }; static int dw_edma_debugfs_u32_get(void *data, u64 *val) { + void __iomem *reg = (void __force __iomem *)data; if (dw->mode == EDMA_MODE_LEGACY && - data >= (void *)&regs->type.legacy.ch) { - void *ptr = (void *)&regs->type.legacy.ch; + reg >= (void __iomem *)&regs->type.legacy.ch) { + void __iomem *ptr = &regs->type.legacy.ch; u32 viewport_sel = 0; unsigned long flags; u16 ch; for (ch = 0; ch < dw->wr_ch_cnt; ch++) - if (lim[0][ch].start >= data && data < lim[0][ch].end) { - ptr += (data - lim[0][ch].start); + if (lim[0][ch].start >= reg && reg < lim[0][ch].end) { + ptr += (reg - lim[0][ch].start); goto legacy_sel_wr; } for (ch = 0; ch < dw->rd_ch_cnt; ch++) - if (lim[1][ch].start >= data && data < lim[1][ch].end) { - ptr += (data - lim[1][ch].start); + if (lim[1][ch].start >= reg && reg < lim[1][ch].end) { + ptr += (reg - lim[1][ch].start); goto legacy_sel_rd; } @@ -86,7 +87,7 @@ legacy_sel_wr: raw_spin_unlock_irqrestore(&dw->lock, flags); } else { - *val = readl(data); + *val = readl(reg); } return 0; @@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], } } -static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs, +static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs, struct dentry *dir) { int nr_entries; @@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) if (!dw) return; - regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr; + regs = dw->rg_region.vaddr; if (!regs) return; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 89d710899010..de8bfd9a76e9 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -142,7 +142,7 @@ enum d40_events { * when the DMA hw is powered off. * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. 
*/ -static u32 d40_backup_regs[] = { +static __maybe_unused u32 d40_backup_regs[] = { D40_DREG_LCPA, D40_DREG_LCLA, D40_DREG_PRMSE, @@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = { #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) -static u32 d40_backup_regs_chan[] = { +static __maybe_unused u32 d40_backup_regs_chan[] = { D40_CHAN_REG_SSCFG, D40_CHAN_REG_SSELT, D40_CHAN_REG_SSPTR, diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index d6e919d3936a..1311de74bfdd 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) chan = &dmadev->chan[id]; if (!chan) { - dev_err(chan2dev(chan), "MDMA channel not initialized\n"); + dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n"); goto exit; } diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 2805853e963f..b33cf6e8ab8e 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, return chan; } -static int tegra_adma_runtime_suspend(struct device *dev) +static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev) { struct tegra_adma *tdma = dev_get_drvdata(dev); struct tegra_adma_chan_regs *ch_reg; @@ -744,7 +744,7 @@ clk_disable: return 0; } -static int tegra_adma_runtime_resume(struct device *dev) +static int __maybe_unused tegra_adma_runtime_resume(struct device *dev) { struct tegra_adma *tdma = dev_get_drvdata(dev); struct tegra_adma_chan_regs *ch_reg; diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index ba2489d4ea24..ba27802efcd0 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( if (src_icg) { d->ccr |= CCR_SRC_AMODE_DBLIDX; d->ei = 1; - d->fi = src_icg; + d->fi = src_icg + 1; } else if (xt->src_inc) { d->ccr |= CCR_SRC_AMODE_POSTINC; d->fi = 0; @@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( if (dst_icg) { d->ccr |= CCR_DST_AMODE_DBLIDX; sg->ei = 1; - sg->fi = dst_icg; + sg->fi = dst_icg + 1; } else if (xt->dst_inc) { d->ccr |= CCR_DST_AMODE_POSTINC; sg->fi = 0; diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 1db780c0f07b..3caae7f2cf56 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -927,17 +927,33 @@ fail: return status; } +#define GET_EFI_CONFIG_TABLE(bits) \ +static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \ + efi_guid_t guid) \ +{ \ + efi_system_table_##bits##_t *sys_table; \ + efi_config_table_##bits##_t *tables; \ + int i; \ + \ + sys_table = (typeof(sys_table))_sys_table; \ + tables = (typeof(tables))(unsigned long)sys_table->tables; \ + \ + for (i = 0; i < sys_table->nr_tables; i++) { \ + if (efi_guidcmp(tables[i].guid, guid) != 0) \ + continue; \ + \ + return (void *)(unsigned long)tables[i].table; \ + } \ + \ + return NULL; \ +} +GET_EFI_CONFIG_TABLE(32) +GET_EFI_CONFIG_TABLE(64) + void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid) { - efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables; - int i; - - for (i = 0; i < sys_table->nr_tables; i++) { - if (efi_guidcmp(tables[i].guid, guid) != 0) - continue; - - return (void *)tables[i].table; - } - - return NULL; + if (efi_is_64bit()) + 
return get_efi_config_table64(sys_table, guid); + else + return get_efi_config_table32(sys_table, guid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index df8a23554831..f6ac1e9548f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -32,7 +32,6 @@ struct amdgpu_gds { uint32_t gws_size; uint32_t oa_size; uint32_t gds_compute_max_wave_id; - uint32_t vgt_gs_max_wave_id; }; struct amdgpu_gds_reg_offset { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 99f14fcc1460..19661c645703 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -30,6 +30,7 @@ #define AMDGPU_VCN_FIRMWARE_OFFSET 256 #define AMDGPU_VCN_MAX_ENC_RINGS 3 +#define VCN_DEC_KMD_CMD 0x80000000 #define VCN_DEC_CMD_FENCE 0x00000000 #define VCN_DEC_CMD_TRAP 0x00000001 #define VCN_DEC_CMD_WRITE_REG 0x00000004 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 32773b7523d2..f41287f9000d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4206,15 +4206,6 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; - /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS. - * This resets the wave ID counters. (needed by transform feedback) - * TODO: This might only be needed on a VMID switch when we change - * the GDS OA mapping, not sure. - */ - amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID); - amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id); - if (ib->flags & AMDGPU_IB_FLAG_CE) header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2); else @@ -4961,7 +4952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ - .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */ + .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ .emit_ib = gfx_v10_0_ring_emit_ib_gfx, .emit_fence = gfx_v10_0_ring_emit_fence, .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync, @@ -5112,7 +5103,6 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) default: adev->gds.gds_size = 0x10000; adev->gds.gds_compute_max_wave_id = 0x4ff; - adev->gds.vgt_gs_max_wave_id = 0x3ff; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 751567f78567..ee1ccdcf2d30 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1321,6 +1321,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return 0; } +static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, + AMDGPU_GEM_DOMAIN_VRAM); + if (!r) + adev->gfx.rlc.clear_state_gpu_addr = + amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); + + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + + return r; +} + +static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev) +{ + int r; + + if (!adev->gfx.rlc.clear_state_obj) + return; + + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); + if (likely(r == 0)) { + amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + } 
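The EFI stub hunk above stamps out 32-bit and 64-bit variants of the configuration-table walk and dispatches on efi_is_64bit(), since the table layout follows the firmware's word size, which need not match the kernel's (x86 mixed mode, for instance). A simplified, width-agnostic sketch of the GUID scan; the demo_* types are hypothetical stand-ins for efi_guid_t and efi_config_table_NN_t:

#include <linux/string.h>

struct demo_guid { unsigned char b[16]; };

struct demo_config_entry {
	struct demo_guid guid;
	unsigned long table;	/* physical address of the vendor table */
};

/* Return the vendor table registered under @want, or 0 if absent. */
static unsigned long demo_find_config_table(const struct demo_config_entry *tbl,
					    unsigned long nr_tables,
					    const struct demo_guid *want)
{
	unsigned long i;

	for (i = 0; i < nr_tables; i++)
		if (!memcmp(&tbl[i].guid, want, sizeof(*want)))
			return tbl[i].table;

	return 0;
}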
+} + static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -4785,6 +4818,10 @@ static int gfx_v8_0_hw_init(void *handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); + r = gfx_v8_0_csb_vram_pin(adev); + if (r) + return r; + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4901,6 +4938,9 @@ static int gfx_v8_0_hw_fini(void *handle) else pr_err("rlc is busy, skip halt rlc\n"); amdgpu_gfx_rlc_exit_safe_mode(adev); + + gfx_v8_0_csb_vram_unpin(adev); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1cf639a51178..04b8ac4432c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4869,7 +4869,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - WREG32(mmSQ_CMD, value); + WREG32_SOC15(GC, 0, mmSQ_CMD, value); } static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 1cfc2620b2dd..dfde886cc6bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1485,7 +1485,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1)); } /** @@ -1498,7 +1498,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1)); } /** @@ -1543,7 +1543,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1)); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); amdgpu_ring_write(ring, 0); @@ -1553,7 +1553,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1)); } /** @@ -1597,7 +1597,7 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1)); } static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, @@ -1626,7 +1626,7 @@ static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, 
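The vcn_v2_0 hunks above (and the one that continues right after) tag every decode command word written by the kernel driver with VCN_DEC_KMD_CMD, bit 31, on top of the existing command encoding. A one-line helper capturing that encoding; the demo_* names are hypothetical:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_KMD_CMD	BIT(31)		/* mirrors VCN_DEC_KMD_CMD == 0x80000000 */

/* Compose a decode command word the way the hunks above now do. */
static inline u32 demo_dec_cmd(u32 cmd)
{
	return DEMO_KMD_CMD | (cmd << 1);
}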
VCN_DEC_CMD_WRITE_REG << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1)); } /** @@ -2079,6 +2079,36 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD); + r = amdgpu_ring_alloc(ring, 4); + if (r) + return r; + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->vcn.external.scratch9); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + return r; +} + + static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state) { @@ -2142,7 +2172,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { .emit_ib = vcn_v2_0_dec_ring_emit_ib, .emit_fence = vcn_v2_0_dec_ring_emit_fence, .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ring = vcn_v2_0_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, .insert_nop = vcn_v2_0_dec_ring_insert_nop, .insert_start = vcn_v2_0_dec_ring_insert_start, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 26b15cc56c31..1d3cd5c50d5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1567,32 +1567,6 @@ copy_from_user_failed: return err; } -static int kfd_ioctl_alloc_queue_gws(struct file *filep, - struct kfd_process *p, void *data) -{ - int retval; - struct kfd_ioctl_alloc_queue_gws_args *args = data; - struct kfd_dev *dev; - - if (!hws_gws_support) - return -ENODEV; - - dev = kfd_device_by_id(args->gpu_id); - if (!dev) { - pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); - return -ENODEV; - } - if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) - return -ENODEV; - - mutex_lock(&p->mutex); - retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? 
dev->gws : NULL); - mutex_unlock(&p->mutex); - - args->first_gws = 0; - return retval; -} - static int kfd_ioctl_get_dmabuf_info(struct file *filep, struct kfd_process *p, void *data) { @@ -1795,8 +1769,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF, kfd_ioctl_import_dmabuf, 0), - AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS, - kfd_ioctl_alloc_queue_gws, 0), }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index fa20201eef3a..cbc480a33376 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -23,6 +23,7 @@ */ #include <linux/slab.h> +#include <linux/mm.h> #include "dm_services.h" @@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) struct dc_state *dc_create_state(struct dc *dc) { - struct dc_state *context = kzalloc(sizeof(struct dc_state), - GFP_KERNEL); + struct dc_state *context = kvzalloc(sizeof(struct dc_state), + GFP_KERNEL); if (!context) return NULL; @@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc) struct dc_state *dc_copy_state(struct dc_state *src_ctx) { int i, j; - struct dc_state *new_ctx = kmemdup(src_ctx, - sizeof(struct dc_state), GFP_KERNEL); + struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); if (!new_ctx) return NULL; + memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; @@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref) { struct dc_state *context = container_of(kref, struct dc_state, refcount); dc_resource_state_destruct(context); - kfree(context); + kvfree(context); } void dc_release_state(struct dc_state *context) diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 0685a3388e38..8a3eadeebdcb 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -315,6 +315,8 @@ int smu_get_power_num_states(struct smu_context *smu, int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; switch (sensor) { @@ -339,7 +341,7 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *size = 4; break; case AMDGPU_PP_SENSOR_VCN_POWER_STATE: - *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *(uint32_t *)data = power_gate->vcn_gated ? 
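The dc_create_state()/dc_copy_state() hunks above move a large state object from kzalloc()/kmemdup() to kvzalloc()/kvmalloc(), which fall back to vmalloc when a big physically contiguous allocation is not available; the matching release path must then use kvfree(). A minimal sketch with a hypothetical state type (struct demo_state and the demo_* helpers are not from the driver):

#include <linux/mm.h>		/* kvmalloc(), kvzalloc(), kvfree() */
#include <linux/string.h>

struct demo_state { char blob[64 * 1024]; };	/* hypothetical large object */

static struct demo_state *demo_copy_state(const struct demo_state *src)
{
	/* May come from the page allocator or vmalloc; only kvfree() knows. */
	struct demo_state *dst = kvmalloc(sizeof(*dst), GFP_KERNEL);

	if (!dst)
		return NULL;
	memcpy(dst, src, sizeof(*dst));
	return dst;
}

static void demo_free_state(struct demo_state *state)
{
	kvfree(state);
}

The design point is that every allocation and free site for the object must agree on the kv* family, which is why dc_state_free() switches to kvfree() in the same patch.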
0 : 1; *size = 4; break; default: diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 208e6711d506..a0f52c86d8c7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -451,6 +451,7 @@ struct smu_dpm_context { struct smu_power_gate { bool uvd_gated; bool vce_gated; + bool vcn_gated; }; struct smu_power_context { diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index cc0a3b2256af..b81c7e715dc9 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -502,6 +502,8 @@ static int navi10_store_powerplay_table(struct smu_context *smu) static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) { + struct smu_table_context *smu_table = &smu->smu_table; + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), @@ -516,9 +518,35 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + return 0; } +static int navi10_get_metrics_table(struct smu_context *smu, + SmuMetrics_t *metrics_table) +{ + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + + return ret; +} + static int navi10_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -577,20 +605,27 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; + /* vcn dpm on is a prerequisite for vcn power gate messages */ + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; + } + power_gate->vcn_gated = false; } else { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; + } + power_gate->vcn_gated = true; } - ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); - return ret; } @@ -598,15 +633,10 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) { - static SmuMetrics_t metrics; int ret = 0, clk_id = 0; + SmuMetrics_t metrics; - if (!value) - return -EINVAL; - - memset(&metrics, 0, sizeof(metrics)); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) 
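The navi10 hunks above and below replace direct smu_update_table(SMU_TABLE_SMU_METRICS, ...) calls with navi10_get_metrics_table(), which refreshes a cached copy at most once per millisecond (jiffies plus HZ / 1000) and otherwise serves the cache, so back-to-back sensor reads stop hammering the SMU. A minimal sketch of the same policy; the demo_* names and the hw_read callback are hypothetical:

#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_metrics { u32 average_power; u32 temperature; };

static struct demo_metrics demo_cache;
static unsigned long demo_cache_jiffies;

static int demo_get_metrics(struct demo_metrics *out,
			    int (*hw_read)(struct demo_metrics *))
{
	/* Refresh at most once per millisecond, otherwise serve the cache. */
	if (!demo_cache_jiffies ||
	    time_after(jiffies, demo_cache_jiffies + HZ / 1000)) {
		int ret = hw_read(&demo_cache);

		if (ret)
			return ret;
		demo_cache_jiffies = jiffies;
	}

	memcpy(out, &demo_cache, sizeof(*out));
	return 0;
}

Note that with HZ below 1000 the HZ / 1000 term rounds to zero, so the cache effectively refreshes on every call; the expiry is a rate limit, not a guarantee of freshness.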
return ret; @@ -894,8 +924,9 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value) if (!value) return -EINVAL; - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, - false); + ret = navi10_get_metrics_table(smu, &metrics); + if (ret) + return ret; if (ret) return ret; @@ -914,10 +945,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu, if (!value) return -EINVAL; - msleep(1); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; @@ -956,10 +984,9 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - memset(&metrics, 0, sizeof(metrics)); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); + if (ret) + return ret; if (ret) return ret; @@ -1307,7 +1334,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu, if (!value) return -EINVAL; - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index ac5b26228e75..5fde5cf65b42 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1391,7 +1391,7 @@ smu_v11_0_smc_fan_control(struct smu_context *smu, bool start) { int ret = 0; - if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) + if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) return 0; ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 4c7e31cb45ff..a5d1494a3dc4 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) /* Enable extended register access */ - ast_enable_mmio(dev); ast_open_key(ast); + ast_enable_mmio(dev); /* Find out whether P2A works or whether to use device-tree */ ast_detect_config_mode(dev, &scu_rev); @@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; + /* enable standard VGA decode */ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); + ast_release_firmware(dev); kfree(ast->dp501_fw_addr); ast_mode_fini(dev); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index ffccbef962a4..a1cb020e07e5 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; ast_open_key(ast); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); ast_set_std_reg(crtc, adjusted_mode, &vbios_mode); ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index f7d421359d56..c1d1ac51d1c2 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 
80fcd5dc1558..b0369e690f36 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1770,7 +1770,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, } if (named_mode) { - strncpy(mode->name, name, mode_end); + if (mode_end + 1 > DRM_DISPLAY_MODE_LEN) + return false; + strscpy(mode->name, name, mode_end + 1); } else { ret = drm_mode_parse_cmdline_res_mode(name, mode_end, parse_extras, diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index bc3a94d491c4..27bd7276a82d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -536,7 +536,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) { DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n"); - return -EPERM; + ret = -EPERM; + goto err; } /* diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 99cc3e2e9c2c..f016a776a39e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -396,8 +396,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, else txesc2_div = 10; - I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK); - I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5fae0e50aad0..41dab9ea33cd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1628,6 +1628,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) static int eb_copy_relocations(const struct i915_execbuffer *eb) { + struct drm_i915_gem_relocation_entry *relocs; const unsigned int count = eb->buffer_count; unsigned int i; int err; @@ -1635,7 +1636,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) for (i = 0; i < count; i++) { const unsigned int nreloc = eb->exec[i].relocation_count; struct drm_i915_gem_relocation_entry __user *urelocs; - struct drm_i915_gem_relocation_entry *relocs; unsigned long size; unsigned long copied; @@ -1663,14 +1663,8 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) if (__copy_from_user((char *)relocs + copied, (char __user *)urelocs + copied, - len)) { -end_user: - user_access_end(); -end: - kvfree(relocs); - err = -EFAULT; - goto err; - } + len)) + goto end; copied += len; } while (copied < size); @@ -1699,10 +1693,14 @@ end: return 0; +end_user: + user_access_end(); +end: + kvfree(relocs); + err = -EFAULT; err: while (i--) { - struct drm_i915_gem_relocation_entry *relocs = - u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); + relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); if (eb->exec[i].relocation_count) kvfree(relocs); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 9f3fd7d96a69..75baff657e43 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1528,9 +1528,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, if (!intel_gvt_ggtt_validate_range(vgpu, workload->wa_ctx.indirect_ctx.guest_gma, 
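The drm_modes hunk above replaces an unbounded strncpy() with an explicit length check plus strscpy(), which always NUL-terminates and reports truncation. A minimal sketch of the same guard, assuming a fixed-size name field (DEMO_NAME_LEN and demo_set_mode_name() are hypothetical stand-ins):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_NAME_LEN	32	/* stands in for DRM_DISPLAY_MODE_LEN */

static int demo_set_mode_name(char dst[DEMO_NAME_LEN],
			      const char *src, size_t name_len)
{
	/* Reject names that cannot fit including the terminating NUL. */
	if (name_len + 1 > DEMO_NAME_LEN)
		return -EINVAL;

	/* Copies at most name_len bytes and always NUL-terminates. */
	return strscpy(dst, src, name_len + 1) < 0 ? -E2BIG : 0;
}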
workload->wa_ctx.indirect_ctx.size)) { - kmem_cache_free(s->workloads, workload); gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", workload->wa_ctx.indirect_ctx.guest_gma); + kmem_cache_free(s->workloads, workload); return ERR_PTR(-EINVAL); } } @@ -1542,9 +1542,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, if (!intel_gvt_ggtt_validate_range(vgpu, workload->wa_ctx.per_ctx.guest_gma, CACHELINE_BYTES)) { - kmem_cache_free(s->workloads, workload); gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", workload->wa_ctx.per_ctx.guest_gma); + kmem_cache_free(s->workloads, workload); return ERR_PTR(-EINVAL); } } diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 126703816794..5c36c75232e6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); int slots; - /* When restoring duplicated states, we need to make sure that the - * bw remains the same and avoid recalculating it, as the connector's - * bpc may have changed after the state was duplicated - */ - if (!state->duplicated) - asyh->dp.pbn = - drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, - connector->display_info.bpc * 3); + if (crtc_state->mode_changed || crtc_state->connectors_changed) { + /* + * When restoring duplicated states, we need to make sure that + * the bw remains the same and avoid recalculating it, as the + * connector's bpc may have changed after the state was + * duplicated + */ + if (!state->duplicated) { + const int bpp = connector->display_info.bpc * 3; + const int clock = crtc_state->adjusted_mode.clock; + + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp); + } - if (crtc_state->mode_changed) { slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn); diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 95e5c517a15f..9aae3d8e99ef 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev) static const struct dev_pm_ops rockchip_dp_pm_ops = { #ifdef CONFIG_PM_SLEEP - .suspend = rockchip_dp_suspend, + .suspend_late = rockchip_dp_suspend, .resume_early = rockchip_dp_resume, #endif }; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 35ddbec1375a..671c90f34ede 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) rmb(); /* for list_empty to work without lock */ if (list_empty(&entity->list) || - spsc_queue_peek(&entity->job_queue) == NULL) + spsc_queue_count(&entity->job_queue) == 0) return true; return false; @@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) /* Consumption of existing IBs wasn't completed. Forcefully * remove them here. */ - if (spsc_queue_peek(&entity->job_queue)) { + if (spsc_queue_count(&entity->job_queue)) { if (sched) { /* Park the kernel for a moment to make sure it isn't processing * our enity. 
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 274cb955e2e1..bdcaa4c7168c 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -126,8 +126,12 @@ int tegra_output_probe(struct tegra_output *output) "nvidia,hpd-gpio", 0, GPIOD_IN, "HDMI hotplug detect"); - if (IS_ERR(output->hpd_gpio)) - return PTR_ERR(output->hpd_gpio); + if (IS_ERR(output->hpd_gpio)) { + if (PTR_ERR(output->hpd_gpio) != -ENOENT) + return PTR_ERR(output->hpd_gpio); + + output->hpd_gpio = NULL; + } if (output->hpd_gpio) { err = gpiod_to_irq(output->hpd_gpio); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index e4e09d47c5c0..59e9d05ab928 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, break; } - if (retries == RETRIES) + if (retries == RETRIES) { + kfree(reply); return -EINVAL; + } *msg_len = reply_len; *msg = reply; diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h index 999f80a63bff..e70783e33680 100644 --- a/drivers/hv/hv_trace.h +++ b/drivers/hv/hv_trace.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM hyperv diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 3fb9c0a2d6d0..ce5ec403ec73 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -343,7 +343,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) data->sample_time = MSEC_PER_SEC / 2; break; case tmp75b: /* not one-shot mode, Conversion rate 37Hz */ - clr_mask |= 1 << 15 | 0x3 << 13; + clr_mask |= 1 << 7 | 0x3 << 5; data->resolution = 12; data->sample_time = MSEC_PER_SEC / 37; break; diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index ec7bcf8d7cd6..f3dd2a17bd42 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -704,7 +704,7 @@ static struct attribute *nct7802_in_attrs[] = { &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in3_beep.dev_attr.attr, - &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */ + &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */ &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, @@ -730,9 +730,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj, if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */ return 0; - if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */ + if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */ return 0; - if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */ + if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */ return 0; return attr->mode; diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 5c1ca0df5cb0..84f1dcb69827 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -544,6 +544,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev) /* See function coresight_get_sink_by_id() to know where this is used */ hash = hashlen_hash(hashlen_string(NULL, name)); + sysfs_attr_init(&ea->attr.attr); ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL); if (!ea->attr.attr.name) return -ENOMEM; diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h index 574c16004cb2..13d9b141daaa 100644 --- a/drivers/hwtracing/intel_th/msu.h 
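The tegra output hunk above stops treating a missing hotplug GPIO as fatal: -ENOENT now simply leaves hpd_gpio at NULL so the driver falls back to polling, while any other lookup error still aborts the probe. A minimal sketch of that policy using the generic consumer API (the driver itself goes through a fwnode lookup; the "hpd" con_id and demo_* names here are hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_get_hpd_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod = devm_gpiod_get(dev, "hpd", GPIOD_IN);

	if (IS_ERR(gpiod)) {
		/* Absent GPIO is fine; real lookup errors are not. */
		if (PTR_ERR(gpiod) != -ENOENT)
			return PTR_ERR(gpiod);
		gpiod = NULL;
	}

	*out = gpiod;
	return 0;
}

devm_gpiod_get_optional() encodes the same "missing is not an error" rule directly by returning NULL when no GPIO is described.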
+++ b/drivers/hwtracing/intel_th/msu.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures * diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h index e9381babc84c..7dfc0431333b 100644 --- a/drivers/hwtracing/intel_th/pti.h +++ b/drivers/hwtracing/intel_th/pti.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Intel(R) Trace Hub PTI output data structures * diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 35b302d983e0..959d4912ec0d 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -69,6 +69,7 @@ struct em_i2c_device { struct completion msg_done; struct clk *sclk; struct i2c_client *slave; + int irq; }; static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg) @@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave) writeb(0, priv->base + I2C_OFS_SVA0); + /* + * Wait for interrupt to finish. New slave irqs cannot happen because we + * cleared the slave address and, thus, only extension codes will be + * detected which do not use the slave ptr. + */ + synchronize_irq(priv->irq); priv->slave = NULL; return 0; @@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev) { struct em_i2c_device *priv; struct resource *r; - int irq, ret; + int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev) em_i2c_reset(&priv->adap); - irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0, + priv->irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0, "em_i2c", priv); if (ret) goto err_clk; @@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev) if (ret) goto err_clk; - dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq); + dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, + priv->irq); return 0; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index b1b8b938d7f4..15f6cde6452f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx, } /* Functions for DMA support */ -static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, - dma_addr_t phy_addr) +static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, + dma_addr_t phy_addr) { struct imx_i2c_dma *dma; struct dma_slave_config dma_sconfig; @@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); if (!dma) - return -ENOMEM; + return; dma->chan_tx = dma_request_chan(dev, "tx"); if (IS_ERR(dma->chan_tx)) { @@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); - return 0; + return; fail_rx: dma_release_channel(dma->chan_rx); @@ -336,8 +336,6 @@ fail_tx: dma_release_channel(dma->chan_tx); fail_al: devm_kfree(dev, dma); - /* return successfully if there is no dma support */ - return ret == -ENODEV ? 
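The i2c-emev2 hunk above (and the i2c-rcar one that follows) keeps the interrupt number in the driver data so that unregistering an I2C slave can call synchronize_irq() after disabling the slave address: a handler that is already running with the old priv->slave pointer is allowed to finish before the pointer is cleared. A minimal sketch of the ordering, with hypothetical device state and a stand-in hardware disable:

#include <linux/interrupt.h>

struct demo_i2c {			/* hypothetical driver state */
	int irq;
	void *slave;
};

static void demo_hw_disable_slave(struct demo_i2c *priv)
{
	/* Stand-in for clearing the slave-address register. */
}

static int demo_unreg_slave(struct demo_i2c *priv)
{
	demo_hw_disable_slave(priv);	/* no new slave interrupts after this */
	synchronize_irq(priv->irq);	/* wait out any handler already running */
	priv->slave = NULL;		/* now nothing can dereference it */
	return 0;
}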
0 : ret; } static void i2c_imx_dma_callback(void *arg) @@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev) dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", i2c_imx->adapter.name); + dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); /* Init DMA config if supported */ - ret = i2c_imx_dma_request(i2c_imx, phy_addr); - if (ret < 0) - goto del_adapter; + i2c_imx_dma_request(i2c_imx, phy_addr); - dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); return 0; /* Return OK */ -del_adapter: - i2c_del_adapter(&i2c_imx->adapter); clk_notifier_unregister: clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); rpm_disable: diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d39a4606f72d..531c01100b56 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -139,6 +139,7 @@ struct rcar_i2c_priv { enum dma_data_direction dma_direction; struct reset_control *rstc; + int irq; }; #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) @@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave) WARN_ON(!priv->slave); + /* disable irqs and ensure none is running before clearing ptr */ rcar_i2c_write(priv, ICSIER, 0); rcar_i2c_write(priv, ICSCR, 0); + synchronize_irq(priv->irq); priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); @@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev) struct i2c_adapter *adap; struct device *dev = &pdev->dev; struct i2c_timings i2c_t; - int irq, ret; + int ret; /* Otherwise logic will break because some bytes must always use PIO */ BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length"); @@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev) pm_runtime_put(dev); - irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv); + priv->irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv); if (ret < 0) { - dev_err(dev, "cannot get irq %d\n", irq); + dev_err(dev, "cannot get irq %d\n", priv->irq); goto out_pm_disable; } diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h index 868755f82f88..2c21893905a3 100644 --- a/drivers/i2c/busses/i2c-stm32.h +++ b/drivers/i2c/busses/i2c-stm32.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * i2c-stm32.h * diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c index 46bb2e421bb9..ad19d9c716f4 100644 --- a/drivers/iio/accel/cros_ec_accel_legacy.c +++ b/drivers/iio/accel/cros_ec_accel_legacy.c @@ -319,7 +319,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = { .modified = 1, \ .info_mask_separate = \ BIT(IIO_CHAN_INFO_RAW) | \ - BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \ .ext_info = cros_ec_accel_legacy_ext_info, \ diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c index 92b1d5037ac9..e234970b7150 100644 --- a/drivers/iio/adc/ingenic-adc.c +++ b/drivers/iio/adc/ingenic-adc.c @@ -11,6 +11,7 @@ #include <linux/iio/iio.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> @@ -22,8 +23,11 @@ #define JZ_ADC_REG_ADTCH 0x18 #define 
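The i2c-imx hunk above makes DMA setup best-effort: i2c_imx_dma_request() no longer returns an error, so a missing or failed DMA channel just leaves the adapter in PIO mode instead of tearing down the freshly registered adapter. A minimal sketch of the fallback, with hypothetical driver state (struct demo_i2c_dma and demo_dma_setup() are not from the driver):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

struct demo_i2c_dma {			/* hypothetical driver state */
	struct dma_chan *tx;
	struct dma_chan *rx;
};

/* Best effort: on any failure the device keeps working in PIO mode. */
static void demo_dma_setup(struct device *dev, struct demo_i2c_dma *dma)
{
	dma->tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->tx))
		goto no_dma;

	dma->rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->rx)) {
		dma_release_channel(dma->tx);
		goto no_dma;
	}
	return;

no_dma:
	dma->tx = NULL;
	dma->rx = NULL;
	dev_info(dev, "DMA unavailable, falling back to PIO\n");
}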
JZ_ADC_REG_ADBDAT 0x1c #define JZ_ADC_REG_ADSDAT 0x20 +#define JZ_ADC_REG_ADCLK 0x28 #define JZ_ADC_REG_CFG_BAT_MD BIT(4) +#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0 +#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16 #define JZ_ADC_AUX_VREF 3300 #define JZ_ADC_AUX_VREF_BITS 12 @@ -34,6 +38,8 @@ #define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986) #define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12 +struct ingenic_adc; + struct ingenic_adc_soc_data { unsigned int battery_high_vref; unsigned int battery_high_vref_bits; @@ -41,6 +47,7 @@ struct ingenic_adc_soc_data { size_t battery_raw_avail_size; const int *battery_scale_avail; size_t battery_scale_avail_size; + int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc); }; struct ingenic_adc { @@ -151,6 +158,42 @@ static const int jz4740_adc_battery_scale_avail[] = { JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS, }; +static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc) +{ + struct clk *parent_clk; + unsigned long parent_rate, rate; + unsigned int div_main, div_10us; + + parent_clk = clk_get_parent(adc->clk); + if (!parent_clk) { + dev_err(dev, "ADC clock has no parent\n"); + return -ENODEV; + } + parent_rate = clk_get_rate(parent_clk); + + /* + * The JZ4725B ADC works at 500 kHz to 8 MHz. + * We pick the highest rate possible. + * In practice we typically get 6 MHz, half of the 12 MHz EXT clock. + */ + div_main = DIV_ROUND_UP(parent_rate, 8000000); + div_main = clamp(div_main, 1u, 64u); + rate = parent_rate / div_main; + if (rate < 500000 || rate > 8000000) { + dev_err(dev, "No valid divider for ADC main clock\n"); + return -EINVAL; + } + + /* We also need a divider that produces a 10us clock. */ + div_10us = DIV_ROUND_UP(rate, 100000); + + writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) | + (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB, + adc->base + JZ_ADC_REG_ADCLK); + + return 0; +} + static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = { .battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF, .battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS, @@ -158,6 +201,7 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = { .battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail), .battery_scale_avail = jz4725b_adc_battery_scale_avail, .battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail), + .init_clk_div = jz4725b_adc_init_clk_div, }; static const struct ingenic_adc_soc_data jz4740_adc_soc_data = { @@ -167,6 +211,7 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = { .battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail), .battery_scale_avail = jz4740_adc_battery_scale_avail, .battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail), + .init_clk_div = NULL, /* no ADCLK register on JZ4740 */ }; static int ingenic_adc_read_avail(struct iio_dev *iio_dev, @@ -317,6 +362,15 @@ static int ingenic_adc_probe(struct platform_device *pdev) return ret; } + /* Set clock dividers. */ + if (soc_data->init_clk_div) { + ret = soc_data->init_clk_div(dev, adc); + if (ret) { + clk_disable_unprepare(adc->clk); + return ret; + } + } + /* Put hardware in a known passive state. 
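The ingenic-adc hunk above adds a JZ4725B hook that derives two dividers from the parent clock: the main divider keeps the ADC clock within 500 kHz to 8 MHz, and a second divider generates a 10 us tick from the resulting rate. The arithmetic in isolation, under the same limits (demo_pick_adc_dividers() is a hypothetical helper, not the driver function):

#include <linux/errno.h>
#include <linux/kernel.h>	/* DIV_ROUND_UP(), clamp() */

static int demo_pick_adc_dividers(unsigned long parent_rate,
				  unsigned int *div_main,
				  unsigned int *div_10us)
{
	unsigned long rate;

	/* Smallest divider that keeps the ADC clock at or below 8 MHz. */
	*div_main = DIV_ROUND_UP(parent_rate, 8000000);
	*div_main = clamp(*div_main, 1U, 64U);

	rate = parent_rate / *div_main;
	if (rate < 500000 || rate > 8000000)
		return -EINVAL;

	/* One tick of the second divider must last 10 us (100 kHz). */
	*div_10us = DIV_ROUND_UP(rate, 100000);
	return 0;
}

With the 12 MHz EXT clock mentioned in the driver comment this yields div_main = 2 (6 MHz ADC clock) and div_10us = 60; the register then stores each value minus one.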
*/ writeb(0x00, adc->base + JZ_ADC_REG_ENABLE); writeb(0xff, adc->base + JZ_ADC_REG_CTRL); diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index 917223d5ff5b..da073d72f649 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -83,7 +83,7 @@ #define MAX9611_TEMP_MAX_POS 0x7f80 #define MAX9611_TEMP_MAX_NEG 0xff80 #define MAX9611_TEMP_MIN_NEG 0xd980 -#define MAX9611_TEMP_MASK GENMASK(7, 15) +#define MAX9611_TEMP_MASK GENMASK(15, 7) #define MAX9611_TEMP_SHIFT 0x07 #define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT) #define MAX9611_TEMP_SCALE_NUM 1000000 @@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611) if (ret) return ret; - regval = ret & MAX9611_TEMP_MASK; + regval &= MAX9611_TEMP_MASK; if ((regval > MAX9611_TEMP_MAX_POS && regval < MAX9611_TEMP_MIN_NEG) || diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c index 2d685730f867..c37f201294b2 100644 --- a/drivers/iio/adc/rcar-gyroadc.c +++ b/drivers/iio/adc/rcar-gyroadc.c @@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) dev_err(dev, "Only %i channels supported with %pOFn, but reg = <%i>.\n", num_channels, child, reg); - return ret; + return -EINVAL; } } @@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) dev_err(dev, "Channel %i uses different ADC mode than the rest.\n", reg); - return ret; + return -EINVAL; } /* Channel is valid, grab the regulator. */ diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c index e48f15cc9ab5..ff82863cbf42 100644 --- a/drivers/iio/frequency/adf4371.c +++ b/drivers/iio/frequency/adf4371.c @@ -276,11 +276,11 @@ static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq, st->buf[0] = st->integer >> 8; st->buf[1] = 0x40; /* REG12 default */ st->buf[2] = 0x00; - st->buf[3] = st->fract2 & 0xFF; - st->buf[4] = st->fract2 >> 7; - st->buf[5] = st->fract2 >> 15; + st->buf[3] = st->fract1 & 0xFF; + st->buf[4] = st->fract1 >> 8; + st->buf[5] = st->fract1 >> 16; st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) | - ADF4371_FRAC1WORD(st->fract1 >> 23); + ADF4371_FRAC1WORD(st->fract1 >> 24); st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7); st->buf[8] = st->mod2 & 0xFF; st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 53a59957cc54..8a704cd5bddb 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -845,6 +845,25 @@ static const struct iio_chan_spec inv_mpu_channels[] = { INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z), }; +static const unsigned long inv_mpu_scan_masks[] = { + /* 3-axis accel */ + BIT(INV_MPU6050_SCAN_ACCL_X) + | BIT(INV_MPU6050_SCAN_ACCL_Y) + | BIT(INV_MPU6050_SCAN_ACCL_Z), + /* 3-axis gyro */ + BIT(INV_MPU6050_SCAN_GYRO_X) + | BIT(INV_MPU6050_SCAN_GYRO_Y) + | BIT(INV_MPU6050_SCAN_GYRO_Z), + /* 6-axis accel + gyro */ + BIT(INV_MPU6050_SCAN_ACCL_X) + | BIT(INV_MPU6050_SCAN_ACCL_Y) + | BIT(INV_MPU6050_SCAN_ACCL_Z) + | BIT(INV_MPU6050_SCAN_GYRO_X) + | BIT(INV_MPU6050_SCAN_GYRO_Y) + | BIT(INV_MPU6050_SCAN_GYRO_Z), + 0, +}; + static const struct iio_chan_spec inv_icm20602_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP), { @@ -871,6 +890,28 @@ static const struct iio_chan_spec inv_icm20602_channels[] = { INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z), }; +static const unsigned long 
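The max9611 hunk above fixes a swapped GENMASK(): the macro takes the most-significant bit first, so GENMASK(7, 15) produced a nonsense mask while GENMASK(15, 7) yields the intended 0xff80; the same hunk also masks the register value rather than the read's return code. A small illustration with hypothetical names:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TEMP_MASK	GENMASK(15, 7)	/* bits 15..7 -> 0xff80 */
#define DEMO_TEMP_SHIFT	7

/* Extract the temperature field from a raw 16-bit register value. */
static inline u16 demo_temp_raw(u16 reg)
{
	return (reg & DEMO_TEMP_MASK) >> DEMO_TEMP_SHIFT;
}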
inv_icm20602_scan_masks[] = { + /* 3-axis accel + temp (mandatory) */ + BIT(INV_ICM20602_SCAN_ACCL_X) + | BIT(INV_ICM20602_SCAN_ACCL_Y) + | BIT(INV_ICM20602_SCAN_ACCL_Z) + | BIT(INV_ICM20602_SCAN_TEMP), + /* 3-axis gyro + temp (mandatory) */ + BIT(INV_ICM20602_SCAN_GYRO_X) + | BIT(INV_ICM20602_SCAN_GYRO_Y) + | BIT(INV_ICM20602_SCAN_GYRO_Z) + | BIT(INV_ICM20602_SCAN_TEMP), + /* 6-axis accel + gyro + temp (mandatory) */ + BIT(INV_ICM20602_SCAN_ACCL_X) + | BIT(INV_ICM20602_SCAN_ACCL_Y) + | BIT(INV_ICM20602_SCAN_ACCL_Z) + | BIT(INV_ICM20602_SCAN_GYRO_X) + | BIT(INV_ICM20602_SCAN_GYRO_Y) + | BIT(INV_ICM20602_SCAN_GYRO_Z) + | BIT(INV_ICM20602_SCAN_TEMP), + 0, +}; + /* * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the @@ -1130,9 +1171,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, if (chip_type == INV_ICM20602) { indio_dev->channels = inv_icm20602_channels; indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels); + indio_dev->available_scan_masks = inv_icm20602_scan_masks; } else { indio_dev->channels = inv_mpu_channels; indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); + indio_dev->available_scan_masks = inv_mpu_scan_masks; } indio_dev->info = &mpu_info; diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 45d5164e9574..b79890739a2c 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, int ret; port_counter = &dev->port_data[port].port_counter; + if (!port_counter->hstats) + return -EOPNOTSUPP; + mutex_lock(&port_counter->lock); if (on) { ret = __counter_set_mode(&port_counter->mode, @@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, if (!rdma_is_port_valid(dev, port)) return -EINVAL; + if (!dev->port_data[port].port_counter.hstats) + return -EOPNOTSUPP; + qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 783e465e7c41..87d40d1ecdde 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || - nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) + nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { + ret = -EMSGSIZE; goto err_msg; + } if ((mode == RDMA_COUNTER_MODE_AUTO) && - nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) + nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { + ret = -EMSGSIZE; goto err_msg; + } nlmsg_end(msg, nlh); ib_device_put(device); diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 2a75c6f8d827..c0e15db34680 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp, * prevent any further fault handling on this MR. */ ib_umem_notifier_start_account(umem_odp); - umem_odp->dying = 1; - /* Make sure that the fact the umem is dying is out before we release - * all pending page faults. 
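The inv_mpu6050 hunks above register available_scan_masks so the IIO core only enables channel combinations the hardware FIFO can actually produce: accel-only, gyro-only, or both, with the temperature slot always included on ICM20602 because it is always present in the FIFO frame. The core then picks the smallest advertised superset of whatever userspace requests. A minimal sketch of the idea with hypothetical channel indices:

#include <linux/bits.h>
#include <linux/iio/iio.h>

enum { DEMO_SCAN_X, DEMO_SCAN_Y, DEMO_SCAN_Z, DEMO_SCAN_TEMP };

/* Zero-terminated list of scan masks the FIFO layout supports;
 * the temperature slot is always part of the hardware frame. */
static const unsigned long demo_scan_masks[] = {
	BIT(DEMO_SCAN_X) | BIT(DEMO_SCAN_Y) | BIT(DEMO_SCAN_Z) |
	BIT(DEMO_SCAN_TEMP),
	0,
};

/* In probe(), before iio_device_register():
 *	indio_dev->available_scan_masks = demo_scan_masks;
 */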
*/ - smp_wmb(); complete_all(&umem_odp->notifier_completion); umem_odp->umem.context->invalidate_range( umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp)); diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index ec4370f99381..af5bbb35c058 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)( event_sub->eventfd = eventfd_ctx_fdget(redirect_fd); - if (IS_ERR(event_sub)) { + if (IS_ERR(event_sub->eventfd)) { err = PTR_ERR(event_sub->eventfd); event_sub->eventfd = NULL; goto err; @@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp) struct devx_async_event_file *ev_file = filp->private_data; struct devx_event_subscription *event_sub, *event_sub_tmp; struct devx_async_event_data *entry, *tmp; + struct mlx5_ib_dev *dev = ev_file->dev; - mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock); + mutex_lock(&dev->devx_event_table.event_xa_lock); /* delete the subscriptions which are related to this FD */ list_for_each_entry_safe(event_sub, event_sub_tmp, &ev_file->subscribed_events_list, file_list) { - devx_cleanup_subscription(ev_file->dev, event_sub); + devx_cleanup_subscription(dev, event_sub); if (event_sub->eventfd) eventfd_ctx_put(event_sub->eventfd); @@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp) kfree_rcu(event_sub, rcu); } - mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock); + mutex_unlock(&dev->devx_event_table.event_xa_lock); /* free the pending events allocation */ if (!ev_file->omit_data) { @@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp) } uverbs_close_fd(filp); - put_device(&ev_file->dev->ib_dev.dev); + put_device(&dev->ib_dev.dev); return 0; } diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 81da82050d05..1d257d1b3b0d 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, u32 flags) { int npages = 0, current_seq, page_shift, ret, np; - bool implicit = false; struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; @@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, if (IS_ERR(odp)) return PTR_ERR(odp); mr = odp->private; - implicit = true; } else { odp = odp_mr; } @@ -682,19 +680,15 @@ next_mr: out: if (ret == -EAGAIN) { - if (implicit || !odp->dying) { - unsigned long timeout = - msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); - - if (!wait_for_completion_timeout( - &odp->notifier_completion, - timeout)) { - mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n", - current_seq, odp->notifiers_seq, odp->notifiers_count); - } - } else { - /* The MR is being killed, kill the QP as well. */ - ret = -EFAULT; + unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); + + if (!wait_for_completion_timeout(&odp->notifier_completion, + timeout)) { + mlx5_ib_warn( + dev, + "timeout waiting for mmu notifier. seq %d against %d. 
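The mlx5 devx hunk above fixes a classic copy-paste slip: eventfd_ctx_fdget() had its result assigned to event_sub->eventfd while the error test was done on event_sub itself, so failures were never caught. A minimal sketch of the corrected pattern (struct demo_subscription and demo_bind_eventfd() are hypothetical):

#include <linux/err.h>
#include <linux/eventfd.h>

struct demo_subscription {
	struct eventfd_ctx *eventfd;
};

static int demo_bind_eventfd(struct demo_subscription *sub, int fd)
{
	sub->eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(sub->eventfd)) {	/* test what was just assigned */
		int err = PTR_ERR(sub->eventfd);

		sub->eventfd = NULL;
		return err;
	}
	return 0;
}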
notifiers_count=%d\n", + current_seq, odp->notifiers_seq, + odp->notifiers_count); } } diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig index dace276aea14..b622fc62f2cd 100644 --- a/drivers/infiniband/sw/siw/Kconfig +++ b/drivers/infiniband/sw/siw/Kconfig @@ -1,6 +1,6 @@ config RDMA_SIW tristate "Software RDMA over TCP/IP (iWARP) driver" - depends on INET && INFINIBAND && LIBCRC32C && 64BIT + depends on INET && INFINIBAND && LIBCRC32C select DMA_VIRT_OPS help This driver implements the iWARP RDMA transport over diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 03fd7b2f595f..77b1aabf6ff3 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -214,7 +214,7 @@ struct siw_wqe { struct siw_cq { struct ib_cq base_cq; spinlock_t lock; - u64 *notify; + struct siw_cq_ctrl *notify; struct siw_cqe *queue; u32 cq_put; u32 cq_get; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index d0f140daf659..05a92f997f60 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -160,10 +160,8 @@ static int siw_init_cpulist(void) out_err: siw_cpu_info.num_nodes = 0; - while (i) { + while (--i >= 0) kfree(siw_cpu_info.tx_valid_cpus[i]); - siw_cpu_info.tx_valid_cpus[i--] = NULL; - } kfree(siw_cpu_info.tx_valid_cpus); siw_cpu_info.tx_valid_cpus = NULL; diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index e27bd5b35b96..0990307c5d2c 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -1013,18 +1013,24 @@ out: */ static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags) { - u64 cq_notify; + u32 cq_notify; if (!cq->base_cq.comp_handler) return false; - cq_notify = READ_ONCE(*cq->notify); + /* Read application shared notification state */ + cq_notify = READ_ONCE(cq->notify->flags); if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) || ((cq_notify & SIW_NOTIFY_SOLICITED) && (flags & SIW_WQE_SOLICITED))) { - /* dis-arm CQ */ - smp_store_mb(*cq->notify, SIW_NOTIFY_NOT); + /* + * CQ notification is one-shot: Since the + * current CQE causes user notification, + * the CQ gets dis-aremd and must be re-aremd + * by the user for a new notification. + */ + WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT); return true; } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 32dc79d0e898..e7f3a2379d9d 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr, spin_lock_init(&cq->lock); - cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify; + cq->notify = (struct siw_cq_ctrl *)&cq->queue[size]; if (udata) { struct siw_uresp_create_cq uresp = {}; @@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags) siw_dbg_cq(cq, "flags: 0x%02x\n", flags); if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) - /* CQ event for next solicited completion */ - smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED); + /* + * Enable CQ event for next solicited completion. + * and make it visible to all associated producers. + */ + smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED); else - /* CQ event for any signalled completion */ - smp_store_mb(*cq->notify, SIW_NOTIFY_ALL); + /* + * Enable CQ event for any signalled completion. + * and make it visible to all associated producers. 
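The siw hunks above and below retype the CQ notification word from a bare u64 pointer to a struct siw_cq_ctrl and access its 32-bit flags with READ_ONCE()/WRITE_ONCE(): arming is one-shot, so the first completion that qualifies disarms the CQ until the consumer re-arms it via req_notify_cq. A minimal sketch of that check, with hypothetical flag values:

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_cq_ctrl { u32 flags; };	/* shared with the consumer */

#define DEMO_NOTIFY_NOT		0U
#define DEMO_NOTIFY_SOLICITED	BIT(0)
#define DEMO_NOTIFY_NEXT	BIT(1)

static bool demo_notify_now(struct demo_cq_ctrl *ctrl, bool solicited)
{
	u32 armed = READ_ONCE(ctrl->flags);

	if ((armed & DEMO_NOTIFY_NEXT) ||
	    ((armed & DEMO_NOTIFY_SOLICITED) && solicited)) {
		/* One-shot: disarm until the consumer re-arms the CQ. */
		WRITE_ONCE(ctrl->flags, DEMO_NOTIFY_NOT);
		return true;
	}
	return false;
}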
+ */ + smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL); if (flags & IB_CQ_REPORT_MISSED_EVENTS) return cq->cq_put - cq->cq_get; diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index 29abfeeef9a5..6c554c11a7ac 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -201,7 +201,12 @@ static int iforce_usb_probe(struct usb_interface *intf, return -ENODEV; epirq = &interface->endpoint[0].desc; + if (!usb_endpoint_is_int_in(epirq)) + return -ENODEV; + epout = &interface->endpoint[1].desc; + if (!usb_endpoint_is_int_out(epout)) + return -ENODEV; iforce_usb = kzalloc(sizeof(*iforce_usb), GFP_KERNEL); if (!iforce_usb) diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 8e9c3ea9d5e7..90e8a7f2f07c 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -76,6 +76,8 @@ config KEYBOARD_APPLESPI depends on ACPI && EFI depends on SPI depends on X86 || COMPILE_TEST + depends on LEDS_CLASS + select CRC16 help Say Y here if you are running Linux on any Apple MacBook8,1 or later, or any MacBookPro13,* or MacBookPro14,*. diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index 548737e7aeda..584289b67fb3 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -134,10 +134,10 @@ struct keyboard_protocol { * struct tp_finger - single trackpad finger structure, le16-aligned * * @origin: zero when switching track finger - * @abs_x: absolute x coodinate - * @abs_y: absolute y coodinate - * @rel_x: relative x coodinate - * @rel_y: relative y coodinate + * @abs_x: absolute x coordinate + * @abs_y: absolute y coordinate + * @rel_x: relative x coordinate + * @rel_y: relative y coordinate * @tool_major: tool area, major axis * @tool_minor: tool area, minor axis * @orientation: 16384 when point, else 15 bit angle @@ -944,10 +944,14 @@ static inline int le16_to_int(__le16 x) static void applespi_debug_update_dimensions(struct applespi_data *applespi, const struct tp_finger *f) { - applespi->tp_dim_min_x = min_t(int, applespi->tp_dim_min_x, f->abs_x); - applespi->tp_dim_max_x = max_t(int, applespi->tp_dim_max_x, f->abs_x); - applespi->tp_dim_min_y = min_t(int, applespi->tp_dim_min_y, f->abs_y); - applespi->tp_dim_max_y = max_t(int, applespi->tp_dim_max_y, f->abs_y); + applespi->tp_dim_min_x = min(applespi->tp_dim_min_x, + le16_to_int(f->abs_x)); + applespi->tp_dim_max_x = max(applespi->tp_dim_max_x, + le16_to_int(f->abs_x)); + applespi->tp_dim_min_y = min(applespi->tp_dim_min_y, + le16_to_int(f->abs_y)); + applespi->tp_dim_max_y = max(applespi->tp_dim_max_y, + le16_to_int(f->abs_y)); } static int applespi_tp_dim_open(struct inode *inode, struct file *file) @@ -1490,8 +1494,7 @@ static void applespi_got_data(struct applespi_data *applespi) size_t tp_len; tp = &message->touchpad; - tp_len = sizeof(*tp) + - tp->number_of_fingers * sizeof(tp->fingers[0]); + tp_len = struct_size(tp, fingers, tp->number_of_fingers); if (le16_to_cpu(message->length) + 2 != tp_len) { dev_warn_ratelimited(&applespi->spi->dev, @@ -1611,8 +1614,8 @@ static void applespi_save_bl_level(struct applespi_data *applespi, efi_attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS; - sts = efivar_entry_set_safe(EFI_BL_LEVEL_NAME, efi_guid, efi_attr, true, - efi_data_len, &efi_data); + sts = efivar_entry_set_safe((efi_char16_t *)EFI_BL_LEVEL_NAME, efi_guid, + efi_attr, true, efi_data_len, 
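The applespi hunk above computes the trackpad packet length with struct_size(), which sizes a structure with a trailing flexible array and saturates instead of wrapping when the element count comes from the device. A minimal sketch with hypothetical report types (struct demo_finger and struct demo_touchpad_report are not the driver's structures):

#include <linux/overflow.h>	/* struct_size() */
#include <linux/types.h>

struct demo_finger { __le16 x, y; };

struct demo_touchpad_report {
	u8 number_of_fingers;
	struct demo_finger fingers[];
};

static size_t demo_report_len(const struct demo_touchpad_report *tp)
{
	/* Equivalent to sizeof(*tp) + n * sizeof(tp->fingers[0]),
	 * but with overflow checking built in. */
	return struct_size(tp, fingers, tp->number_of_fingers);
}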
&efi_data); if (sts) dev_warn(&applespi->spi->dev, "Error saving backlight level to EFI vars: %d\n", sts); @@ -1953,7 +1956,7 @@ static const struct acpi_device_id applespi_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, applespi_acpi_match); -const struct dev_pm_ops applespi_pm_ops = { +static const struct dev_pm_ops applespi_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(applespi_suspend, applespi_resume) .poweroff_late = applespi_poweroff_late, }; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 2d8434b7b623..04fe43440a3c 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1827,6 +1827,31 @@ static int elantech_create_smbus(struct psmouse *psmouse, leave_breadcrumbs); } +static bool elantech_use_host_notify(struct psmouse *psmouse, + struct elantech_device_info *info) +{ + if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version)) + return true; + + switch (info->bus) { + case ETP_BUS_PS2_ONLY: + /* expected case */ + break; + case ETP_BUS_SMB_HST_NTFY_ONLY: + case ETP_BUS_PS2_SMB_HST_NTFY: + /* SMbus implementation is stable since 2018 */ + if (dmi_get_bios_year() >= 2018) + return true; + /* fall through */ + default: + psmouse_dbg(psmouse, + "Ignoring SMBus bus provider %d\n", info->bus); + break; + } + + return false; +} + /** * elantech_setup_smbus - called once the PS/2 devices are enumerated * and decides to instantiate a SMBus InterTouch device. @@ -1846,7 +1871,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse, * i2c_blacklist_pnp_ids. * Old ICs are up to the user to decide. */ - if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) || + if (!elantech_use_host_notify(psmouse, info) || psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids)) return -ENXIO; } @@ -1866,34 +1891,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse, return 0; } -static bool elantech_use_host_notify(struct psmouse *psmouse, - struct elantech_device_info *info) -{ - if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version)) - return true; - - switch (info->bus) { - case ETP_BUS_PS2_ONLY: - /* expected case */ - break; - case ETP_BUS_SMB_ALERT_ONLY: - /* fall-through */ - case ETP_BUS_PS2_SMB_ALERT: - psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n"); - break; - case ETP_BUS_SMB_HST_NTFY_ONLY: - /* fall-through */ - case ETP_BUS_PS2_SMB_HST_NTFY: - return true; - default: - psmouse_dbg(psmouse, - "Ignoring SMBus bus provider %d.\n", - info->bus); - } - - return false; -} - int elantech_init_smbus(struct psmouse *psmouse) { struct elantech_device_info info; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index b1956ed4c0dd..46bbe99d6511 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = { "LEN2055", /* E580 */ "SYN3052", /* HP EliteBook 840 G4 */ "SYN3221", /* HP 15-ay000 */ + "SYN323d", /* HP Spectre X360 13-w013dx */ NULL }; diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 04b85571f41e..aa577898e952 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -117,6 +117,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i if (intf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; + endpoint = &intf->cur_altsetting->endpoint[0].desc; + if (!usb_endpoint_is_int_in(endpoint)) + return -ENODEV; + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) @@ 
-155,8 +159,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0); - endpoint = &intf->cur_altsetting->endpoint[0].desc; - usb_fill_int_urb(kbtab->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), kbtab->data, 8, diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index a2cec6cacf57..16d70201de4a 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -1659,6 +1659,8 @@ static int usbtouch_probe(struct usb_interface *intf, if (!usbtouch || !input_dev) goto out_free; + mutex_init(&usbtouch->pm_mutex); + type = &usbtouch_dev_info[id->driver_info]; usbtouch->type = type; if (!type->process_pkt) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index a9a9fabd3968..c5c93e48b4db 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, ste_live = true; break; case STRTAB_STE_0_CFG_ABORT: - if (disable_bypass) - break; + BUG_ON(!disable_bypass); + break; default: BUG(); /* STE corruption */ } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index a7f9c3edbcb2..d991d40f797f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, { struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; - size_t iova_off = 0; + struct iova_domain *iovad = &cookie->iovad; + size_t iova_off = iova_offset(iovad, phys); dma_addr_t iova; - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { - iova_off = iova_offset(&cookie->iovad, phys); - size = iova_align(&cookie->iovad, size + iova_off); - } + size = iova_align(iovad, size + iova_off); iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); if (!iova) @@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, struct iova_domain *iovad = &cookie->iovad; bool coherent = dev_is_dma_coherent(dev); int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; struct page **pages; struct sg_table sgt; @@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, * - and wouldn't make the resulting output segment too long */ if (cur_len && !s_iova_off && (dma_addr & seg_mask) && - (cur_len + s_length <= max_len)) { + (max_len - cur_len >= s_length)) { /* ...then concatenate it with the previous one */ cur_len += s_length; } else { @@ -975,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, return NULL; if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); cpu_addr = dma_common_contiguous_remap(page, alloc_size, VM_USERMAP, prot, __builtin_return_address(0)); @@ -1035,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long pfn, off = vma->vm_pgoff; int ret; - vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); + 
vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; @@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (!msi_page) return NULL; - iova = __iommu_dma_map(dev, msi_addr, size, prot); - if (iova == DMA_MAPPING_ERROR) + iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); + if (!iova) goto out_free_page; + if (iommu_map(domain, iova, msi_addr, size, prot)) + goto out_free_iova; + INIT_LIST_HEAD(&msi_page->list); msi_page->phys = msi_addr; msi_page->iova = iova; list_add(&msi_page->list, &cookie->msi_page_list); return msi_page; +out_free_iova: + iommu_dma_free_iova(cookie, iova, size); out_free_page: kfree(msi_page); return NULL; diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c index 2b25d9c59336..471f05d452e0 100644 --- a/drivers/iommu/intel-iommu-debugfs.c +++ b/drivers/iommu/intel-iommu-debugfs.c @@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus) tbl_wlk.ctx_entry = context; m->private = &tbl_wlk; - if (pasid_supported(iommu) && is_pasid_enabled(context)) { + if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) { pasid_dir_ptr = context->lo & VTD_PAGE_MASK; pasid_dir_size = get_pasid_dir_size(context); pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index bdaed2da8a55..12d094d08c0a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev) dmar_domain = to_dmar_domain(domain); dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; } + dmar_remove_one_dev_info(dev); get_private_domain_for_dev(dev); } @@ -4790,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) /* free the private domain */ if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN && - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && + list_empty(&domain->devices)) domain_exit(info->domain); free_devinfo_mem(info); @@ -4803,7 +4805,8 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_lock_irqsave(&device_domain_lock, flags); info = dev->archdata.iommu; - __dmar_remove_one_dev_info(info); + if (info) + __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); } @@ -5281,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev) if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) { ret = iommu_request_dm_for_dev(dev); if (ret) { + dmar_remove_one_dev_info(dev); dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; domain_add_dev_info(si_domain, dev); dev_info(dev, @@ -5291,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev) if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) { ret = iommu_request_dma_domain_for_dev(dev); if (ret) { + dmar_remove_one_dev_info(dev); dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; if (!get_private_domain_for_dev(dev)) { dev_warn(dev, @@ -5316,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev) if (!iommu) return; + dmar_remove_one_dev_info(dev); + iommu_group_remove_device(dev); iommu_device_unlink(&iommu->iommu, dev); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 9f0826712845..e2059af90791 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -23,24 +23,28 @@ static const char * const 
bch_cache_modes[] = { "writethrough", "writeback", "writearound", - "none" + "none", + NULL }; /* Default is 0 ("auto") */ static const char * const bch_stop_on_failure_modes[] = { "auto", - "always" + "always", + NULL }; static const char * const cache_replacement_policies[] = { "lru", "fifo", - "random" + "random", + NULL }; static const char * const error_actions[] = { "unregister", - "panic" + "panic", + NULL }; write_attribute(attach); @@ -338,7 +342,7 @@ STORE(__cached_dev) } if (attr == &sysfs_cache_mode) { - v = sysfs_match_string(bch_cache_modes, buf); + v = __sysfs_match_string(bch_cache_modes, -1, buf); if (v < 0) return v; @@ -349,7 +353,7 @@ STORE(__cached_dev) } if (attr == &sysfs_stop_when_cache_set_failed) { - v = sysfs_match_string(bch_stop_on_failure_modes, buf); + v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf); if (v < 0) return v; @@ -816,7 +820,7 @@ STORE(__bch_cache_set) 0, UINT_MAX); if (attr == &sysfs_errors) { - v = sysfs_match_string(error_actions, buf); + v = __sysfs_match_string(error_actions, -1, buf); if (v < 0) return v; @@ -1088,7 +1092,7 @@ STORE(__bch_cache) } if (attr == &sysfs_cache_replacement_policy) { - v = sysfs_match_string(cache_replacement_policies, buf); + v = __sysfs_match_string(cache_replacement_policies, -1, buf); if (v < 0) return v; diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c index 29e3f5da59c1..11ec048929e8 100644 --- a/drivers/media/platform/omap/omap_vout_vrfb.c +++ b/drivers/media/platform/omap/omap_vout_vrfb.c @@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout, */ pixsize = vout->bpp * vout->vrfb_bpp; - dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) - - (vout->pix.width * vout->bpp)) + 1; + dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp; xt->src_start = vout->buf_phy_addr[vb->i]; xt->dst_start = vout->vrfb_context[vb->i].paddr[0]; diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index bc2a176937a4..d535aac68ce1 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -1099,6 +1099,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) /* start creating the vb2 queues */ if (dev->has_vid_cap) { + snprintf(dev->vid_cap_dev.name, sizeof(dev->vid_cap_dev.name), + "vivid-%03d-vid-cap", inst); /* initialize vid_cap queue */ q = &dev->vb_vid_cap_q; q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : @@ -1122,6 +1124,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) } if (dev->has_vid_out) { + snprintf(dev->vid_out_dev.name, sizeof(dev->vid_out_dev.name), + "vivid-%03d-vid-out", inst); /* initialize vid_out queue */ q = &dev->vb_vid_out_q; q->type = dev->multiplanar ? 
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : @@ -1265,8 +1269,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) /* finally start creating the device nodes */ if (dev->has_vid_cap) { vfd = &dev->vid_cap_dev; - snprintf(vfd->name, sizeof(vfd->name), - "vivid-%03d-vid-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vid_cap_caps; @@ -1312,8 +1314,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) if (dev->has_vid_out) { vfd = &dev->vid_out_dev; - snprintf(vfd->name, sizeof(vfd->name), - "vivid-%03d-vid-out", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 3f21e26b8d36..90e0f21bc49c 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -1590,8 +1590,10 @@ static unsigned long dsiclk_rate(u8 n) switch (divsel) { case PRCM_DSI_PLLOUT_SEL_PHI_4: div *= 2; + /* Fall through */ case PRCM_DSI_PLLOUT_SEL_PHI_2: div *= 2; + /* Fall through */ case PRCM_DSI_PLLOUT_SEL_PHI: return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK), PLL_RAW) / div; diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 792b855a9104..4798d9f3f9d5 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -308,7 +308,7 @@ static int usbhs_runtime_resume(struct device *dev) i, r); } } - /* Fall through as HSIC mode needs utmi_clk */ + /* Fall through - as HSIC mode needs utmi_clk */ case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) { @@ -344,7 +344,7 @@ static int usbhs_runtime_suspend(struct device *dev) if (!IS_ERR(omap->hsic480m_clk[i])) clk_disable_unprepare(omap->hsic480m_clk[i]); - /* Fall through as utmi_clks were used in HSIC mode */ + /* Fall through - as utmi_clks were used in HSIC mode */ case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 6abfc8e92fcc..16900357afc2 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -465,6 +465,7 @@ config PCI_ENDPOINT_TEST config XILINX_SDFEC tristate "Xilinx SDFEC 16" + depends on HAS_IOMEM help This option enables support for the Xilinx SDFEC (Soft Decision Forward Error Correction) driver. 
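The db8500-prcmu and omap-usb-host hunks above only add or reword "Fall through" comments on intentional switch cascades, the annotation that tools such as GCC's -Wimplicit-fallthrough look for. As a hedged, stand-alone illustration (not taken from the patch; the base divider of 2 and the enum names are assumptions), a divider cascade in the style of dsiclk_rate() looks like this:

#include <stdio.h>

enum pllout_sel { SEL_PHI_4, SEL_PHI_2, SEL_PHI };

/* Each deeper selection doubles the divider; the cascade is intentional. */
static unsigned int dsi_divider(enum pllout_sel divsel)
{
	unsigned int div = 2;	/* assumed base divider for illustration */

	switch (divsel) {
	case SEL_PHI_4:
		div *= 2;
		/* Fall through */
	case SEL_PHI_2:
		div *= 2;
		/* Fall through */
	case SEL_PHI:
		return div;	/* PHI: 2, PHI_2: 4, PHI_4: 8 */
	}
	return 0;
}

int main(void)
{
	printf("%u %u %u\n", dsi_divider(SEL_PHI), dsi_divider(SEL_PHI_2),
	       dsi_divider(SEL_PHI_4));
	return 0;
}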
This enables a char driver diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index 6ad83d5ef4b0..f00d1c32f6d6 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -683,7 +683,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) rc = hl_poll_timeout_memory(hdev, &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1), - 100, jiffies_to_usecs(hdev->timeout_jiffies)); + 100, jiffies_to_usecs(hdev->timeout_jiffies), false); if (rc == -ETIMEDOUT) { dev_err(hdev->dev, diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 0c4894dd9c02..7a8f9d0b71b5 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -970,7 +970,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); if (rc) { dev_err(hdev->dev, "failed to initialize kernel context\n"); - goto free_ctx; + kfree(hdev->kernel_ctx); + goto mmu_fini; } rc = hl_cb_pool_init(hdev); @@ -1053,8 +1054,6 @@ release_ctx: if (hl_ctx_put(hdev->kernel_ctx) != 1) dev_err(hdev->dev, "kernel ctx is still alive on initialization failure\n"); -free_ctx: - kfree(hdev->kernel_ctx); mmu_fini: hl_mmu_fini(hdev); eq_fini: diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c index cc8168bacb24..ea2ca67fbfbf 100644 --- a/drivers/misc/habanalabs/firmware_if.c +++ b/drivers/misc/habanalabs/firmware_if.c @@ -24,7 +24,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name, { const struct firmware *fw; const u64 *fw_data; - size_t fw_size, i; + size_t fw_size; int rc; rc = request_firmware(&fw, fw_name, hdev->dev); @@ -45,22 +45,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name, fw_data = (const u64 *) fw->data; - if ((fw->size % 8) != 0) - fw_size -= 8; - - for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) { - if (!(i & (0x80000 - 1))) { - dev_dbg(hdev->dev, - "copied so far %zu out of %zu for %s firmware", - i, fw_size, fw_name); - usleep_range(20, 100); - } - - writeq(*fw_data, dst); - } - - if ((fw->size % 8) != 0) - writel(*(const u32 *) fw_data, dst); + memcpy_toio(dst, fw_data, fw_size); out: release_firmware(fw); @@ -112,7 +97,8 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, } rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp, - (tmp == ARMCP_PACKET_FENCE_VAL), 1000, timeout); + (tmp == ARMCP_PACKET_FENCE_VAL), 1000, + timeout, true); hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 1a2c062a57d4..271c5c8f53b4 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2729,9 +2729,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) GOYA_ASYNC_EVENT_ID_PI_UPDATE); } -void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val) +void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd) { - /* Not needed in Goya */ + /* The QMANs are on the SRAM so need to copy to IO space */ + memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd)); } static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, @@ -2864,7 +2865,8 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) } rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, - (tmp == GOYA_QMAN0_FENCE_VAL), 1000, timeout); + (tmp == 
GOYA_QMAN0_FENCE_VAL), 1000, + timeout, true); hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0); @@ -2945,7 +2947,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id) } rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val), - 1000, GOYA_TEST_QUEUE_WAIT_USEC); + 1000, GOYA_TEST_QUEUE_WAIT_USEC, true); hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); @@ -3312,9 +3314,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev, int rc; dev_dbg(hdev->dev, "DMA packet details:\n"); - dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); - dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); - dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); + dev_dbg(hdev->dev, "source == 0x%llx\n", + le64_to_cpu(user_dma_pkt->src_addr)); + dev_dbg(hdev->dev, "destination == 0x%llx\n", + le64_to_cpu(user_dma_pkt->dst_addr)); + dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); ctl = le32_to_cpu(user_dma_pkt->ctl); user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> @@ -3343,9 +3347,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev, struct packet_lin_dma *user_dma_pkt) { dev_dbg(hdev->dev, "DMA packet details:\n"); - dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); - dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); - dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); + dev_dbg(hdev->dev, "source == 0x%llx\n", + le64_to_cpu(user_dma_pkt->src_addr)); + dev_dbg(hdev->dev, "destination == 0x%llx\n", + le64_to_cpu(user_dma_pkt->dst_addr)); + dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); /* * WA for HW-23. @@ -3385,7 +3391,8 @@ static int goya_validate_wreg32(struct hl_device *hdev, dev_dbg(hdev->dev, "WREG32 packet details:\n"); dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset); - dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value); + dev_dbg(hdev->dev, "value == 0x%x\n", + le32_to_cpu(wreg_pkt->value)); if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) { dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n", @@ -3427,12 +3434,13 @@ static int goya_validate_cb(struct hl_device *hdev, while (cb_parsed_length < parser->user_cb_size) { enum packet_id pkt_id; u16 pkt_size; - void *user_pkt; + struct goya_packet *user_pkt; - user_pkt = (void *) (uintptr_t) + user_pkt = (struct goya_packet *) (uintptr_t) (parser->user_cb->kernel_address + cb_parsed_length); - pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & + pkt_id = (enum packet_id) ( + (le64_to_cpu(user_pkt->header) & PACKET_HEADER_PACKET_ID_MASK) >> PACKET_HEADER_PACKET_ID_SHIFT); @@ -3452,7 +3460,8 @@ static int goya_validate_cb(struct hl_device *hdev, * need to validate here as well because patch_cb() is * not called in MMU path while this function is called */ - rc = goya_validate_wreg32(hdev, parser, user_pkt); + rc = goya_validate_wreg32(hdev, + parser, (struct packet_wreg32 *) user_pkt); break; case PACKET_WREG_BULK: @@ -3480,10 +3489,10 @@ static int goya_validate_cb(struct hl_device *hdev, case PACKET_LIN_DMA: if (is_mmu) rc = goya_validate_dma_pkt_mmu(hdev, parser, - user_pkt); + (struct packet_lin_dma *) user_pkt); else rc = goya_validate_dma_pkt_no_mmu(hdev, parser, - user_pkt); + (struct packet_lin_dma *) user_pkt); break; case PACKET_MSG_LONG: @@ -3656,15 +3665,16 @@ static int goya_patch_cb(struct hl_device *hdev, enum packet_id pkt_id; u16 pkt_size; u32 new_pkt_size = 0; - void *user_pkt, *kernel_pkt; + struct goya_packet *user_pkt, *kernel_pkt; - 
user_pkt = (void *) (uintptr_t) + user_pkt = (struct goya_packet *) (uintptr_t) (parser->user_cb->kernel_address + cb_parsed_length); - kernel_pkt = (void *) (uintptr_t) + kernel_pkt = (struct goya_packet *) (uintptr_t) (parser->patched_cb->kernel_address + cb_patched_cur_length); - pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & + pkt_id = (enum packet_id) ( + (le64_to_cpu(user_pkt->header) & PACKET_HEADER_PACKET_ID_MASK) >> PACKET_HEADER_PACKET_ID_SHIFT); @@ -3679,15 +3689,18 @@ static int goya_patch_cb(struct hl_device *hdev, switch (pkt_id) { case PACKET_LIN_DMA: - rc = goya_patch_dma_packet(hdev, parser, user_pkt, - kernel_pkt, &new_pkt_size); + rc = goya_patch_dma_packet(hdev, parser, + (struct packet_lin_dma *) user_pkt, + (struct packet_lin_dma *) kernel_pkt, + &new_pkt_size); cb_patched_cur_length += new_pkt_size; break; case PACKET_WREG_32: memcpy(kernel_pkt, user_pkt, pkt_size); cb_patched_cur_length += pkt_size; - rc = goya_validate_wreg32(hdev, parser, kernel_pkt); + rc = goya_validate_wreg32(hdev, parser, + (struct packet_wreg32 *) kernel_pkt); break; case PACKET_WREG_BULK: @@ -4351,6 +4364,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, size_t total_pkt_size; long result; int rc; + int irq_num_entries, irq_arr_index; + __le32 *goya_irq_arr; total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) + irq_arr_size; @@ -4368,8 +4383,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, if (!pkt) return -ENOMEM; - pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0])); - memcpy(&pkt->irqs, irq_arr, irq_arr_size); + irq_num_entries = irq_arr_size / sizeof(irq_arr[0]); + pkt->length = cpu_to_le32(irq_num_entries); + + /* We must perform any necessary endianness conversation on the irq + * array being passed to the goya hardware + */ + for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs; + irq_arr_index < irq_num_entries ; irq_arr_index++) + goya_irq_arr[irq_arr_index] = + cpu_to_le32(irq_arr[irq_arr_index]); pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << ARMCP_PKT_CTL_OPCODE_SHIFT); @@ -5041,7 +5064,7 @@ static const struct hl_asic_funcs goya_funcs = { .resume = goya_resume, .cb_mmap = goya_cb_mmap, .ring_doorbell = goya_ring_doorbell, - .flush_pq_write = goya_flush_pq_write, + .pqe_write = goya_pqe_write, .asic_dma_alloc_coherent = goya_dma_alloc_coherent, .asic_dma_free_coherent = goya_dma_free_coherent, .get_int_queue_base = goya_get_int_queue_base, diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index f8c611883dc1..d7f48c9c41cd 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -177,7 +177,7 @@ int goya_late_init(struct hl_device *hdev); void goya_late_fini(struct hl_device *hdev); void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi); -void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val); +void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd); void goya_update_eq_ci(struct hl_device *hdev, u32 val); void goya_restore_phase_topology(struct hl_device *hdev); int goya_context_switch(struct hl_device *hdev, u32 asid); diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 10da9940ee0d..ce83adafcf2d 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -441,7 +441,11 @@ enum hl_pll_frequency { * @resume: handles IP specific H/W or SW changes for resume. 
* @cb_mmap: maps a CB. * @ring_doorbell: increment PI on a given QMAN. - * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. + * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific + * function because the PQs are located in different memory areas + * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of + * writing the PQE must match the destination memory area + * properties. * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling * dma_alloc_coherent(). This is ASIC function because * its implementation is not trivial when the driver @@ -510,7 +514,8 @@ struct hl_asic_funcs { int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, u64 kaddress, phys_addr_t paddress, u32 size); void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); - void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); + void (*pqe_write)(struct hl_device *hdev, __le64 *pqe, + struct hl_bd *bd); void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle, gfp_t flag); void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, @@ -1062,9 +1067,17 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); /* * address in this macro points always to a memory location in the * host's (server's) memory. That location is updated asynchronously - * either by the direct access of the device or by another core + * either by the direct access of the device or by another core. + * + * To work both in LE and BE architectures, we need to distinguish between the + * two states (device or another core updates the memory location). Therefore, + * if mem_written_by_device is true, the host memory being polled will be + * updated directly by the device. If false, the host memory being polled will + * be updated by host CPU. Required so host knows whether or not the memory + * might need to be byte-swapped before returning value to caller. 
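The macro documentation above explains why hl_poll_timeout_memory() gains a mem_written_by_device argument: a value the device wrote into host memory is little-endian, while a value written by another host core is already in CPU byte order, so only the former may need swapping. A minimal user-space sketch of that distinction, assuming le32toh()/htole32() from <endian.h> as stand-ins for the kernel's le32_to_cpu()/cpu_to_le32() and a made-up fence value:

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FENCE_VAL 0xcafecafeu	/* hypothetical completion marker */

static bool fence_reached(const volatile uint32_t *addr,
			  bool mem_written_by_device)
{
	uint32_t val = *addr;

	/* Device writes little-endian; another CPU core writes native order. */
	if (mem_written_by_device)
		val = le32toh(val);

	return val == FENCE_VAL;
}

int main(void)
{
	uint32_t by_cpu = FENCE_VAL;		/* native byte order */
	uint32_t by_dev = htole32(FENCE_VAL);	/* as a device would write it */

	printf("%d %d\n", fence_reached(&by_cpu, false),
	       fence_reached(&by_dev, true));
	return 0;
}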
*/ -#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us) \ +#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \ + mem_written_by_device) \ ({ \ ktime_t __timeout; \ /* timeout should be longer when working with simulator */ \ @@ -1077,10 +1090,14 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); /* Verify we read updates done by other cores or by device */ \ mb(); \ (val) = *((u32 *) (uintptr_t) (addr)); \ + if (mem_written_by_device) \ + (val) = le32_to_cpu(val); \ if (cond) \ break; \ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \ (val) = *((u32 *) (uintptr_t) (addr)); \ + if (mem_written_by_device) \ + (val) = le32_to_cpu(val); \ break; \ } \ if (sleep_us) \ diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c index e3b5517897ea..5f5673b74985 100644 --- a/drivers/misc/habanalabs/hw_queue.c +++ b/drivers/misc/habanalabs/hw_queue.c @@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job) struct hl_device *hdev = job->cs->ctx->hdev; struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; struct hl_bd bd; - u64 *pi, *pbd = (u64 *) &bd; + __le64 *pi; bd.ctl = 0; - bd.len = __cpu_to_le32(job->job_cb_size); - bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb); + bd.len = cpu_to_le32(job->job_cb_size); + bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); - pi = (u64 *) (uintptr_t) (q->kernel_address + + pi = (__le64 *) (uintptr_t) (q->kernel_address + ((q->pi & (q->int_queue_len - 1)) * sizeof(bd))); - pi[0] = pbd[0]; - pi[1] = pbd[1]; - q->pi++; q->pi &= ((q->int_queue_len << 1) - 1); - /* Flush PQ entry write. Relevant only for specific ASICs */ - hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]); + hdev->asic_funcs->pqe_write(hdev, pi, &bd); hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); } diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h index a14407b975e4..ef54bad20509 100644 --- a/drivers/misc/habanalabs/include/goya/goya_packets.h +++ b/drivers/misc/habanalabs/include/goya/goya_packets.h @@ -52,6 +52,19 @@ enum goya_dma_direction { #define GOYA_PKT_CTL_MB_SHIFT 31 #define GOYA_PKT_CTL_MB_MASK 0x80000000 +/* All packets have, at least, an 8-byte header, which contains + * the packet type. The kernel driver uses the packet header for packet + * validation and to perform any necessary required preparation before + * sending them off to the hardware. + */ +struct goya_packet { + __le64 header; + /* The rest of the packet data follows. 
Use the corresponding + * packet_XXX struct to deference the data, based on packet type + */ + u8 contents[0]; +}; + struct packet_nop { __le32 reserved; __le32 ctl; diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c index ea9f72ff456c..199791b57caf 100644 --- a/drivers/misc/habanalabs/irq.c +++ b/drivers/misc/habanalabs/irq.c @@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg) struct hl_cs_job *job; bool shadow_index_valid; u16 shadow_index; - u32 *cq_entry; - u32 *cq_base; + struct hl_cq_entry *cq_entry, *cq_base; if (hdev->disabled) { dev_dbg(hdev->dev, @@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg) return IRQ_HANDLED; } - cq_base = (u32 *) (uintptr_t) cq->kernel_address; + cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address; while (1) { - bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK) + bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) & + CQ_ENTRY_READY_MASK) >> CQ_ENTRY_READY_SHIFT); if (!entry_ready) break; - cq_entry = (u32 *) &cq_base[cq->ci]; + cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci]; - /* - * Make sure we read CQ entry contents after we've + /* Make sure we read CQ entry contents after we've * checked the ownership bit. */ dma_rmb(); - shadow_index_valid = - ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) + shadow_index_valid = ((le32_to_cpu(cq_entry->data) & + CQ_ENTRY_SHADOW_INDEX_VALID_MASK) >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); - shadow_index = (u16) - ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK) + shadow_index = (u16) ((le32_to_cpu(cq_entry->data) & + CQ_ENTRY_SHADOW_INDEX_MASK) >> CQ_ENTRY_SHADOW_INDEX_SHIFT); queue = &hdev->kernel_queues[cq->hw_queue_id]; @@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg) queue_work(hdev->cq_wq, &job->finish_work); } - /* - * Update ci of the context's queue. There is no + /* Update ci of the context's queue. 
There is no * need to protect it with spinlock because this update is * done only inside IRQ and there is a different IRQ per * queue @@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg) queue->ci = hl_queue_inc_ptr(queue->ci); /* Clear CQ entry ready bit */ - cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK; + cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) & + ~CQ_ENTRY_READY_MASK); cq->ci = hl_cq_inc_ptr(cq->ci); diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index 42d237cae1dc..365fb0cb8dff 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c @@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) dev_dbg(hdev->dev, "page list 0x%p of asid %d is still alive\n", phys_pg_list, ctx->asid); + atomic64_sub(phys_pg_list->total_size, + &hdev->dram_used_mem); free_phys_pg_pack(hdev, phys_pg_list); idr_remove(&vm->phys_pg_pack_handles, i); } diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index ed5cefb83768..89deb451e0ac 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c @@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data) { data->bytes_xfered = data->blocks * data->blksz; data->error = 0; + dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); return 1; } @@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) mmc->max_segs = 1; /* DMA size field can address up to 8 MB */ - mmc->max_seg_size = 8 * 1024 * 1024; + mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024, + dma_get_max_seg_size(host->dev)); mmc->max_req_size = mmc->max_seg_size; /* External DMA is in 512 byte blocks */ mmc->max_blk_size = 512; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index b3a130a9ee23..1604f512c7bd 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -883,7 +883,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev) sdhci_acpi_byt_setting(&c->pdev->dev); - return sdhci_runtime_resume_host(c->host); + return sdhci_runtime_resume_host(c->host, 0); } #endif diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index c391510e9ef4..776a94216248 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1705,7 +1705,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev) esdhc_pltfm_set_clock(host, imx_data->actual_clock); } - err = sdhci_runtime_resume_host(host); + err = sdhci_runtime_resume_host(host, 0); if (err) goto disable_ipg_clk; diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index e377b9bc55a4..d4e7e8b7be77 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -289,7 +289,7 @@ static int sdhci_at91_runtime_resume(struct device *dev) } out: - return sdhci_runtime_resume_host(host); + return sdhci_runtime_resume_host(host, 0); } #endif /* CONFIG_PM */ diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 4041878eb0f3..7d06e2860c36 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -167,7 +167,7 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip) err_pci_runtime_suspend: while (--i >= 0) - sdhci_runtime_resume_host(chip->slots[i]->host); + sdhci_runtime_resume_host(chip->slots[i]->host, 0); return ret; } @@ -181,7 +181,7 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip 
*chip) if (!slot) continue; - ret = sdhci_runtime_resume_host(slot->host); + ret = sdhci_runtime_resume_host(slot->host, 0); if (ret) return ret; } diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 3ddecf479295..e55037ceda73 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -554,7 +554,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev) if (!IS_ERR(pxa->clk_core)) clk_prepare_enable(pxa->clk_core); - return sdhci_runtime_resume_host(host); + return sdhci_runtime_resume_host(host, 0); } #endif diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 8e4a8ba33f05..f5753aef7151 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -745,7 +745,7 @@ static int sdhci_s3c_runtime_resume(struct device *dev) clk_prepare_enable(busclk); if (ourhost->cur_clk >= 0) clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); - ret = sdhci_runtime_resume_host(host); + ret = sdhci_runtime_resume_host(host, 0); return ret; } #endif diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 603a5d9f045a..83a4767ca680 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -696,7 +696,7 @@ static int sdhci_sprd_runtime_resume(struct device *dev) if (ret) goto clk_disable; - sdhci_runtime_resume_host(host); + sdhci_runtime_resume_host(host, 1); return 0; clk_disable: diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 8a18f14cf842..1dea1ba66f7b 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -638,7 +638,7 @@ static int xenon_runtime_resume(struct device *dev) priv->restore_needed = false; } - ret = sdhci_runtime_resume_host(host); + ret = sdhci_runtime_resume_host(host, 0); if (ret) goto out; return 0; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 59acf8e3331e..a5dc5aae973e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3320,7 +3320,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) } EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); -int sdhci_runtime_resume_host(struct sdhci_host *host) +int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) { struct mmc_host *mmc = host->mmc; unsigned long flags; @@ -3331,7 +3331,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) host->ops->enable_dma(host); } - sdhci_init(host, 0); + sdhci_init(host, soft_reset); if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && mmc->ios.power_mode != MMC_POWER_OFF) { diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 89fd96596a1f..902f855efe8f 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -781,7 +781,7 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, int sdhci_suspend_host(struct sdhci_host *host); int sdhci_resume_host(struct sdhci_host *host); int sdhci_runtime_suspend_host(struct sdhci_host *host); -int sdhci_runtime_resume_host(struct sdhci_host *host); +int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset); #endif void sdhci_cqe_enable(struct mmc_host *mmc); diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 03cc788511d5..654bdc41fc99 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -3780,8 +3780,6 @@ static int spi_nor_init_params(struct spi_nor *nor, default: /* Kept only for backward compatibility purpose. 
*/ params->quad_enable = spansion_quad_enable; - if (nor->clear_sr_bp) - nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp; break; } @@ -4035,6 +4033,9 @@ static int spi_nor_init(struct spi_nor *nor) int err; if (nor->clear_sr_bp) { + if (nor->quad_enable == spansion_quad_enable) + nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp; + err = nor->clear_sr_bp(nor); if (err) { dev_err(nor->dev, diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 02fd7822c14a..931d9d935686 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1126,6 +1126,8 @@ static void bond_compute_features(struct bonding *bond) done: bond_dev->vlan_features = vlan_features; bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; bond_dev->mpls_features = mpls_features; bond_dev->gso_max_segs = gso_max_segs; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index d073baffc20b..df976b259e43 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1223,12 +1223,8 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; struct device *dev = ds->dev; - u16 rx_vid, tx_vid; int i; - rx_vid = dsa_8021q_rx_vid(ds, port); - tx_vid = dsa_8021q_tx_vid(ds, port); - for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { struct sja1105_l2_lookup_entry l2_lookup = {0}; u8 macaddr[ETH_ALEN]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e47ea92e2ae3..d10b421ed1f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations */ - if (IS_VF(bp)) + if (IS_VF(bp)) { + bnx2x_clear_vlan_info(bp); bnx2x_vfpf_close_vf(bp); - else if (unload_mode != UNLOAD_RECOVERY) + } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload need to clean up chip*/ bnx2x_chip_cleanup(bp, unload_mode, keep_link); - else { + } else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c2f6e44e9a3f..8b08cb18e363 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp); void bnx2x_disable_close_the_gate(struct bnx2x *bp); int bnx2x_init_hw_func_cnic(struct bnx2x *bp); +void bnx2x_clear_vlan_info(struct bnx2x *bp); + /** * bnx2x_sp_event - handle ramrods completion. 
* diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2cc14db8f0ec..192ff8d5da32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, return rc; } +void bnx2x_clear_vlan_info(struct bnx2x *bp) +{ + struct bnx2x_vlan_entry *vlan; + + /* Mark that hw forgot all entries */ + list_for_each_entry(vlan, &bp->vlan_reg, link) + vlan->hw = false; + + bp->vlan_cnt = 0; +} + static int bnx2x_del_all_vlans(struct bnx2x *bp) { struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; unsigned long ramrod_flags = 0, vlan_flags = 0; - struct bnx2x_vlan_entry *vlan; int rc; __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); @@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp) if (rc) return rc; - /* Mark that hw forgot all entries */ - list_for_each_entry(vlan, &bp->vlan_reg, link) - vlan->hw = false; - bp->vlan_cnt = 0; + bnx2x_clear_vlan_info(bp); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 94be97b7952c..4c790ffa1a73 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2152,9 +2152,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) if (bnapi->events & BNXT_RX_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); if (bnapi->events & BNXT_AGG_EVENT) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); } bnapi->events = 0; } @@ -5255,6 +5255,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { + bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); int i, rc = 0; u32 type; @@ -5330,7 +5331,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) if (rc) goto err_out; bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + /* If we have agg rings, post agg buffers first. 
*/ + if (!agg_rings) + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; if (bp->flags & BNXT_FLAG_CHIP_P5) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -5349,7 +5352,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) } } - if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (agg_rings) { type = HWRM_RING_ALLOC_AGG; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; @@ -5365,6 +5368,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, ring->fw_ring_id); bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } @@ -7208,19 +7212,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) bnxt_hwrm_vnic_set_rss(bp, i, false); } -static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, - bool irq_re_init) +static void bnxt_clear_vnic(struct bnxt *bp) { - if (bp->vnic_info) { - bnxt_hwrm_clear_vnic_filter(bp); + if (!bp->vnic_info) + return; + + bnxt_hwrm_clear_vnic_filter(bp); + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { /* clear all RSS setting before free vnic ctx */ bnxt_hwrm_clear_vnic_rss(bp); bnxt_hwrm_vnic_ctx_free(bp); - /* before free the vnic, undo the vnic tpa settings */ - if (bp->flags & BNXT_FLAG_TPA) - bnxt_set_tpa(bp, false); - bnxt_hwrm_vnic_free(bp); } + /* before free the vnic, undo the vnic tpa settings */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + bnxt_hwrm_vnic_free(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + bnxt_hwrm_vnic_ctx_free(bp); +} + +static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, + bool irq_re_init) +{ + bnxt_clear_vnic(bp); bnxt_hwrm_ring_free(bp, close_path); bnxt_hwrm_ring_grp_free(bp); if (irq_re_init) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 549c90d3e465..c05d663212b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (idx) req->dimensions = cpu_to_le16(1); - if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) + if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { memcpy(data_addr, buf, bytesize); - - rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + } else { + rc = hwrm_send_message_silent(bp, msg, msg_len, + HWRM_CMD_TIMEOUT); + } if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE)) memcpy(buf, data_addr, bytesize); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3a3d8a9be5ed..b624174c8594 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2072,21 +2072,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev, mutex_lock(&bp->hwrm_cmd_lock); hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) - goto flash_pkg_exit; - - if (resp->error_code) { + if (hwrm_err) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; - if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { + if (resp->error_code && error_code == + NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( 
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) - goto flash_pkg_exit; } + if (hwrm_err) + goto flash_pkg_exit; } if (resp->result) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 6fe4a7174271..dd621f6bd127 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, u16 src_fid) { - flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; + flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; } static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, @@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, goto free_node; bnxt_tc_set_src_fid(bp, flow, src_fid); - - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - bnxt_tc_set_flow_dir(bp, flow, src_fid); + bnxt_tc_set_flow_dir(bp, flow, flow->src_fid); if (!bnxt_tc_can_offload(bp, flow)) { rc = -EOPNOTSUPP; @@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp, * 2. 15th bit of flow_handle must specify the flow * direction (TX/RX). */ - if (flow_node->flow.dir == BNXT_DIR_RX) + if (flow_node->flow.l2_key.dir == BNXT_DIR_RX) handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index ffec57d1a5ec..4f05305052f2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -23,6 +23,9 @@ struct bnxt_tc_l2_key { __be16 inner_vlan_tci; __be16 ether_type; u8 num_vlans; + u8 dir; +#define BNXT_DIR_RX 1 +#define BNXT_DIR_TX 0 }; struct bnxt_tc_l3_key { @@ -98,9 +101,6 @@ struct bnxt_tc_flow { /* flow applicable to pkts ingressing on this fid */ u16 src_fid; - u8 dir; -#define BNXT_DIR_RX 1 -#define BNXT_DIR_TX 0 struct bnxt_tc_l2_key l2_key; struct bnxt_tc_l2_key l2_mask; struct bnxt_tc_l3_key l3_key; diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index 73632b843749..b821c9e1604c 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -10,7 +10,7 @@ #include "cavium_ptp.h" -#define DRV_NAME "Cavium PTP Driver" +#define DRV_NAME "cavium_ptp" #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 032224178b64..6dd65f9b347c 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -237,8 +237,10 @@ int octeon_setup_iq(struct octeon_device *oct, } oct->num_iqs++; - if (oct->fn_list.enable_io_queues(oct)) + if (oct->fn_list.enable_io_queues(oct)) { + octeon_delete_instr_queue(oct, iq_no); return 1; + } return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index dd99c55d9a88..ae6a47dd7dc9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char 
__user *ubuf, return -ENOMEM; err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); - if (err) + if (err) { + kvfree(t); return err; + } bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); kvfree(t); diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h index 133acca0bf31..092da2d90026 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.h +++ b/drivers/net/ethernet/ezchip/nps_enet.h @@ -167,7 +167,7 @@ struct nps_enet_priv { }; /** - * nps_reg_set - Sets ENET register with provided value. + * nps_enet_reg_set - Sets ENET register with provided value. * @priv: Pointer to EZchip ENET private data structure. * @reg: Register offset from base address. * @value: Value to set in register. @@ -179,7 +179,7 @@ static inline void nps_enet_reg_set(struct nps_enet_priv *priv, } /** - * nps_reg_get - Gets value of specified ENET register. + * nps_enet_reg_get - Gets value of specified ENET register. * @priv: Pointer to EZchip ENET private data structure. * @reg: Register offset from base address. * diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index d654c234aaf7..c5be4ebd8437 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) struct net_device *netdev; struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; - unsigned int *mcastFilterSize_p; + __be32 *mcastFilterSize_p; long ret; unsigned long ret_attr; @@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) return -EINVAL; } - mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, - VETH_MCAST_FILTER_SIZE, NULL); + mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, + VETH_MCAST_FILTER_SIZE, + NULL); if (!mcastFilterSize_p) { dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " "attribute\n"); @@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; - adapter->mcastFilterSize = *mcastFilterSize_p; + adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); adapter->pool_config = 0; netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 81a05ea38237..07efa2b40003 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], (u64)tx_buff->indir_dma, (u64)num_entries); + dma_unmap_single(dev, tx_buff->indir_dma, + sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); } else { tx_buff->num_entries = num_entries; lpar_rc = send_subcrq(adapter, handle_array[queue_num], @@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, union sub_crq *next; int index; int i, j; - u8 *first; restart_loop: while (pending_scrq(adapter, scrq)) { @@ -2818,14 +2819,6 @@ restart_loop: txbuff->data_dma[j] = 0; } - /* if sub_crq was sent indirectly */ - first = &txbuff->indir_arr[0].generic.first; - if (*first == IBMVNIC_CRQ_CMD) { - dma_unmap_single(dev, txbuff->indir_dma, - sizeof(txbuff->indir_arr), - DMA_TO_DEVICE); - *first = 0; - } if (txbuff->last_frag) { dev_kfree_skb_any(txbuff->skb); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 
dc7b128c780e..17b7ae9f46ec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7897,11 +7897,8 @@ static void ixgbe_service_task(struct work_struct *work) return; } if (ixgbe_check_fw_error(adapter)) { - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - rtnl_lock(); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) unregister_netdev(adapter->netdev); - rtnl_unlock(); - } ixgbe_service_event_complete(adapter); return; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 6c01314e87b0..db3552f2d087 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); if (err) { en_err(priv, "Failed to allocate RSS indirection QP\n"); - goto rss_err; + goto qp_alloc_err; } rss_map->indir_qp->event = mlx4_en_sqp_event; @@ -1241,6 +1241,7 @@ indir_err: MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); mlx4_qp_remove(mdev->dev, rss_map->indir_qp); mlx4_qp_free(mdev->dev, rss_map->indir_qp); +qp_alloc_err: kfree(rss_map->indir_qp); rss_map->indir_qp = NULL; rss_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0807992090b8..8cf548c7ad9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -184,8 +184,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_eth_seg eth; - struct mlx5_wqe_data_seg data[0]; + union { + struct { + struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; + }; + u8 tls_progress_params_ctx[0]; + }; }; struct mlx5e_rx_wqe_ll { @@ -1099,6 +1104,8 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info); +int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, + struct ethtool_flash *flash); void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, struct ethtool_pauseparam *pauseparam); int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 6e54fefea410..817c6ea7e349 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -75,26 +75,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) u8 state; int err; - if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) - return 0; - err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); if (err) { netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", sq->sqn, err); - return err; + goto out; } - if (state != MLX5_SQC_STATE_ERR) { - netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); - return -EINVAL; - } + if (state != MLX5_SQC_STATE_ERR) + goto out; mlx5e_tx_disable_queue(sq->txq); err = mlx5e_wait_for_sq_flush(sq); if (err) - return err; + goto out; /* At this point, no new packets will arrive from the stack as TXQ is * marked with QUEUE_STATE_DRV_XOFF. 
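The mlx5e_tx_reporter_err_cqe_recover() rework in this hunk converts its early returns into a single exit so the RECOVERING bit is cleared on every path (the clear_bit() calls appear just below). A stand-alone sketch of that goto-out shape, with hypothetical step names rather than the driver's real sequence:

#include <stdio.h>

enum { SQ_STATE_RECOVERING = 1 << 0 };

/* Placeholder steps that can fail; in the driver these query, drain and
 * reset the send queue. */
static int query_state(void) { return 0; }
static int drain_queue(void) { return 0; }
static int reset_queue(void) { return 0; }

static int recover(unsigned int *state)
{
	int err;

	err = query_state();
	if (err)
		goto out;
	err = drain_queue();
	if (err)
		goto out;
	err = reset_queue();
	if (err)
		goto out;

	*state &= ~SQ_STATE_RECOVERING;	/* success path clears the bit */
	return 0;
out:
	*state &= ~SQ_STATE_RECOVERING;	/* every error path clears it too */
	return err;
}

int main(void)
{
	unsigned int state = SQ_STATE_RECOVERING;

	printf("rc=%d state=%u\n", recover(&state), state);
	return 0;
}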
In addition, NAPI cleared all @@ -103,13 +98,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) err = mlx5e_sq_to_ready(sq, state); if (err) - return err; + goto out; mlx5e_reset_txqsq_cc_pc(sq); sq->stats->recover++; + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); mlx5e_activate_txqsq(sq); return 0; +out: + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); + return err; } static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index f701e4f3c076..2c4d1f415968 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -152,7 +152,10 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c) { set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); /* TX queue is created active. */ + + spin_lock(&c->xskicosq_lock); mlx5e_trigger_irq(&c->xskicosq); + spin_unlock(&c->xskicosq_lock); } void mlx5e_deactivate_xsk(struct mlx5e_channel *c) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 407da83474ef..b7298f9ee3d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -11,12 +11,14 @@ #include "accel/tls.h" #define MLX5E_KTLS_STATIC_UMR_WQE_SZ \ - (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params)) + (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \ + MLX5_ST_SZ_BYTES(tls_static_params)) #define MLX5E_KTLS_STATIC_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB)) #define MLX5E_KTLS_PROGRESS_WQE_SZ \ - (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params)) + (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \ + MLX5_ST_SZ_BYTES(tls_progress_params)) #define MLX5E_KTLS_PROGRESS_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) #define MLX5E_KTLS_MAX_DUMP_WQEBBS 2 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 3766545ce259..8b93101e1a09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | STATIC_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - cseg->imm = cpu_to_be32(priv_tx->tisn); + cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); ucseg->flags = MLX5_UMR_INLINE; ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); @@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, static void fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) { - MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn); + MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn); MLX5_SET(tls_progress_params, ctx, record_tracker_state, MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); MLX5_SET(tls_progress_params, ctx, auth_state, @@ -104,7 +104,7 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn, PROGRESS_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - fill_progress_params_ctx(wqe->data, priv_tx); + fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx); } static void tx_fill_wi(struct mlx5e_txqsq *sq, @@ -278,7 +278,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - cseg->imm = cpu_to_be32(tisn); + cseg->tisn = cpu_to_be32(tisn << 8); cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; eseg->inline_hdr.sz = cpu_to_be16(ihs); @@ -434,7 +434,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, priv_tx->expected_seq = seq + datalen; cseg = &(*wqe)->ctrl; - cseg->imm = cpu_to_be32(priv_tx->tisn); + cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; stats->tls_encrypted_bytes += datalen; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 8657e0f26995..2c75b2752f58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port, return &arfs_t->rules_hash[bucket_idx]; } -static u8 arfs_get_ip_proto(const struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IP)) ? - ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; -} - static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, u8 ip_proto, __be16 etype) { @@ -602,31 +596,9 @@ out: arfs_may_expire_flow(priv); } -/* return L4 destination port from ip4/6 packets */ -static __be16 arfs_get_dst_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->dest; - return ((struct udphdr *)transport_header)->dest; -} - -/* return L4 source port from ip4/6 packets */ -static __be16 arfs_get_src_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->source; - return ((struct udphdr *)transport_header)->source; -} - static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, struct arfs_table *arfs_t, - const struct sk_buff *skb, + const struct flow_keys *fk, u16 rxq, u32 flow_id) { struct arfs_rule *rule; @@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, INIT_WORK(&rule->arfs_work, arfs_handle_work); tuple = &rule->tuple; - tuple->etype = skb->protocol; + tuple->etype = fk->basic.n_proto; + tuple->ip_proto = fk->basic.ip_proto; if (tuple->etype == htons(ETH_P_IP)) { - tuple->src_ipv4 = ip_hdr(skb)->saddr; - tuple->dst_ipv4 = ip_hdr(skb)->daddr; + tuple->src_ipv4 = fk->addrs.v4addrs.src; + tuple->dst_ipv4 = fk->addrs.v4addrs.dst; } else { - memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src, sizeof(struct in6_addr)); - memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, sizeof(struct in6_addr)); } - tuple->ip_proto = arfs_get_ip_proto(skb); - tuple->src_port = arfs_get_src_port(skb); - tuple->dst_port = arfs_get_dst_port(skb); + tuple->src_port = fk->ports.src; + tuple->dst_port = fk->ports.dst; rule->flow_id = flow_id; rule->filter_id = priv->fs.arfs.last_filter_id++ 
% RPS_NO_FILTER; @@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, return rule; } -static bool arfs_cmp_ips(struct arfs_tuple *tuple, - const struct sk_buff *skb) +static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk) { - if (tuple->etype == htons(ETH_P_IP) && - tuple->src_ipv4 == ip_hdr(skb)->saddr && - tuple->dst_ipv4 == ip_hdr(skb)->daddr) - return true; - if (tuple->etype == htons(ETH_P_IPV6) && - (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, - sizeof(struct in6_addr))) && - (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, - sizeof(struct in6_addr)))) - return true; + if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst) + return false; + if (tuple->etype != fk->basic.n_proto) + return false; + if (tuple->etype == htons(ETH_P_IP)) + return tuple->src_ipv4 == fk->addrs.v4addrs.src && + tuple->dst_ipv4 == fk->addrs.v4addrs.dst; + if (tuple->etype == htons(ETH_P_IPV6)) + return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); return false; } static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, - const struct sk_buff *skb) + const struct flow_keys *fk) { struct arfs_rule *arfs_rule; struct hlist_head *head; - __be16 src_port = arfs_get_src_port(skb); - __be16 dst_port = arfs_get_dst_port(skb); - head = arfs_hash_bucket(arfs_t, src_port, dst_port); + head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst); hlist_for_each_entry(arfs_rule, head, hlist) { - if (arfs_rule->tuple.src_port == src_port && - arfs_rule->tuple.dst_port == dst_port && - arfs_cmp_ips(&arfs_rule->tuple, skb)) { + if (arfs_cmp(&arfs_rule->tuple, fk)) return arfs_rule; - } } return NULL; @@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct arfs_table *arfs_t; struct arfs_rule *arfs_rule; + struct flow_keys fk; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; - if (skb->protocol != htons(ETH_P_IP) && - skb->protocol != htons(ETH_P_IPV6)) + if (fk.basic.n_proto != htons(ETH_P_IP) && + fk.basic.n_proto != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; if (skb->encapsulation) return -EPROTONOSUPPORT; - arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); + arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto); if (!arfs_t) return -EPROTONOSUPPORT; spin_lock_bh(&arfs->arfs_lock); - arfs_rule = arfs_find_rule(arfs_t, skb); + arfs_rule = arfs_find_rule(arfs_t, &fk); if (arfs_rule) { if (arfs_rule->rxq == rxq_index) { spin_unlock_bh(&arfs->arfs_lock); @@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, } arfs_rule->rxq = rxq_index; } else { - arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, - rxq_index, flow_id); + arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id); if (!arfs_rule) { spin_unlock_bh(&arfs->arfs_lock); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 02530b50609c..7347d673f448 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1081,6 +1081,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, link_modes = autoneg == AUTONEG_ENABLE ? 
ethtool2ptys_adver_func(adver) : mlx5e_port_speed2linkmodes(mdev, speed, !ext); + if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && + autoneg != AUTONEG_ENABLE) { + netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", + __func__); + err = -EINVAL; + goto out; + } + link_modes = link_modes & eproto.cap; if (!link_modes) { netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", @@ -1338,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev = priv->mdev; int err; + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return -EOPNOTSUPP; + if (pauseparam->autoneg) return -EINVAL; @@ -1679,6 +1690,40 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, return 0; } +int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, + struct ethtool_flash *flash) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct net_device *dev = priv->netdev; + const struct firmware *fw; + int err; + + if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) + return -EOPNOTSUPP; + + err = request_firmware_direct(&fw, flash->data, &dev->dev); + if (err) + return err; + + dev_hold(dev); + rtnl_unlock(); + + err = mlx5_firmware_flash(mdev, fw, NULL); + release_firmware(fw); + + rtnl_lock(); + dev_put(dev); + return err; +} + +static int mlx5e_flash_device(struct net_device *dev, + struct ethtool_flash *flash) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return mlx5e_ethtool_flash_device(priv, flash); +} + static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, bool is_rx_cq) { @@ -1965,6 +2010,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .flash_device = mlx5e_flash_device, .get_priv_flags = mlx5e_get_priv_flags, .set_priv_flags = mlx5e_set_priv_flags, .self_test = mlx5e_self_test, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9a2fcef6e7f0..0c8e847a9eee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1323,7 +1323,6 @@ err_free_txqsq: void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); - clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 5be3da621499..c57f7533a6d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1702,7 +1702,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_cls_offload *f, struct net_device *filter_dev, - u8 *match_level, u8 *tunnel_match_level) + u8 *inner_match_level, u8 *outer_match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -1717,8 +1717,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct flow_dissector *dissector = rule->match.dissector; u16 addr_type = 0; u8 ip_proto = 0; + u8 *match_level; - *match_level = MLX5_MATCH_NONE; + match_level = outer_match_level; if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_META) | @@ -1746,12 +1747,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } if 
(mlx5e_get_tc_tun(filter_dev)) { - if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) + if (parse_tunnel_attr(priv, spec, f, filter_dev, + outer_match_level)) return -EOPNOTSUPP; - /* In decap flow, header pointers should point to the inner + /* At this point, header pointers should point to the inner * headers, outer header were already set by parse_tunnel_attr */ + match_level = inner_match_level; headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, spec); headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, @@ -2053,37 +2056,43 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct flow_cls_offload *f, struct net_device *filter_dev) { + u8 inner_match_level, outer_match_level, non_tunnel_match_level; struct netlink_ext_ack *extack = f->common.extack; struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; - u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; struct mlx5_eswitch_rep *rep; bool is_eswitch_flow; int err; - err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); + inner_match_level = MLX5_MATCH_NONE; + outer_match_level = MLX5_MATCH_NONE; + + err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level, + &outer_match_level); + non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? + outer_match_level : inner_match_level; is_eswitch_flow = mlx5e_is_eswitch_flow(flow); if (!err && is_eswitch_flow) { rep = rpriv->rep; if (rep->vport != MLX5_VPORT_UPLINK && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && - esw->offloads.inline_mode < match_level)) { + esw->offloads.inline_mode < non_tunnel_match_level)) { NL_SET_ERR_MSG_MOD(extack, "Flow is not offloaded due to min inline setting"); netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", - match_level, esw->offloads.inline_mode); + non_tunnel_match_level, esw->offloads.inline_mode); return -EOPNOTSUPP; } } if (is_eswitch_flow) { - flow->esw_attr->match_level = match_level; - flow->esw_attr->tunnel_match_level = tunnel_match_level; + flow->esw_attr->inner_match_level = inner_match_level; + flow->esw_attr->outer_match_level = outer_match_level; } else { - flow->nic_attr->match_level = match_level; + flow->nic_attr->match_level = non_tunnel_match_level; } return err; @@ -3437,7 +3446,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr, esw_attr->parse_attr = parse_attr; esw_attr->chain = f->common.chain_index; - esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; + esw_attr->prio = f->common.prio; esw_attr->in_rep = in_rep; esw_attr->in_mdev = in_mdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 86db0e9776da..aba9e7a6ad3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -390,8 +390,8 @@ struct mlx5_esw_flow_attr { struct mlx5_termtbl_handle *termtbl; } dests[MLX5_MAX_FLOW_FWD_VPORTS]; u32 mod_hdr_id; - u8 match_level; - u8 tunnel_match_level; + u8 inner_match_level; + u8 outer_match_level; struct mlx5_fc *counter; u32 chain; u16 prio; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 42cc5001255b..7d3582ee66b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ 
-207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, mlx5_eswitch_set_rule_source_port(esw, spec, attr); - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { - if (attr->tunnel_match_level != MLX5_MATCH_NONE) - spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; - if (attr->match_level != MLX5_MATCH_NONE) - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; - } else if (attr->match_level != MLX5_MATCH_NONE) { + if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; - } + if (attr->inner_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; @@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, mlx5_eswitch_set_rule_source_port(esw, spec, attr); spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; - if (attr->match_level != MLX5_MATCH_NONE) + if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index ebd81f6b556e..90cb50fe17fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -122,6 +122,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev, return mlx5e_ethtool_get_ts_info(priv, info); } +static int mlx5i_flash_device(struct net_device *netdev, + struct ethtool_flash *flash) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + + return mlx5e_ethtool_flash_device(priv, flash); +} + enum mlx5_ptys_width { MLX5_PTYS_WIDTH_1X = 1 << 0, MLX5_PTYS_WIDTH_2X = 1 << 1, @@ -233,6 +241,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = { .get_ethtool_stats = mlx5i_get_ethtool_stats, .get_ringparam = mlx5i_get_ringparam, .set_ringparam = mlx5i_set_ringparam, + .flash_device = mlx5i_flash_device, .get_channels = mlx5i_get_channels, .set_channels = mlx5i_set_channels, .get_coalesce = mlx5i_get_coalesce, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c index ea9ee88491e5..ea1d4d26ece0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c @@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, case 128: general_obj_key_size = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128; + key_p += sz_bytes; break; case 256: general_obj_key_size = diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 1aaab8446270..150b3a144b83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -477,7 +477,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, unsigned int priority) { - rulei->priority = priority >> 16; + rulei->priority = priority; } void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index 63b07edd9d81..38bb1cfe4e8c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -29,7 +29,7 @@ struct mlxsw_sp_ptp_state { struct mlxsw_sp *mlxsw_sp; - struct rhashtable unmatched_ht; + struct rhltable unmatched_ht; spinlock_t unmatched_lock; /* protects the HT */ struct delayed_work ht_gc_dw; u32 gc_cycle; @@ -45,7 +45,7 @@ struct mlxsw_sp1_ptp_key { struct mlxsw_sp1_ptp_unmatched { struct mlxsw_sp1_ptp_key key; - struct rhash_head ht_node; + struct rhlist_head ht_node; struct rcu_head rcu; struct sk_buff *skb; u64 timestamp; @@ -359,7 +359,7 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb, /* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on * error. */ -static struct mlxsw_sp1_ptp_unmatched * +static int mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp1_ptp_key key, struct sk_buff *skb, @@ -368,41 +368,51 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp, int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL; struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state; struct mlxsw_sp1_ptp_unmatched *unmatched; - struct mlxsw_sp1_ptp_unmatched *conflict; + int err; unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC); if (!unmatched) - return ERR_PTR(-ENOMEM); + return -ENOMEM; unmatched->key = key; unmatched->skb = skb; unmatched->timestamp = timestamp; unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles; - conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht, - &unmatched->ht_node, - mlxsw_sp1_ptp_unmatched_ht_params); - if (conflict) + err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node, + mlxsw_sp1_ptp_unmatched_ht_params); + if (err) kfree(unmatched); - return conflict; + return err; } static struct mlxsw_sp1_ptp_unmatched * mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp1_ptp_key key) + struct mlxsw_sp1_ptp_key key, int *p_length) { - return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key, - mlxsw_sp1_ptp_unmatched_ht_params); + struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL; + struct rhlist_head *tmp, *list; + int length = 0; + + list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key, + mlxsw_sp1_ptp_unmatched_ht_params); + rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) { + last = unmatched; + length++; + } + + *p_length = length; + return last; } static int mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp1_ptp_unmatched *unmatched) { - return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht, - &unmatched->ht_node, - mlxsw_sp1_ptp_unmatched_ht_params); + return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht, + &unmatched->ht_node, + mlxsw_sp1_ptp_unmatched_ht_params); } /* This function is called in the following scenarios: @@ -489,75 +499,38 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp1_ptp_key key, struct sk_buff *skb, u64 timestamp) { - struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict; + struct mlxsw_sp1_ptp_unmatched *unmatched; + int length; int err; rcu_read_lock(); - unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key); - spin_lock(&mlxsw_sp->ptp_state->unmatched_lock); - if (unmatched) { - /* There was an unmatched entry when we looked, but it may have - * been removed before we took the lock. - */ - err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched); - if (err) - unmatched = NULL; - } - - if (!unmatched) { - /* We have no unmatched entry, but one may have been added after - * we looked, but before we took the lock. 
- */ - unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, - skb, timestamp); - if (IS_ERR(unmatched)) { - if (skb) - mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, - key.local_port, - key.ingress, NULL); - unmatched = NULL; - } else if (unmatched) { - /* Save just told us, under lock, that the entry is - * there, so this has to work. - */ - err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, - unmatched); - WARN_ON_ONCE(err); - } - } - - /* If unmatched is non-NULL here, it comes either from the lookup, or - * from the save attempt above. In either case the entry was removed - * from the hash table. If unmatched is NULL, a new unmatched entry was - * added to the hash table, and there was no conflict. - */ - + unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length); if (skb && unmatched && unmatched->timestamp) { unmatched->skb = skb; } else if (timestamp && unmatched && unmatched->skb) { unmatched->timestamp = timestamp; - } else if (unmatched) { - /* unmatched holds an older entry of the same type: either an - * skb if we are handling skb, or a timestamp if we are handling - * timestamp. We can't match that up, so save what we have. + } else { + /* Either there is no entry to match, or one that is there is + * incompatible. */ - conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, - skb, timestamp); - if (IS_ERR(conflict)) { - if (skb) - mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, - key.local_port, - key.ingress, NULL); - } else { - /* Above, we removed an object with this key from the - * hash table, under lock, so conflict can not be a - * valid pointer. - */ - WARN_ON_ONCE(conflict); - } + if (length < 100) + err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, + skb, timestamp); + else + err = -E2BIG; + if (err && skb) + mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, + key.local_port, + key.ingress, NULL); + unmatched = NULL; + } + + if (unmatched) { + err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched); + WARN_ON_ONCE(err); } spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock); @@ -669,9 +642,8 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state, local_bh_disable(); spin_lock(&ptp_state->unmatched_lock); - err = rhashtable_remove_fast(&ptp_state->unmatched_ht, - &unmatched->ht_node, - mlxsw_sp1_ptp_unmatched_ht_params); + err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node, + mlxsw_sp1_ptp_unmatched_ht_params); spin_unlock(&ptp_state->unmatched_lock); if (err) @@ -702,7 +674,7 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work) ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw); gc_cycle = ptp_state->gc_cycle++; - rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter); + rhltable_walk_enter(&ptp_state->unmatched_ht, &iter); rhashtable_walk_start(&iter); while ((obj = rhashtable_walk_next(&iter))) { if (IS_ERR(obj)) @@ -855,8 +827,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp) spin_lock_init(&ptp_state->unmatched_lock); - err = rhashtable_init(&ptp_state->unmatched_ht, - &mlxsw_sp1_ptp_unmatched_ht_params); + err = rhltable_init(&ptp_state->unmatched_ht, + &mlxsw_sp1_ptp_unmatched_ht_params); if (err) goto err_hashtable_init; @@ -891,7 +863,7 @@ err_fifo_clr: err_mtptpt1_set: mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); err_mtptpt_set: - rhashtable_destroy(&ptp_state->unmatched_ht); + rhltable_destroy(&ptp_state->unmatched_ht); err_hashtable_init: kfree(ptp_state); return ERR_PTR(err); @@ -906,8 +878,8 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state) 
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false); mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0); mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); - rhashtable_free_and_destroy(&ptp_state->unmatched_ht, - &mlxsw_sp1_ptp_unmatched_free_fn, NULL); + rhltable_free_and_destroy(&ptp_state->unmatched_ht, + &mlxsw_sp1_ptp_unmatched_free_fn, NULL); kfree(ptp_state); } diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 59487d446a09..b894bc0c9c16 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -13,12 +13,6 @@ struct ocelot_port_block { struct ocelot_port *port; }; -static u16 get_prio(u32 prio) -{ - /* prio starts from 0x1000 while the ids starts from 0 */ - return prio >> 16; -} - static int ocelot_flower_parse_action(struct flow_cls_offload *f, struct ocelot_ace_rule *rule) { @@ -168,7 +162,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, } finished_key_parsing: - ocelot_rule->prio = get_prio(f->common.prio); + ocelot_rule->prio = f->common.prio; ocelot_rule->id = f->cookie; return ocelot_flower_parse_action(f, ocelot_rule); } @@ -218,7 +212,7 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f, struct ocelot_ace_rule rule; int ret; - rule.prio = get_prio(f->common.prio); + rule.prio = f->common.prio; rule.port = port_block->port; rule.id = f->cookie; @@ -236,7 +230,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f, struct ocelot_ace_rule rule; int ret; - rule.prio = get_prio(f->common.prio); + rule.prio = f->common.prio; rule.port = port_block->port; rule.id = f->cookie; ret = ocelot_ace_rule_stats_update(&rule); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 61fe92719982..c979f38a2e0c 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3918,7 +3918,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * setup (if available). */ status = myri10ge_request_irq(mgp); if (status != 0) - goto abort_with_firmware; + goto abort_with_slices; myri10ge_free_irq(mgp); /* Save configuration space to be restored if the diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index 86e968cd5ffd..124a43dc136a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -93,7 +93,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } - if (flow->common.prio != (1 << 16)) { + if (flow->common.prio != 1) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ef8f08931fe8..6cacd5e893ac 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Renesas Ethernet AVB device driver * - * Copyright (C) 2014-2015 Renesas Electronics Corporation + * Copyright (C) 2014-2019 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. * Copyright (C) 2015-2016 Cogent Embedded, Inc. 
<source@cogentembedded.com> * @@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) kfree(ts_skb); if (tag == tfa_tag) { skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); break; + } else { + dev_kfree_skb_any(skb); } } ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); @@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) } goto unmap; } - ts_skb->skb = skb; + ts_skb->skb = skb_get(skb); ts_skb->tag = priv->ts_skb_tag++; priv->ts_skb_tag &= 0x3ff; list_add_tail(&ts_skb->list, &priv->ts_skb_list); @@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev) /* Clear the timestamp list */ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); kfree(ts_skb); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 37c0bc699cd9..6c305b6ecad0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -94,7 +94,7 @@ static int tc_fill_entry(struct stmmac_priv *priv, struct stmmac_tc_entry *entry, *frag = NULL; struct tc_u32_sel *sel = cls->knode.sel; u32 off, data, mask, real_off, rem; - u32 prio = cls->common.prio; + u32 prio = cls->common.prio << 16; int ret; /* Only 1 match per entry */ diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 8479a440527b..12466a72cefc 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit) pci_unmap_single(lp->pci_dev, lp->rx_skbs[cur_bd].skb_dma, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN) + if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0) memmove(skb->data, skb->data - NET_IP_ALIGN, pkt_len); data = skb_put(skb, pkt_len); diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 78a7de3fb622..c62f474b6d08 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift, static void tsi108_stat_carry(struct net_device *dev) { struct tsi108_prv_data *data = netdev_priv(dev); + unsigned long flags; u32 carry1, carry2; - spin_lock_irq(&data->misclock); + spin_lock_irqsave(&data->misclock, flags); carry1 = TSI_READ(TSI108_STAT_CARRY1); carry2 = TSI_READ(TSI108_STAT_CARRY2); @@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev) TSI108_STAT_TXPAUSEDROP_CARRY, &data->tx_pause_drop); - spin_unlock_irq(&data->misclock); + spin_unlock_irqrestore(&data->misclock, flags); } /* Read a stat counter atomically with respect to carries. 
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 86884c863013..0a6cd2f1111f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); + struct netvsc_device *nvdev; struct netvsc_vf_pcpu_stats vf_tot; int i; + rcu_read_lock(); + + nvdev = rcu_dereference(ndev_ctx->nvdev); if (!nvdev) - return; + goto out; netdev_stats_to_stats64(t, &net->stats); @@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net, t->rx_packets += packets; t->multicast += multicast; } +out: + rcu_read_unlock(); } static int netvsc_set_mac_addr(struct net_device *ndev, void *p) diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index c217049552f7..c5b026150bf5 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -123,46 +123,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port) debugfs_remove_recursive(nsim_dev_port->ddir); } +static struct net *nsim_devlink_net(struct devlink *devlink) +{ + return &init_net; +} + static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false); } static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false); } static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false); } static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false); } static int nsim_dev_resources_register(struct devlink *devlink) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); struct devlink_resource_size_params params = { .size_max = (u64)-1, .size_granularity = 1, .unit = DEVLINK_RESOURCE_UNIT_ENTRY }; + struct net *net = nsim_devlink_net(devlink); int err; u64 n; @@ -176,8 +177,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) goto out; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4, &params); @@ -186,8 +186,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) return err; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV4, &params); @@ -206,8 +205,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) goto out; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, 
true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6, &params); @@ -216,8 +214,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) return err; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV6_FIB_RULES, NSIM_RESOURCE_IPV6, &params); @@ -229,19 +226,19 @@ static int nsim_dev_resources_register(struct devlink *devlink) devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB, nsim_dev_ipv4_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB_RULES, nsim_dev_ipv4_fib_rules_res_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB, nsim_dev_ipv6_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB_RULES, nsim_dev_ipv6_fib_rules_res_occ_get, - nsim_dev); + net); out: return err; } @@ -526,11 +523,11 @@ static void nsim_dev_traps_exit(struct devlink *devlink) static int nsim_dev_reload(struct devlink *devlink, struct netlink_ext_ack *extack) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES }; + struct net *net = nsim_devlink_net(devlink); int i; for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { @@ -539,8 +536,7 @@ static int nsim_dev_reload(struct devlink *devlink, err = devlink_resource_size_get(devlink, res_ids[i], &val); if (!err) { - err = nsim_fib_set_max(nsim_dev->fib_data, - res_ids[i], val, extack); + err = nsim_fib_set_max(net, res_ids[i], val, extack); if (err) return err; } @@ -670,15 +666,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT; nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT; - nsim_dev->fib_data = nsim_fib_create(); - if (IS_ERR(nsim_dev->fib_data)) { - err = PTR_ERR(nsim_dev->fib_data); - goto err_devlink_free; - } - err = nsim_dev_resources_register(devlink); if (err) - goto err_fib_destroy; + goto err_devlink_free; err = devlink_register(devlink, &nsim_bus_dev->dev); if (err) @@ -722,8 +712,6 @@ err_dl_unregister: devlink_unregister(devlink); err_resources_unregister: devlink_resources_unregister(devlink, NULL); -err_fib_destroy: - nsim_fib_destroy(nsim_dev->fib_data); err_devlink_free: devlink_free(devlink); return ERR_PTR(err); @@ -741,7 +729,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev) ARRAY_SIZE(nsim_devlink_params)); devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); - nsim_fib_destroy(nsim_dev->fib_data); mutex_destroy(&nsim_dev->port_list_lock); devlink_free(devlink); } diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 8c57ba747772..f61d094746c0 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -18,6 +18,7 @@ #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/fib_rules.h> +#include <net/netns/generic.h> #include "netdevsim.h" @@ -32,14 +33,15 @@ struct nsim_per_fib_data { }; struct nsim_fib_data { - struct notifier_block fib_nb; struct nsim_per_fib_data ipv4; struct nsim_per_fib_data ipv6; }; -u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, bool max) +static 
unsigned int nsim_fib_net_id; + +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) { + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; switch (res_id) { @@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, return max ? entry->max : entry->num; } -int nsim_fib_set_max(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, u64 val, +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack) { + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; int err = 0; @@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_rule_event(struct nsim_fib_data *data, - struct fib_notifier_info *info, bool add) +static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add) { + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_event(struct nsim_fib_data *data, - struct fib_notifier_info *info, bool add) +static int nsim_fib_event(struct fib_notifier_info *info, bool add) { + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data, static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { - struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, - fib_nb); struct fib_notifier_info *info = ptr; int err = 0; switch (event) { case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - err = nsim_fib_rule_event(data, info, - event == FIB_EVENT_RULE_ADD); + err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD); break; case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: - err = nsim_fib_event(data, info, - event == FIB_EVENT_ENTRY_ADD); + err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD); break; } @@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, /* inconsistent dump, trying again */ static void nsim_fib_dump_inconsistent(struct notifier_block *nb) { - struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, - fib_nb); + struct nsim_fib_data *data; + struct net *net; + + rcu_read_lock(); + for_each_net_rcu(net) { + data = net_generic(net, nsim_fib_net_id); + + data->ipv4.fib.num = 0ULL; + data->ipv4.rules.num = 0ULL; - data->ipv4.fib.num = 0ULL; - data->ipv4.rules.num = 0ULL; - data->ipv6.fib.num = 0ULL; - data->ipv6.rules.num = 0ULL; + data->ipv6.fib.num = 0ULL; + data->ipv6.rules.num = 0ULL; + } + rcu_read_unlock(); } -struct nsim_fib_data *nsim_fib_create(void) -{ - struct nsim_fib_data *data; - int err; +static struct notifier_block nsim_fib_nb = { + .notifier_call = nsim_fib_event_nb, +}; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return ERR_PTR(-ENOMEM); +/* Initialize per network namespace state */ +static int __net_init nsim_fib_netns_init(struct net *net) +{ + struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id); data->ipv4.fib.max = (u64)-1; data->ipv4.rules.max = (u64)-1; @@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void) data->ipv6.fib.max = (u64)-1; data->ipv6.rules.max = (u64)-1; - 
data->fib_nb.notifier_call = nsim_fib_event_nb; - err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent); - if (err) { - pr_err("Failed to register fib notifier\n"); - goto err_out; - } + return 0; +} - return data; +static struct pernet_operations nsim_fib_net_ops = { + .init = nsim_fib_netns_init, + .id = &nsim_fib_net_id, + .size = sizeof(struct nsim_fib_data), +}; -err_out: - kfree(data); - return ERR_PTR(err); +void nsim_fib_exit(void) +{ + unregister_pernet_subsys(&nsim_fib_net_ops); + unregister_fib_notifier(&nsim_fib_nb); } -void nsim_fib_destroy(struct nsim_fib_data *data) +int nsim_fib_init(void) { - unregister_fib_notifier(&data->fib_nb); - kfree(data); + int err; + + err = register_pernet_subsys(&nsim_fib_net_ops); + if (err < 0) { + pr_err("Failed to register pernet subsystem\n"); + goto err_out; + } + + err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent); + if (err < 0) { + pr_err("Failed to register fib notifier\n"); + goto err_out; + } + +err_out: + return err; } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 0740940f41b1..55f57f76d01b 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -357,12 +357,18 @@ static int __init nsim_module_init(void) if (err) goto err_dev_exit; - err = rtnl_link_register(&nsim_link_ops); + err = nsim_fib_init(); if (err) goto err_bus_exit; + err = rtnl_link_register(&nsim_link_ops); + if (err) + goto err_fib_exit; + return 0; +err_fib_exit: + nsim_fib_exit(); err_bus_exit: nsim_bus_exit(); err_dev_exit: @@ -373,6 +379,7 @@ err_dev_exit: static void __exit nsim_module_exit(void) { rtnl_link_unregister(&nsim_link_ops); + nsim_fib_exit(); nsim_bus_exit(); nsim_dev_exit(); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 262a6978bbca..66bf13765ad0 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -173,12 +173,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_index); -struct nsim_fib_data *nsim_fib_create(void); -void nsim_fib_destroy(struct nsim_fib_data *fib_data); -u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, bool max); -int nsim_fib_set_max(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, u64 val, +int nsim_fib_init(void); +void nsim_fib_exit(void); +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack); #if IS_ENABLED(CONFIG_XFRM_OFFLOAD) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index d98aa56710a9..2aa7b2e60046 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -253,36 +253,20 @@ static int at803x_config_init(struct phy_device *phydev) * after HW reset: RX delay enabled and TX delay disabled * after SW reset: RX delay enabled, while TX delay retains the * value before reset. 
- * - * So let's first disable the RX and TX delays in PHY and enable - * them based on the mode selected (this also takes care of RGMII - * mode where we expect delays to be disabled) */ - - ret = at803x_disable_rx_delay(phydev); - if (ret < 0) - return ret; - ret = at803x_disable_tx_delay(phydev); - if (ret < 0) - return ret; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || - phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - /* If RGMII_ID or RGMII_RXID are specified enable RX delay, - * otherwise keep it disabled - */ + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ret = at803x_enable_rx_delay(phydev); - if (ret < 0) - return ret; - } + else + ret = at803x_disable_rx_delay(phydev); + if (ret < 0) + return ret; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || - phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - /* If RGMII_ID or RGMII_TXID are specified enable TX delay, - * otherwise keep it disabled - */ + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) ret = at803x_enable_tx_delay(phydev); - } + else + ret = at803x_disable_tx_delay(phydev); return ret; } diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index b9d4145781ca..58bb25e4af10 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev) int val, devad; bool link = true; + if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + if (val < 0) + return val; + + /* Autoneg is being started, therefore disregard current + * link status and report link as down. + */ + if (val & MDIO_AN_CTRL1_RESTART) { + phydev->link = 0; + return 0; + } + } + while (mmd_mask && link) { devad = __ffs(mmd_mask); mmd_mask &= ~BIT(devad); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d5db7604d7c4..d347ddcac45b 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1741,7 +1741,17 @@ EXPORT_SYMBOL(genphy_aneg_done); */ int genphy_update_link(struct phy_device *phydev) { - int status; + int status = 0, bmcr; + + bmcr = phy_read(phydev, MII_BMCR); + if (bmcr < 0) + return bmcr; + + /* Autoneg is being started, therefore disregard BMSR value and + * report link as down. + */ + if (bmcr & BMCR_ANRESTART) + goto done; /* The link state is latched low so that momentary link * drops can be detected. Do not double-read the status diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index abfa0da9bbd2..e8089def5a46 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team) team->dev->vlan_features = vlan_features; team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; team->dev->hard_header_len = max_hard_header_len; diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 5519248a791e..32b08b18e120 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) } if (!timeout) { dev_err(&udev->dev, "firmware not ready in time\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto err; } /* enable ethernet mode (?) 
*/ diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index d62b6706a537..fc5895f85cee 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr) status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1), usb_buf, 24); if (status != 0) - return status; + goto out; memcpy(usb_buf, init_msg_2, 12); status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2), usb_buf, 28); if (status != 0) - return status; + goto out; memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN); - +out: kfree(usb_buf); return status; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 769bb262fbec..58f5a219fb65 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3785,7 +3785,7 @@ static int lan78xx_probe(struct usb_interface *intf, ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out3; + goto out4; } usb_set_intfdata(intf, dev); @@ -3800,12 +3800,14 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_phy_init(dev); if (ret < 0) - goto out4; + goto out5; return 0; -out4: +out5: unregister_netdev(netdev); +out4: + usb_free_urb(dev->urb_intr); out3: lan78xx_unbind(dev, intf); out2: diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index e9fc168bb734..489cba9b284d 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options) } result = i2400m_barker_db_add(barker); if (result < 0) - goto error_add; + goto error_parse_add; } kfree(options_orig); } return 0; +error_parse_add: error_parse: + kfree(options_orig); error_add: kfree(i2400m_barker_db); return result; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 4679fcf1a1c4..0020b2e8c279 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; nskb = xenvif_alloc_skb(0); if (unlikely(nskb == NULL)) { + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); xenvif_tx_err(queue, &txreq, extra_count, idx); if (net_ratelimit()) @@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. 
*/ + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); kfree_skb(nskb); break; diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c index 9dddf133658f..0a5e884a920c 100644 --- a/drivers/ntb/msi.c +++ b/drivers/ntb/msi.c @@ -6,11 +6,6 @@ #include <linux/msi.h> #include <linux/pci.h> -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION("0.1"); -MODULE_AUTHOR("Logan Gunthorpe <logang@deltatee.com>"); -MODULE_DESCRIPTION("NTB MSI Interrupt Library"); - struct ntb_msi { u64 base_addr; u64 end_addr; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 8f3fbe5ca937..c258a1ce4b28 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1286,6 +1286,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, */ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { mutex_lock(&ctrl->scan_lock); + mutex_lock(&ctrl->subsys->lock); + nvme_mpath_start_freeze(ctrl->subsys); + nvme_mpath_wait_freeze(ctrl->subsys); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); } @@ -1316,6 +1319,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) nvme_update_formats(ctrl); if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { nvme_unfreeze(ctrl); + nvme_mpath_unfreeze(ctrl->subsys); + mutex_unlock(&ctrl->subsys->lock); mutex_unlock(&ctrl->scan_lock); } if (effects & NVME_CMD_EFFECTS_CCC) @@ -1715,6 +1720,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); + revalidate_disk(ns->head->disk); } #endif } @@ -2487,6 +2493,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) if (ret) { dev_err(ctrl->device, "failed to register subsystem device.\n"); + put_device(&subsys->dev); goto out_unlock; } ida_init(&subsys->ns_ida); @@ -2509,7 +2516,6 @@ out_put_subsystem: nvme_put_subsystem(subsys); out_unlock: mutex_unlock(&nvme_subsystems_lock); - put_device(&subsys->dev); return ret; } @@ -3571,6 +3577,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) struct nvme_ns *ns, *next; LIST_HEAD(ns_list); + /* + * make sure to requeue I/O to all namespaces as these + * might result from the scan itself and must complete + * for the scan_work to make progress + */ + nvme_mpath_clear_ctrl_paths(ctrl); + /* prevent racing with ns scanning */ flush_work(&ctrl->scan_work); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 4f0d0d12744e..888d4543894e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -12,6 +12,36 @@ module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); +void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_mq_unfreeze_queue(h->disk->queue); +} + +void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_mq_freeze_queue_wait(h->disk->queue); +} + +void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_freeze_queue_start(h->disk->queue); +} + /* * If multipathing is enabled we need to 
always use the subsystem instance * number for numbering our devices to avoid conflicts between subsystems that @@ -104,18 +134,34 @@ static const char *nvme_ana_state_names[] = { [NVME_ANA_CHANGE] = "change", }; -void nvme_mpath_clear_current_path(struct nvme_ns *ns) +bool nvme_mpath_clear_current_path(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; + bool changed = false; int node; if (!head) - return; + goto out; for_each_node(node) { - if (ns == rcu_access_pointer(head->current_path[node])) + if (ns == rcu_access_pointer(head->current_path[node])) { rcu_assign_pointer(head->current_path[node], NULL); + changed = true; + } } +out: + return changed; +} + +void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->scan_lock); + list_for_each_entry(ns, &ctrl->namespaces, list) + if (nvme_mpath_clear_current_path(ns)) + kblockd_schedule_work(&ns->head->requeue_work); + mutex_unlock(&ctrl->scan_lock); } static bool nvme_path_is_disabled(struct nvme_ns *ns) @@ -226,6 +272,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) return ns; } +static bool nvme_available_path(struct nvme_ns_head *head) +{ + struct nvme_ns *ns; + + list_for_each_entry_rcu(ns, &head->list, siblings) { + switch (ns->ctrl->state) { + case NVME_CTRL_LIVE: + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + /* fallthru */ + return true; + default: + break; + } + } + return false; +} + static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, struct bio *bio) { @@ -252,14 +316,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); ret = direct_make_request(bio); - } else if (!list_empty_careful(&head->list)) { - dev_warn_ratelimited(dev, "no path available - requeuing I/O\n"); + } else if (nvme_available_path(head)) { + dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); spin_lock_irq(&head->requeue_lock); bio_list_add(&head->requeue_list, bio); spin_unlock_irq(&head->requeue_lock); } else { - dev_warn_ratelimited(dev, "no path - failing I/O\n"); + dev_warn_ratelimited(dev, "no available path - failing I/O\n"); bio->bi_status = BLK_STS_IOERR; bio_endio(bio); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 26b563f9985b..778b3a0b6adb 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -490,6 +490,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) return ctrl->ana_log_buf != NULL; } +void nvme_mpath_unfreeze(struct nvme_subsystem *subsys); +void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); +void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); void nvme_failover_req(struct request *req); @@ -500,7 +503,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head); int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); void nvme_mpath_uninit(struct nvme_ctrl *ctrl); void nvme_mpath_stop(struct nvme_ctrl *ctrl); -void nvme_mpath_clear_current_path(struct nvme_ns *ns); +bool nvme_mpath_clear_current_path(struct nvme_ns *ns); +void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) @@ -548,7 +552,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns, static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) { } -static inline void 
nvme_mpath_clear_current_path(struct nvme_ns *ns) +static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns) +{ + return false; +} +static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) { } static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) @@ -568,6 +576,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) { } +static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index db160cee42ad..6bd9b1033965 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2695,7 +2695,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie) { struct nvme_dev *dev = data; - nvme_reset_ctrl_sync(&dev->ctrl); + flush_work(&dev->ctrl.reset_work); flush_work(&dev->ctrl.scan_work); nvme_put_ctrl(&dev->ctrl); } @@ -2761,6 +2761,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); + nvme_reset_ctrl(&dev->ctrl); nvme_get_ctrl(&dev->ctrl); async_schedule(nvme_async_probe, dev); @@ -2846,7 +2847,7 @@ static int nvme_resume(struct device *dev) struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); struct nvme_ctrl *ctrl = &ndev->ctrl; - if (pm_resume_via_firmware() || !ctrl->npss || + if (ndev->last_ps == U32_MAX || nvme_set_power_state(ctrl, ndev->last_ps) != 0) nvme_reset_ctrl(ctrl); return 0; @@ -2859,6 +2860,8 @@ static int nvme_suspend(struct device *dev) struct nvme_ctrl *ctrl = &ndev->ctrl; int ret = -EBUSY; + ndev->last_ps = U32_MAX; + /* * The platform does not remove power for a kernel managed suspend so * use host managed nvme power settings for lowest idle power if @@ -2866,8 +2869,14 @@ static int nvme_suspend(struct device *dev) * shutdown. But if the firmware is involved after the suspend or the * device does not support any non-default power states, shut down the * device fully. + * + * If ASPM is not enabled for the device, shut down the device and allow + * the PCI bus layer to put it into D3 in order to take the PCIe link + * down, so as to allow the platform to achieve its minimum low-power + * state (which may not be possible if the link is up). 
*/ - if (pm_suspend_via_firmware() || !ctrl->npss) { + if (pm_suspend_via_firmware() || !ctrl->npss || + !pcie_aspm_enabled(pdev)) { nvme_dev_disable(ndev, true); return 0; } @@ -2880,7 +2889,6 @@ static int nvme_suspend(struct device *dev) ctrl->state != NVME_CTRL_ADMIN_ONLY) goto unfreeze; - ndev->last_ps = 0; ret = nvme_get_power_state(ctrl, &ndev->last_ps); if (ret < 0) goto unfreeze; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index a249db528d54..1a6449bc547b 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -562,13 +562,17 @@ out_destroy_cm_id: return ret; } +static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) +{ + rdma_disconnect(queue->cm_id); + ib_drain_qp(queue->qp); +} + static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) { if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) return; - - rdma_disconnect(queue->cm_id); - ib_drain_qp(queue->qp); + __nvme_rdma_stop_queue(queue); } static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) @@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) else ret = nvmf_connect_admin_queue(&ctrl->ctrl); - if (!ret) + if (!ret) { set_bit(NVME_RDMA_Q_LIVE, &queue->flags); - else + } else { + __nvme_rdma_stop_queue(queue); dev_info(ctrl->ctrl.device, "failed to connect queue: %d ret=%d\n", idx, ret); + } return ret; } diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index cd52b9f15376..98613a45bd3b 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -675,6 +675,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent, found: list_del(&p->entry); + nvmet_port_del_ctrls(port, subsys); nvmet_port_disc_changed(port, subsys); if (list_empty(&port->subsystems)) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index dad0243c7c96..3a67e244e568 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -46,6 +46,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) u16 status; switch (errno) { + case 0: + status = NVME_SC_SUCCESS; + break; case -ENOSPC: req->error_loc = offsetof(struct nvme_rw_command, length); status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; @@ -280,6 +283,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops) } EXPORT_SYMBOL_GPL(nvmet_unregister_transport); +void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys) +{ + struct nvmet_ctrl *ctrl; + + mutex_lock(&subsys->lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { + if (ctrl->port == port) + ctrl->ops->delete_ctrl(ctrl); + } + mutex_unlock(&subsys->lock); +} + int nvmet_enable_port(struct nvmet_port *port) { const struct nvmet_fabrics_ops *ops; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index b16dc3981c69..0940c5024a34 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -654,6 +654,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port) mutex_lock(&nvme_loop_ports_mutex); list_del_init(&port->entry); mutex_unlock(&nvme_loop_ports_mutex); + + /* + * Ensure any ctrls that are in the process of being + * deleted are in fact deleted before we return + * and free the port. This is to prevent active + * ctrls from using a port after it's freed. 
+ */ + flush_workqueue(nvme_delete_wq); } static const struct nvmet_fabrics_ops nvme_loop_ops = { diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 6ee66c610739..c51f8dd01dc4 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -418,6 +418,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port); int nvmet_register_transport(const struct nvmet_fabrics_ops *ops); void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops); +void nvmet_port_del_ctrls(struct nvmet_port *port, + struct nvmet_subsys *subsys); + int nvmet_enable_port(struct nvmet_port *port); void nvmet_disable_port(struct nvmet_port *port); diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c index 6f303b91f6e7..9e0c429cd08a 100644 --- a/drivers/nvmem/nvmem-sysfs.c +++ b/drivers/nvmem/nvmem-sysfs.c @@ -224,10 +224,17 @@ int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, if (!config->base_dev) return -EINVAL; - if (nvmem->read_only) - nvmem->eeprom = bin_attr_ro_root_nvmem; - else - nvmem->eeprom = bin_attr_rw_root_nvmem; + if (nvmem->read_only) { + if (config->root_only) + nvmem->eeprom = bin_attr_ro_root_nvmem; + else + nvmem->eeprom = bin_attr_ro_nvmem; + } else { + if (config->root_only) + nvmem->eeprom = bin_attr_rw_root_nvmem; + else + nvmem->eeprom = bin_attr_rw_nvmem; + } nvmem->eeprom.attr.name = "eeprom"; nvmem->eeprom.size = nvmem->size; #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 7f84bb4903ca..a296eaf52a5b 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw); * of_irq_parse_one - Resolve an interrupt for a device * @device: the device whose interrupt is to be resolved * @index: index of the interrupt to resolve - * @out_irq: structure of_irq filled by this function + * @out_irq: structure of_phandle_args filled by this function * * This function resolves an interrupt for a node by walking the interrupt tree, * finding which interrupt controller node it is attached to, and returning the diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index c1b67dd7cd6e..83c766233181 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups, for_each_child_of_node(local_fixups, child) { for_each_child_of_node(overlay, overlay_child) - if (!node_name_cmp(child, overlay_child)) + if (!node_name_cmp(child, overlay_child)) { + of_node_put(overlay_child); break; + } - if (!overlay_child) + if (!overlay_child) { + of_node_put(child); return -EINVAL; + } err = adjust_local_phandle_references(child, overlay_child, phandle_delta); - if (err) + if (err) { + of_node_put(child); return err; + } } return 0; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 29ed5ec1ac27..1b27b5af3d55 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1025,10 +1025,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) if (state == PCI_D0) { pci_platform_power_transition(dev, PCI_D0); /* - * Mandatory power management transition delays are - * handled in the PCIe portdrv resume hooks. + * Mandatory power management transition delays, see + * PCI Express Base Specification Revision 2.0 Section + * 6.6.1: Conventional Reset. Do not delay for + * devices powered on/off by corresponding bridge, + * because have already delayed for the bridge. 
*/ if (dev->runtime_d3cold) { + if (dev->d3cold_delay && !dev->imm_ready) + msleep(dev->d3cold_delay); /* * When powering on a bridge from D3cold, the * whole hierarchy may be powered on into @@ -4602,16 +4607,14 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS); } - /** - * pcie_wait_for_link_delay - Wait until link is active or inactive + * pcie_wait_for_link - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? - * @delay: Delay to wait after link has become active (in ms) * * Use this to wait till link becomes active or inactive. */ -bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay) +bool pcie_wait_for_link(struct pci_dev *pdev, bool active) { int timeout = 1000; bool ret; @@ -4648,25 +4651,13 @@ bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay) timeout -= 10; } if (active && ret) - msleep(delay); + msleep(100); else if (ret != active) pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", active ? "set" : "cleared"); return ret == active; } -/** - * pcie_wait_for_link - Wait until link is active or inactive - * @pdev: Bridge device - * @active: waiting for active or inactive? - * - * Use this to wait till link becomes active or inactive. - */ -bool pcie_wait_for_link(struct pci_dev *pdev, bool active) -{ - return pcie_wait_for_link_delay(pdev, active, 100); -} - void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 1be03a97cb92..d22d1b807701 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -497,7 +497,6 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state, u32 service); -bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); #ifdef CONFIG_PCIEASPM void pcie_aspm_init_link_state(struct pci_dev *pdev); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index e44af7f4d37f..464f8f92653f 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy, NULL, 0644); +/** + * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device. + * @pdev: Target device. + */ +bool pcie_aspm_enabled(struct pci_dev *pdev) +{ + struct pci_dev *bridge = pci_upstream_bridge(pdev); + bool ret; + + if (!bridge) + return false; + + mutex_lock(&aspm_lock); + ret = bridge->link_state ? 
!!bridge->link_state->aspm_enabled : false; + mutex_unlock(&aspm_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(pcie_aspm_enabled); + #ifdef CONFIG_PCIEASPM_DEBUG static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 308c3e0c4a34..1b330129089f 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -9,7 +9,6 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> -#include <linux/delay.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/pm_runtime.h> @@ -379,67 +378,6 @@ static int pm_iter(struct device *dev, void *data) return 0; } -static int get_downstream_delay(struct pci_bus *bus) -{ - struct pci_dev *pdev; - int min_delay = 100; - int max_delay = 0; - - list_for_each_entry(pdev, &bus->devices, bus_list) { - if (!pdev->imm_ready) - min_delay = 0; - else if (pdev->d3cold_delay < min_delay) - min_delay = pdev->d3cold_delay; - if (pdev->d3cold_delay > max_delay) - max_delay = pdev->d3cold_delay; - } - - return max(min_delay, max_delay); -} - -/* - * wait_for_downstream_link - Wait for downstream link to establish - * @pdev: PCIe port whose downstream link is waited - * - * Handle delays according to PCIe 4.0 section 6.6.1 before configuration - * access to the downstream component is permitted. - * - * This blocks PCI core resume of the hierarchy below this port until the - * link is trained. Should be called before resuming port services to - * prevent pciehp from starting to tear-down the hierarchy too soon. - */ -static void wait_for_downstream_link(struct pci_dev *pdev) -{ - int delay; - - if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && - pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) - return; - - if (pci_dev_is_disconnected(pdev)) - return; - - if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) || - !pdev->bridge_d3) - return; - - delay = get_downstream_delay(pdev->subordinate); - if (!delay) - return; - - dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay); - - /* - * If downstream port does not support speeds greater than 5 GT/s - * need to wait 100ms. For higher speeds (gen3) we need to wait - * first for the data link layer to become active. 
- */ - if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT) - msleep(delay); - else - pcie_wait_for_link_delay(pdev, true, delay); -} - /** * pcie_port_device_suspend - suspend port services associated with a PCIe port * @dev: PCI Express port to handle @@ -453,8 +391,6 @@ int pcie_port_device_suspend(struct device *dev) int pcie_port_device_resume_noirq(struct device *dev) { size_t off = offsetof(struct pcie_port_service_driver, resume_noirq); - - wait_for_downstream_link(to_pci_dev(dev)); return device_for_each_child(dev, &off, pm_iter); } @@ -485,8 +421,6 @@ int pcie_port_device_runtime_suspend(struct device *dev) int pcie_port_device_runtime_resume(struct device *dev) { size_t off = offsetof(struct pcie_port_service_driver, runtime_resume); - - wait_for_downstream_link(to_pci_dev(dev)); return device_for_each_child(dev, &off, pm_iter); } #endif /* PM */ diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index eb6168e6ac43..590e594092f2 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -255,8 +255,10 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt, switch (state->Vcc) { case 50: ++v; + /* fall through */ case 33: ++v; + /* fall through */ case 0: break; default: @@ -267,9 +269,11 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt, switch (state->Vpp) { case 12: ++p; + /* fall through */ case 33: case 50: ++p; + /* fall through */ case 0: break; default: diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c index 384396cbb22d..22256576b69a 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c @@ -2412,7 +2412,7 @@ static const struct aspeed_pin_config aspeed_g4_configs[] = { { PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 }, }; -static int aspeed_g4_sig_expr_set(const struct aspeed_pinmux_data *ctx, +static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enable) { diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index 053101f795a2..ba6438ac4d72 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -2507,6 +2507,61 @@ static struct aspeed_pin_config aspeed_g5_configs[] = { { PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 }, }; +static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx, + int ip) +{ + if (ip == ASPEED_IP_SCU) { + WARN(!ctx->maps[ip], "Missing SCU syscon!"); + return ctx->maps[ip]; + } + + if (ip >= ASPEED_NR_PINMUX_IPS) + return ERR_PTR(-EINVAL); + + if (likely(ctx->maps[ip])) + return ctx->maps[ip]; + + if (ip == ASPEED_IP_GFX) { + struct device_node *node; + struct regmap *map; + + node = of_parse_phandle(ctx->dev->of_node, + "aspeed,external-nodes", 0); + if (node) { + map = syscon_node_to_regmap(node); + of_node_put(node); + if (IS_ERR(map)) + return map; + } else + return ERR_PTR(-ENODEV); + + ctx->maps[ASPEED_IP_GFX] = map; + dev_dbg(ctx->dev, "Acquired GFX regmap"); + return map; + } + + if (ip == ASPEED_IP_LPC) { + struct device_node *node; + struct regmap *map; + + node = of_parse_phandle(ctx->dev->of_node, + "aspeed,external-nodes", 1); + if (node) { + map = syscon_node_to_regmap(node->parent); + of_node_put(node); + if (IS_ERR(map)) + return map; + } else + map = ERR_PTR(-ENODEV); + + ctx->maps[ASPEED_IP_LPC] = map; + dev_dbg(ctx->dev, "Acquired LPC regmap"); + return map; + } + + return ERR_PTR(-EINVAL); +} + /** * 
Configure a pin's signal by applying an expression's descriptor state for * all descriptors in the expression. @@ -2520,7 +2575,7 @@ static struct aspeed_pin_config aspeed_g5_configs[] = { * Return: 0 if the expression is configured as requested and a negative error * code otherwise */ -static int aspeed_g5_sig_expr_set(const struct aspeed_pinmux_data *ctx, +static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enable) { @@ -2531,9 +2586,15 @@ static int aspeed_g5_sig_expr_set(const struct aspeed_pinmux_data *ctx, const struct aspeed_sig_desc *desc = &expr->descs[i]; u32 pattern = enable ? desc->enable : desc->disable; u32 val = (pattern << __ffs(desc->mask)); + struct regmap *map; - if (!ctx->maps[desc->ip]) - return -ENODEV; + map = aspeed_g5_acquire_regmap(ctx, desc->ip); + if (IS_ERR(map)) { + dev_err(ctx->dev, + "Failed to acquire regmap for IP block %d\n", + desc->ip); + return PTR_ERR(map); + } /* * Strap registers are configured in hardware or by early-boot @@ -2641,34 +2702,11 @@ static struct pinctrl_desc aspeed_g5_pinctrl_desc = { static int aspeed_g5_pinctrl_probe(struct platform_device *pdev) { int i; - struct regmap *map; - struct device_node *node; for (i = 0; i < ARRAY_SIZE(aspeed_g5_pins); i++) aspeed_g5_pins[i].number = i; - node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 0); - map = syscon_node_to_regmap(node); - of_node_put(node); - if (IS_ERR(map)) { - dev_warn(&pdev->dev, "No GFX phandle found, some mux configurations may fail\n"); - map = NULL; - } - aspeed_g5_pinctrl_data.pinmux.maps[ASPEED_IP_GFX] = map; - - node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 1); - if (node) { - map = syscon_node_to_regmap(node->parent); - if (IS_ERR(map)) { - dev_warn(&pdev->dev, "LHC parent is not a syscon, some mux configurations may fail\n"); - map = NULL; - } - } else { - dev_warn(&pdev->dev, "No LHC phandle found, some mux configurations may fail\n"); - map = NULL; - } - of_node_put(node); - aspeed_g5_pinctrl_data.pinmux.maps[ASPEED_IP_LPC] = map; + aspeed_g5_pinctrl_data.pinmux.dev = &pdev->dev; return aspeed_pinctrl_probe(pdev, &aspeed_g5_pinctrl_desc, &aspeed_g5_pinctrl_data); diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index 535db3de490b..54933665b5f8 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -71,7 +71,7 @@ int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev, return 0; } -static int aspeed_sig_expr_enable(const struct aspeed_pinmux_data *ctx, +static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr) { int ret; @@ -86,7 +86,7 @@ static int aspeed_sig_expr_enable(const struct aspeed_pinmux_data *ctx, return 0; } -static int aspeed_sig_expr_disable(const struct aspeed_pinmux_data *ctx, +static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr) { int ret; @@ -109,7 +109,7 @@ static int aspeed_sig_expr_disable(const struct aspeed_pinmux_data *ctx, * * Return: 0 if all expressions are disabled, otherwise a negative error code */ -static int aspeed_disable_sig(const struct aspeed_pinmux_data *ctx, +static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr **exprs) { int ret = 0; @@ -217,8 +217,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, { int i; int ret; - const struct aspeed_pinctrl_data *pdata = - 
pinctrl_dev_get_drvdata(pctldev); + struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); const struct aspeed_pin_group *pgroup = &pdata->pinmux.groups[group]; const struct aspeed_pin_function *pfunc = &pdata->pinmux.functions[function]; @@ -306,8 +305,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev, unsigned int offset) { int ret; - const struct aspeed_pinctrl_data *pdata = - pinctrl_dev_get_drvdata(pctldev); + struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data; const struct aspeed_sig_expr ***prios, **funcs, *expr; diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c index 5b0fe178ccf2..839c01b7953f 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.c +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c @@ -5,7 +5,7 @@ #include "pinmux-aspeed.h" -const char *const aspeed_pinmux_ips[] = { +static const char *const aspeed_pinmux_ips[] = { [ASPEED_IP_SCU] = "SCU", [ASPEED_IP_GFX] = "GFX", [ASPEED_IP_LPC] = "LPC", diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h index 329d54d48667..52d299b59ce2 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.h +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h @@ -702,11 +702,12 @@ struct aspeed_pin_function { struct aspeed_pinmux_data; struct aspeed_pinmux_ops { - int (*set)(const struct aspeed_pinmux_data *ctx, + int (*set)(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enabled); }; struct aspeed_pinmux_data { + struct device *dev; struct regmap *maps[ASPEED_NR_PINMUX_IPS]; const struct aspeed_pinmux_ops *ops; @@ -725,7 +726,7 @@ int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enabled); -static inline int aspeed_sig_expr_set(const struct aspeed_pinmux_data *ctx, +static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enabled) { diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index c3ab07ab31a9..8edfac17364e 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -882,8 +882,11 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) return of_pwm_get(dev, dev->of_node, con_id); /* then lookup via ACPI */ - if (dev && is_acpi_node(dev->fwnode)) - return acpi_pwm_get(dev->fwnode); + if (dev && is_acpi_node(dev->fwnode)) { + pwm = acpi_pwm_get(dev->fwnode); + if (!IS_ERR(pwm) || PTR_ERR(pwm) != -ENOENT) + return pwm; + } /* * We look up the provider in the static table typically provided by diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 1b4ee570b712..4a8a5373cb35 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1704,6 +1704,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) grp->changed_side = 2; break; } + /* Else, fall through */ case MPCG_STATE_XID0IOWAIX: case MPCG_STATE_XID7INITW: case MPCG_STATE_XID7INITX: diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index e02f295d38a9..1534420a0243 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -357,6 +357,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/ if (callback) grp->send_qllc_disc = 1; + /* Else, fall through */ case MPCG_STATE_XID0IOWAIT: fsm_deltimer(&grp->timer); grp->outstanding_xid2 = 0; @@ -1469,6 +1470,7 @@ static void 
mpc_action_timeout(fsm_instance *fi, int event, void *arg) if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) && (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) break; + /* Else, fall through */ default: fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); } @@ -2089,6 +2091,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev) grp->estconnfunc = NULL; break; } + /* Else, fall through */ case MPCG_STATE_FLOWC: case MPCG_STATE_READY: grp->send_qllc_disc = 2; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index c7ee07ce3615..28db887d38ed 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -629,6 +629,7 @@ struct qeth_seqno { struct qeth_reply { struct list_head list; struct completion received; + spinlock_t lock; int (*callback)(struct qeth_card *, struct qeth_reply *, unsigned long); u32 seqno; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 5aa0f1268bca..0803070246aa 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -544,6 +544,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) if (reply) { refcount_set(&reply->refcnt, 1); init_completion(&reply->received); + spin_lock_init(&reply->lock); } return reply; } @@ -799,6 +800,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, if (!reply->callback) { rc = 0; + goto no_callback; + } + + spin_lock_irqsave(&reply->lock, flags); + if (reply->rc) { + /* Bail out when the requestor has already left: */ + rc = reply->rc; } else { if (cmd) { reply->offset = (u16)((char *)cmd - (char *)iob->data); @@ -807,7 +815,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, rc = reply->callback(card, reply, (unsigned long)iob); } } + spin_unlock_irqrestore(&reply->lock, flags); +no_callback: if (rc <= 0) qeth_notify_reply(reply, rc); qeth_put_reply(reply); @@ -1749,6 +1759,16 @@ static int qeth_send_control_data(struct qeth_card *card, rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; qeth_dequeue_reply(card, reply); + + if (reply_cb) { + /* Wait until the callback for a late reply has completed: */ + spin_lock_irq(&reply->lock); + if (rc) + /* Zap any callback that's still pending: */ + reply->rc = rc; + spin_unlock_irq(&reply->lock); + } + if (!rc) rc = reply->rc; qeth_put_reply(reply); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index fd64bc3f4062..cbead3d1b2fd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -333,7 +333,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, card->osn_info.data_cb(skb); break; } - /* else unknown */ + /* Else, fall through */ default: dev_kfree_skb_any(skb); QETH_CARD_TEXT(card, 3, "inbunkno"); diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index aea4fd73c862..6c68c2303638 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -603,6 +603,7 @@ static void fas216_handlesync(FAS216_Info *info, char *msg) msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); info->scsi.phase = PHASE_MSGOUT_EXPECT; + /* fall through */ case async: dev->period = info->ifcfg.asyncperiod / 4; @@ -915,6 +916,7 @@ static void fas216_disconnect_intr(FAS216_Info *info) fas216_done(info, DID_ABORT); break; } + /* else, fall through */ default: /* huh? 
*/ printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n", @@ -1411,6 +1413,8 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */ case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */ fas216_stoptransfer(info); + /* fall through */ + case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */ case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */ case STATE(STAT_STATUS, PHASE_COMMAND): /* Command -> Status */ @@ -1422,6 +1426,8 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */ case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */ fas216_stoptransfer(info); + /* fall through */ + case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */ case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */ case STATE(STAT_MESGIN, PHASE_MSGOUT): /* Message Out -> Message In */ @@ -1575,6 +1581,7 @@ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned fas216_message(info); break; } + /* else, fall through */ default: fas216_log(info, 0, "internal phase %s for function done?" @@ -1957,6 +1964,7 @@ static void fas216_kick(FAS216_Info *info) switch (where_from) { case TYPE_QUEUE: fas216_allocate_tag(info, SCpnt); + /* fall through */ case TYPE_OTHER: fas216_start_command(info, SCpnt); break; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index faf43b1d3dbe..a7549ae32542 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -10776,12 +10776,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) /* This loop sets up all CPUs that are affinitized with a * irq vector assigned to the driver. All affinitized CPUs * will get a link to that vectors IRQ and EQ. + * + * NULL affinity mask handling: + * If irq count is greater than one, log an error message. + * If the null mask is received for the first irq, find the + * first present cpu, and assign the eq index to ensure at + * least one EQ is assigned. */ for (idx = 0; idx < phba->cfg_irq_chann; idx++) { /* Get a CPU mask for all CPUs affinitized to this vector */ maskp = pci_irq_get_affinity(phba->pcidev, idx); - if (!maskp) - continue; + if (!maskp) { + if (phba->cfg_irq_chann > 1) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3329 No affinity mask found " + "for vector %d (%d)\n", + idx, phba->cfg_irq_chann); + if (!idx) { + cpu = cpumask_first(cpu_present_mask); + cpup = &phba->sli4_hba.cpu_map[cpu]; + cpup->eq = idx; + cpup->irq = pci_irq_vector(phba->pcidev, idx); + cpup->flag |= LPFC_CPU_FIRST_IRQ; + } + break; + } i = 0; /* Loop through all CPUs associated with vector idx */ diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig index 3a01cfd70fdc..f518273cfbe3 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -4,7 +4,7 @@ # menuconfig SOUNDWIRE - bool "SoundWire support" + tristate "SoundWire support" help SoundWire is a 2-Pin interface with data and clock line ratified by the MIPI Alliance. 
SoundWire is used for transporting data @@ -17,17 +17,12 @@ if SOUNDWIRE comment "SoundWire Devices" -config SOUNDWIRE_BUS - tristate - select REGMAP_SOUNDWIRE - config SOUNDWIRE_CADENCE tristate config SOUNDWIRE_INTEL tristate "Intel SoundWire Master driver" select SOUNDWIRE_CADENCE - select SOUNDWIRE_BUS depends on X86 && ACPI && SND_SOC help SoundWire Intel Master driver. diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile index fd99a831b92a..45b7e5001653 100644 --- a/drivers/soundwire/Makefile +++ b/drivers/soundwire/Makefile @@ -5,7 +5,7 @@ #Bus Objs soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o -obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o +obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o #Cadence Objs soundwire-cadence-objs := cadence_master.o diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index ff4badc9b3de..60e8bdee5c75 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -81,8 +81,8 @@ #define CDNS_MCP_INTSET 0x4C -#define CDNS_SDW_SLAVE_STAT 0x50 -#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0) +#define CDNS_MCP_SLAVE_STAT 0x50 +#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0) #define CDNS_MCP_SLAVE_INTSTAT0 0x54 #define CDNS_MCP_SLAVE_INTSTAT1 0x58 @@ -96,8 +96,8 @@ #define CDNS_MCP_SLAVE_INTMASK0 0x5C #define CDNS_MCP_SLAVE_INTMASK1 0x60 -#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0) -#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0) +#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0) +#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0) #define CDNS_MCP_PORT_INTSTAT 0x64 #define CDNS_MCP_PDI_STAT 0x6C diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index fd4995fb676e..f85ec5b16b65 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -8,11 +8,14 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/swap.h> +#include <linux/sched/signal.h> #include "ion.h" static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool) { + if (fatal_signal_pending(current)) + return NULL; return alloc_pages(pool->gfp_mask, pool->order); } diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index 2edf3ee91300..caf4d4df4bd3 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d) static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, unsigned int flags) { - int divider, base, prescale; + unsigned int divider, base, prescale; - /* This function needs improvment */ + /* This function needs improvement */ /* Don't know if divider==0 works. 
*/ for (prescale = 0; prescale < 16; prescale++) { @@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, divider = (*nanosec) / base; break; case CMDF_ROUND_UP: - divider = (*nanosec) / base; + divider = DIV_ROUND_UP(*nanosec, base); break; } if (divider < 65536) { @@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, } prescale = 15; - base = timer_base * (1 << prescale); + base = timer_base * (prescale + 1); divider = 65535; *nanosec = divider * base; return (prescale << 16) | (divider); diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c index b6c6d66e4eb1..e2c7646588f8 100644 --- a/drivers/staging/fbtft/fb_bd663474.c +++ b/drivers/staging/fbtft/fb_bd663474.c @@ -24,7 +24,7 @@ static int init_display(struct fbtft_par *par) { - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ par->fbtftops.reset(par); diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c index d609a2b67db9..fd32376700e2 100644 --- a/drivers/staging/fbtft/fb_ili9163.c +++ b/drivers/staging/fbtft/fb_ili9163.c @@ -77,7 +77,7 @@ static int init_display(struct fbtft_par *par) { par->fbtftops.reset(par); - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */ diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c index b090e7ab6fdd..85e54a10ed72 100644 --- a/drivers/staging/fbtft/fb_ili9325.c +++ b/drivers/staging/fbtft/fb_ili9325.c @@ -85,7 +85,7 @@ static int init_display(struct fbtft_par *par) { par->fbtftops.reset(par); - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ bt &= 0x07; diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c index b3d0701880fe..5a129b1352cc 100644 --- a/drivers/staging/fbtft/fb_s6d1121.c +++ b/drivers/staging/fbtft/fb_s6d1121.c @@ -29,7 +29,7 @@ static int init_display(struct fbtft_par *par) { par->fbtftops.reset(par); - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ /* Initialization sequence from Lib_UTFT */ diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c index bbf75f795234..88a5b6925901 100644 --- a/drivers/staging/fbtft/fb_ssd1289.c +++ b/drivers/staging/fbtft/fb_ssd1289.c @@ -28,7 +28,7 @@ static int init_display(struct fbtft_par *par) { par->fbtftops.reset(par); - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ write_reg(par, 0x00, 0x0001); diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c index 4cfe9f8535d0..37622c9462aa 100644 --- a/drivers/staging/fbtft/fb_ssd1331.c +++ b/drivers/staging/fbtft/fb_ssd1331.c @@ -81,7 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...) va_start(args, len); *buf = (u8)va_arg(args, unsigned int); - if (!par->gpio.dc) + if (par->gpio.dc) gpiod_set_value(par->gpio.dc, 0); ret = par->fbtftops.write(par, par->buf, sizeof(u8)); if (ret < 0) { @@ -104,7 +104,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...) 
return; } } - if (!par->gpio.dc) + if (par->gpio.dc) gpiod_set_value(par->gpio.dc, 1); va_end(args); } diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c index 564a38e34440..c77832ae5e5b 100644 --- a/drivers/staging/fbtft/fb_upd161704.c +++ b/drivers/staging/fbtft/fb_upd161704.c @@ -26,7 +26,7 @@ static int init_display(struct fbtft_par *par) { par->fbtftops.reset(par); - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ /* Initialization sequence from Lib_UTFT */ diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c index 2ea814d0dca5..63c65dd67b17 100644 --- a/drivers/staging/fbtft/fbtft-bus.c +++ b/drivers/staging/fbtft/fbtft-bus.c @@ -135,7 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len) remain = len / 2; vmem16 = (u16 *)(par->info->screen_buffer + offset); - if (!par->gpio.dc) + if (par->gpio.dc) gpiod_set_value(par->gpio.dc, 1); /* non buffered write */ diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 7cbc1bdd2d8a..cf5700a2ea66 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -76,21 +76,18 @@ static int fbtft_request_one_gpio(struct fbtft_par *par, struct gpio_desc **gpiop) { struct device *dev = par->info->device; - struct device_node *node = dev->of_node; int ret = 0; - if (of_find_property(node, name, NULL)) { - *gpiop = devm_gpiod_get_index(dev, dev->driver->name, index, - GPIOD_OUT_HIGH); - if (IS_ERR(*gpiop)) { - ret = PTR_ERR(*gpiop); - dev_err(dev, - "Failed to request %s GPIO:%d\n", name, ret); - return ret; - } - fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n", - __func__, name); + *gpiop = devm_gpiod_get_index_optional(dev, name, index, + GPIOD_OUT_HIGH); + if (IS_ERR(*gpiop)) { + ret = PTR_ERR(*gpiop); + dev_err(dev, + "Failed to request %s GPIO: %d\n", name, ret); + return ret; } + fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n", + __func__, name); return ret; } @@ -103,34 +100,34 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par) if (!par->info->device->of_node) return -EINVAL; - ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset); + ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset); if (ret) return ret; - ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc); + ret = fbtft_request_one_gpio(par, "dc", 0, &par->gpio.dc); if (ret) return ret; - ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd); + ret = fbtft_request_one_gpio(par, "rd", 0, &par->gpio.rd); if (ret) return ret; - ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr); + ret = fbtft_request_one_gpio(par, "wr", 0, &par->gpio.wr); if (ret) return ret; - ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs); + ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs); if (ret) return ret; - ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch); + ret = fbtft_request_one_gpio(par, "latch", 0, &par->gpio.latch); if (ret) return ret; for (i = 0; i < 16; i++) { - ret = fbtft_request_one_gpio(par, "db-gpios", i, + ret = fbtft_request_one_gpio(par, "db", i, &par->gpio.db[i]); if (ret) return ret; - ret = fbtft_request_one_gpio(par, "led-gpios", i, + ret = fbtft_request_one_gpio(par, "led", i, &par->gpio.led[i]); if (ret) return ret; - ret = fbtft_request_one_gpio(par, "aux-gpios", i, + ret = fbtft_request_one_gpio(par, "aux", i, &par->gpio.aux[i]); if (ret) return ret; 
@@ -234,9 +231,9 @@ static void fbtft_reset(struct fbtft_par *par) if (!par->gpio.reset) return; fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__); - gpiod_set_value_cansleep(par->gpio.reset, 0); - usleep_range(20, 40); gpiod_set_value_cansleep(par->gpio.reset, 1); + usleep_range(20, 40); + gpiod_set_value_cansleep(par->gpio.reset, 0); msleep(120); } @@ -921,7 +918,7 @@ static int fbtft_init_display_dt(struct fbtft_par *par) return -EINVAL; par->fbtftops.reset(par); - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ while (p) { @@ -1012,7 +1009,7 @@ int fbtft_init_display(struct fbtft_par *par) } par->fbtftops.reset(par); - if (!par->gpio.cs) + if (par->gpio.cs) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ i = 0; diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c index 2be45ee9d061..464648ee2036 100644 --- a/drivers/staging/gasket/apex_driver.c +++ b/drivers/staging/gasket/apex_driver.c @@ -532,7 +532,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr, break; case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE: ret = scnprintf(buf, PAGE_SIZE, "%u\n", - gasket_page_table_num_entries( + gasket_page_table_num_simple_entries( gasket_dev->page_table[0])); break; case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES: diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 6fa7726185de..1d1440d43002 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -1750,7 +1750,8 @@ static int visornic_poll(struct napi_struct *napi, int budget) } /* poll_for_irq - checks the status of the response queue - * @v: Void pointer to the visronic devdata struct. + * @t: pointer to the 'struct timer_list' from which we can retrieve the + * the visornic devdata struct. * * Main function of the vnic_incoming thread. Periodically check the response * queue and drain it if needed. diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index d72fdd333050..736eedef23b6 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -1969,6 +1969,7 @@ void wilc_deinit_host_int(struct net_device *net) priv->p2p_listen_state = false; + flush_workqueue(vif->wilc->hif_workqueue); mutex_destroy(&priv->scan_req_lock); ret = wilc_deinit(vif); diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index bfe5e9e034ec..c7d51b51898f 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -277,10 +277,14 @@ static void kgdboc_pre_exp_handler(void) /* Increment the module count when the debugger is active */ if (!kgdb_connected) try_module_get(THIS_MODULE); + + atomic_inc(&ignore_console_lock_warning); } static void kgdboc_post_exp_handler(void) { + atomic_dec(&ignore_console_lock_warning); + /* decrement the module count when the debugger detaches */ if (!kgdb_connected) module_put(THIS_MODULE); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index b5abfe89190c..df8812c30640 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -454,9 +454,11 @@ err_clk: imx_disable_unprepare_clks(dev); disable_hsic_regulator: if (data->hsic_pad_regulator) - ret = regulator_disable(data->hsic_pad_regulator); + /* don't overwrite original ret (cf. 
EPROBE_DEFER) */ + regulator_disable(data->hsic_pad_regulator); if (pdata.flags & CI_HDRC_PMQOS) pm_qos_remove_request(&data->pm_qos_req); + data->ci_pdev = NULL; return ret; } @@ -469,14 +471,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); } - ci_hdrc_remove_device(data->ci_pdev); + if (data->ci_pdev) + ci_hdrc_remove_device(data->ci_pdev); if (data->override_phy_control) usb_phy_shutdown(data->phy); - imx_disable_unprepare_clks(&pdev->dev); - if (data->plat_data->flags & CI_HDRC_PMQOS) - pm_qos_remove_request(&data->pm_qos_req); - if (data->hsic_pad_regulator) - regulator_disable(data->hsic_pad_regulator); + if (data->ci_pdev) { + imx_disable_unprepare_clks(&pdev->dev); + if (data->plat_data->flags & CI_HDRC_PMQOS) + pm_qos_remove_request(&data->pm_qos_req); + if (data->hsic_pad_regulator) + regulator_disable(data->hsic_pad_regulator); + } return 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 183b41753c98..62f4fb9b362f 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1301,10 +1301,6 @@ made_compressed_probe: tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; - minor = acm_alloc_minor(acm); - if (minor < 0) - goto alloc_fail1; - ctrlsize = usb_endpoint_maxp(epctrl); readsize = usb_endpoint_maxp(epread) * (quirks == SINGLE_RX_URB ? 1 : 2); @@ -1312,6 +1308,13 @@ made_compressed_probe: acm->writesize = usb_endpoint_maxp(epwrite) * 20; acm->control = control_interface; acm->data = data_interface; + + usb_get_intf(acm->control); /* undone in destruct() */ + + minor = acm_alloc_minor(acm); + if (minor < 0) + goto alloc_fail1; + acm->minor = minor; acm->dev = usb_dev; if (h.usb_cdc_acm_descriptor) @@ -1458,7 +1461,6 @@ skip_countries: usb_driver_claim_interface(&acm_driver, data_interface, acm); usb_set_intfdata(data_interface, acm); - usb_get_intf(control_interface); tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); if (IS_ERR(tty_dev)) { diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c index 1359b78a624e..6cf22c27f2d2 100644 --- a/drivers/usb/core/buffer.c +++ b/drivers/usb/core/buffer.c @@ -66,9 +66,7 @@ int hcd_buffer_create(struct usb_hcd *hcd) char name[16]; int i, size; - if (!IS_ENABLED(CONFIG_HAS_DMA) || - (!is_device_dma_capable(hcd->self.sysdev) && - !hcd->localmem_pool)) + if (hcd->localmem_pool || !hcd_uses_dma(hcd)) return 0; for (i = 0; i < HCD_BUFFER_POOLS; i++) { @@ -129,8 +127,7 @@ void *hcd_buffer_alloc( return gen_pool_dma_alloc(hcd->localmem_pool, size, dma); /* some USB hosts just use PIO */ - if (!IS_ENABLED(CONFIG_HAS_DMA) || - !is_device_dma_capable(bus->sysdev)) { + if (!hcd_uses_dma(hcd)) { *dma = ~(dma_addr_t) 0; return kmalloc(size, mem_flags); } @@ -160,8 +157,7 @@ void hcd_buffer_free( return; } - if (!IS_ENABLED(CONFIG_HAS_DMA) || - !is_device_dma_capable(bus->sysdev)) { + if (!hcd_uses_dma(hcd)) { kfree(addr); return; } diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index b265ab5405f9..9063ede411ae 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1812,8 +1812,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb return 0; error: - if (as && as->usbm) - dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count); kfree(isopkt); kfree(dr); if (as) diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 65de6f73b672..558890ada0e5 100644 --- 
a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf, intf->minor = minor; break; } - up_write(&minor_rwsem); - if (intf->minor < 0) + if (intf->minor < 0) { + up_write(&minor_rwsem); return -EXFULL; + } /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); @@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf, MKDEV(USB_MAJOR, minor), class_driver, "%s", kbasename(name)); if (IS_ERR(intf->usb_dev)) { - down_write(&minor_rwsem); usb_minors[minor] = NULL; intf->minor = -1; - up_write(&minor_rwsem); retval = PTR_ERR(intf->usb_dev); } + up_write(&minor_rwsem); return retval; } EXPORT_SYMBOL_GPL(usb_register_dev); @@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf, return; dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); + device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); down_write(&minor_rwsem); usb_minors[intf->minor] = NULL; up_write(&minor_rwsem); - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); intf->usb_dev = NULL; intf->minor = -1; destroy_usb_class(); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 9320787ac2e6..8592c0344fe8 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -103,11 +103,6 @@ static DEFINE_SPINLOCK(hcd_urb_unlink_lock); /* wait queue for synchronous unlinks */ DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue); -static inline int is_root_hub(struct usb_device *udev) -{ - return (udev->parent == NULL); -} - /*-------------------------------------------------------------------------*/ /* @@ -880,101 +875,6 @@ static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } - -/* - * Show & store the current value of authorized_default - */ -static ssize_t authorized_default_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct usb_device *rh_usb_dev = to_usb_device(dev); - struct usb_bus *usb_bus = rh_usb_dev->bus; - struct usb_hcd *hcd; - - hcd = bus_to_hcd(usb_bus); - return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy); -} - -static ssize_t authorized_default_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - unsigned val; - struct usb_device *rh_usb_dev = to_usb_device(dev); - struct usb_bus *usb_bus = rh_usb_dev->bus; - struct usb_hcd *hcd; - - hcd = bus_to_hcd(usb_bus); - result = sscanf(buf, "%u\n", &val); - if (result == 1) { - hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ? 
- val : USB_DEVICE_AUTHORIZE_ALL; - result = size; - } else { - result = -EINVAL; - } - return result; -} -static DEVICE_ATTR_RW(authorized_default); - -/* - * interface_authorized_default_show - show default authorization status - * for USB interfaces - * - * note: interface_authorized_default is the default value - * for initializing the authorized attribute of interfaces - */ -static ssize_t interface_authorized_default_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct usb_device *usb_dev = to_usb_device(dev); - struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); - - return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd)); -} - -/* - * interface_authorized_default_store - store default authorization status - * for USB interfaces - * - * note: interface_authorized_default is the default value - * for initializing the authorized attribute of interfaces - */ -static ssize_t interface_authorized_default_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct usb_device *usb_dev = to_usb_device(dev); - struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); - int rc = count; - bool val; - - if (strtobool(buf, &val) != 0) - return -EINVAL; - - if (val) - set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); - else - clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); - - return rc; -} -static DEVICE_ATTR_RW(interface_authorized_default); - -/* Group all the USB bus attributes */ -static struct attribute *usb_bus_attrs[] = { - &dev_attr_authorized_default.attr, - &dev_attr_interface_authorized_default.attr, - NULL, -}; - -static const struct attribute_group usb_bus_attr_group = { - .name = NULL, /* we want them in the same directory */ - .attrs = usb_bus_attrs, -}; - - - /*-------------------------------------------------------------------------*/ /** @@ -1512,7 +1412,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, if (usb_endpoint_xfer_control(&urb->ep->desc)) { if (hcd->self.uses_pio_for_control) return ret; - if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { + if (hcd_uses_dma(hcd)) { if (is_vmalloc_addr(urb->setup_packet)) { WARN_ONCE(1, "setup packet is not dma capable\n"); return -EAGAIN; @@ -1546,7 +1446,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; if (urb->transfer_buffer_length != 0 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { - if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { + if (hcd_uses_dma(hcd)) { if (urb->num_sgs) { int n; @@ -2894,32 +2794,11 @@ int usb_add_hcd(struct usb_hcd *hcd, if (retval != 0) goto err_register_root_hub; - retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group); - if (retval < 0) { - printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n", - retval); - goto error_create_attr_group; - } if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) usb_hcd_poll_rh_status(hcd); return retval; -error_create_attr_group: - clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); - if (HC_IS_RUNNING(hcd->state)) - hcd->state = HC_STATE_QUIESCING; - spin_lock_irq(&hcd_root_hub_lock); - hcd->rh_registered = 0; - spin_unlock_irq(&hcd_root_hub_lock); - -#ifdef CONFIG_PM - cancel_work_sync(&hcd->wakeup_work); -#endif - cancel_work_sync(&hcd->died_work); - mutex_lock(&usb_bus_idr_lock); - usb_disconnect(&rhdev); /* Sets rhdev to NULL */ - mutex_unlock(&usb_bus_idr_lock); err_register_root_hub: hcd->rh_pollable = 0; clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); @@ -2963,8 +2842,6 @@ void usb_remove_hcd(struct usb_hcd *hcd) dev_info(hcd->self.controller, "remove, state %x\n", hcd->state); usb_get_dev(rhdev); - sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group); - clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); if (HC_IS_RUNNING (hcd->state)) hcd->state = HC_STATE_QUIESCING; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index e844bb7b5676..5adf489428aa 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_desc *)) + if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) + if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 7e88fdfe3cf5..f19694e69f5c 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/usb/quirks.h> #include <linux/of.h> #include "usb.h" @@ -922,6 +923,116 @@ static struct bin_attribute dev_bin_attr_descriptors = { .size = 18 + 65535, /* dev descr + max-size raw descriptor */ }; +/* + * Show & store the current value of authorized_default + */ +static ssize_t authorized_default_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_device *rh_usb_dev = to_usb_device(dev); + struct usb_bus *usb_bus = rh_usb_dev->bus; + struct usb_hcd *hcd; + + hcd = bus_to_hcd(usb_bus); + return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy); +} + +static ssize_t authorized_default_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + ssize_t result; + unsigned int val; + struct usb_device *rh_usb_dev = to_usb_device(dev); + struct usb_bus *usb_bus = rh_usb_dev->bus; + struct usb_hcd *hcd; + + hcd = bus_to_hcd(usb_bus); + result = sscanf(buf, "%u\n", &val); + if (result == 1) { + hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ? 
+ val : USB_DEVICE_AUTHORIZE_ALL; + result = size; + } else { + result = -EINVAL; + } + return result; +} +static DEVICE_ATTR_RW(authorized_default); + +/* + * interface_authorized_default_show - show default authorization status + * for USB interfaces + * + * note: interface_authorized_default is the default value + * for initializing the authorized attribute of interfaces + */ +static ssize_t interface_authorized_default_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_device *usb_dev = to_usb_device(dev); + struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); + + return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd)); +} + +/* + * interface_authorized_default_store - store default authorization status + * for USB interfaces + * + * note: interface_authorized_default is the default value + * for initializing the authorized attribute of interfaces + */ +static ssize_t interface_authorized_default_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_device *usb_dev = to_usb_device(dev); + struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); + int rc = count; + bool val; + + if (strtobool(buf, &val) != 0) + return -EINVAL; + + if (val) + set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + else + clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + + return rc; +} +static DEVICE_ATTR_RW(interface_authorized_default); + +/* Group all the USB bus attributes */ +static struct attribute *usb_bus_attrs[] = { + &dev_attr_authorized_default.attr, + &dev_attr_interface_authorized_default.attr, + NULL, +}; + +static const struct attribute_group usb_bus_attr_group = { + .name = NULL, /* we want them in the same directory */ + .attrs = usb_bus_attrs, +}; + + +static int add_default_authorized_attributes(struct device *dev) +{ + int rc = 0; + + if (is_usb_device(dev)) + rc = sysfs_create_group(&dev->kobj, &usb_bus_attr_group); + + return rc; +} + +static void remove_default_authorized_attributes(struct device *dev) +{ + if (is_usb_device(dev)) { + sysfs_remove_group(&dev->kobj, &usb_bus_attr_group); + } +} + int usb_create_sysfs_dev_files(struct usb_device *udev) { struct device *dev = &udev->dev; @@ -938,7 +1049,14 @@ int usb_create_sysfs_dev_files(struct usb_device *udev) retval = add_power_attributes(dev); if (retval) goto error; + + if (is_root_hub(udev)) { + retval = add_default_authorized_attributes(dev); + if (retval) + goto error; + } return retval; + error: usb_remove_sysfs_dev_files(udev); return retval; @@ -948,6 +1066,9 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev) { struct device *dev = &udev->dev; + if (is_root_hub(udev)) + remove_default_authorized_attributes(dev); + remove_power_attributes(dev); remove_persist_attributes(dev); device_remove_bin_file(dev, &dev_bin_attr_descriptors); diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index bd8d01f85a13..0c9fde5ad052 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -153,6 +153,11 @@ static inline int is_usb_port(const struct device *dev) return dev->type == &usb_port_device_type; } +static inline int is_root_hub(struct usb_device *udev) +{ + return (udev->parent == NULL); +} + /* Do the same for device drivers and interface drivers. 
*/ static inline int is_usb_device_driver(struct device_driver *drv) diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index ee144ff8af5b..111787a137ee 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4608,7 +4608,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, buf = urb->transfer_buffer; - if (hcd->self.uses_dma) { + if (hcd_uses_dma(hcd)) { if (!buf && (urb->transfer_dma & 3)) { dev_err(hsotg->dev, "%s: unaligned transfer with no transfer_buffer", diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 9118b42c70b6..76883ff4f5bb 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1976,6 +1976,7 @@ void composite_disconnect(struct usb_gadget *gadget) * disconnect callbacks? */ spin_lock_irqsave(&cdev->lock, flags); + cdev->suspended = 0; if (cdev->config) reset_config(cdev); if (cdev->driver->disconnect) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 29cc5693e05c..7c96c4665178 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -261,7 +261,7 @@ struct fsg_common; struct fsg_common { struct usb_gadget *gadget; struct usb_composite_dev *cdev; - struct fsg_dev *fsg, *new_fsg; + struct fsg_dev *fsg; wait_queue_head_t io_wait; wait_queue_head_t fsg_wait; @@ -290,6 +290,7 @@ struct fsg_common { unsigned int bulk_out_maxpacket; enum fsg_state state; /* For exception handling */ unsigned int exception_req_tag; + void *exception_arg; enum data_direction data_dir; u32 data_size; @@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) /* These routines may be called in process context or in_irq */ -static void raise_exception(struct fsg_common *common, enum fsg_state new_state) +static void __raise_exception(struct fsg_common *common, enum fsg_state new_state, + void *arg) { unsigned long flags; @@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state) if (common->state <= new_state) { common->exception_req_tag = common->ep0_req_tag; common->state = new_state; + common->exception_arg = arg; if (common->thread_task) send_sig_info(SIGUSR1, SEND_SIG_PRIV, common->thread_task); @@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state) spin_unlock_irqrestore(&common->lock, flags); } +static void raise_exception(struct fsg_common *common, enum fsg_state new_state) +{ + __raise_exception(common, new_state, NULL); +} /*-------------------------------------------------------------------------*/ @@ -2285,16 +2292,16 @@ reset: static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct fsg_dev *fsg = fsg_from_func(f); - fsg->common->new_fsg = fsg; - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg); return USB_GADGET_DELAYED_STATUS; } static void fsg_disable(struct usb_function *f) { struct fsg_dev *fsg = fsg_from_func(f); - fsg->common->new_fsg = NULL; - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); } @@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common) enum fsg_state old_state; struct fsg_lun *curlun; unsigned int exception_req_tag; + struct fsg_dev *new_fsg; /* * Clear the existing signals. 
Anything but SIGUSR1 is converted @@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common) common->next_buffhd_to_fill = &common->buffhds[0]; common->next_buffhd_to_drain = &common->buffhds[0]; exception_req_tag = common->exception_req_tag; + new_fsg = common->exception_arg; old_state = common->state; common->state = FSG_STATE_NORMAL; @@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common) break; case FSG_STATE_CONFIG_CHANGE: - do_set_interface(common, common->new_fsg); - if (common->new_fsg) + do_set_interface(common, new_fsg); + if (new_fsg) usb_composite_setup_continue(common->cdev); break; @@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) DBG(fsg, "unbind\n"); if (fsg->common->fsg == fsg) { - fsg->common->new_fsg = NULL; - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); /* FIXME: make interruptible or killable somehow? */ wait_event(common->fsg_wait, common->fsg != fsg); } diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 87062d22134d..1f4c3fbd1df8 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -19,6 +19,7 @@ #include <linux/pm_runtime.h> #include <linux/sizes.h> #include <linux/slab.h> +#include <linux/string.h> #include <linux/sys_soc.h> #include <linux/uaccess.h> #include <linux/usb/ch9.h> @@ -2450,9 +2451,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, if (usb3->forced_b_device) return -EBUSY; - if (!strncmp(buf, "host", strlen("host"))) + if (sysfs_streq(buf, "host")) new_mode_is_host = true; - else if (!strncmp(buf, "peripheral", strlen("peripheral"))) + else if (sysfs_streq(buf, "peripheral")) new_mode_is_host = false; else return -EINVAL; diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 77cc36efae95..0dbfa5c10703 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -1629,6 +1629,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* see what we found out */ temp = check_reset_complete(fotg210, wIndex, status_reg, fotg210_readl(fotg210, status_reg)); + + /* restart schedule */ + fotg210->command |= CMD_RUN; + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); } if (!(temp & (PORT_RESUME|PORT_RESET))) { diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 671bce18782c..8616c52849c6 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd) * pointers. So, this driver clears the AC64 bit of xhci->hcc_params * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in * xhci_gen_setup(). + * + * And, since the firmware/internal CPU control the USBSTS.STS_HALT + * and the process speed is down when the roothub port enters U3, + * long delay for the handshake of STS_HALT is neeed in xhci_suspend(). 
*/ if (xhci_rcar_is_gen2(hcd->self.controller) || - xhci_rcar_is_gen3(hcd->self.controller)) - xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + xhci_rcar_is_gen3(hcd->self.controller)) { + xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND; + } if (!xhci_rcar_wait_for_pll_active(hcd)) return -ETIMEDOUT; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 248cd7a8b163..03d1e552769b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3089,8 +3089,18 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, return; udev = (struct usb_device *) host_ep->hcpriv; vdev = xhci->devs[udev->slot_id]; + + /* + * vdev may be lost due to xHC restore error and re-initialization + * during S3/S4 resume. A new vdev will be allocated later by + * xhci_discover_or_reset_device() + */ + if (!udev->slot_id || !vdev) + return; ep_index = xhci_get_endpoint_index(&host_ep->desc); ep = &vdev->eps[ep_index]; + if (!ep) + return; /* Bail out if toggle is already being cleared by a endpoint reset */ if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index ba05dd80a020..f5bed9f29e56 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface) dev = usb_get_intfdata(interface); mutex_lock(&iowarrior_open_disc_lock); usb_set_intfdata(interface, NULL); + /* prevent device read, write and ioctl */ + dev->present = 0; minor = dev->minor; + mutex_unlock(&iowarrior_open_disc_lock); + /* give back our minor - this will call close() locks need to be dropped at this point*/ - /* give back our minor */ usb_deregister_dev(interface, &iowarrior_class); mutex_lock(&dev->mutex); /* prevent device read, write and ioctl */ - dev->present = 0; mutex_unlock(&dev->mutex); - mutex_unlock(&iowarrior_open_disc_lock); if (dev->opened) { /* There is a process that holds a filedescriptor to the device , diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index 27e9c78a791e..a32d61a79ab8 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -51,6 +51,7 @@ struct rio_usb_data { char *obuf, *ibuf; /* transfer buffers */ char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ wait_queue_head_t wait_q; /* for timeouts */ + struct mutex lock; /* general race avoidance */ }; static DEFINE_MUTEX(rio500_mutex); @@ -62,8 +63,10 @@ static int open_rio(struct inode *inode, struct file *file) /* against disconnect() */ mutex_lock(&rio500_mutex); + mutex_lock(&(rio->lock)); if (rio->isopen || !rio->present) { + mutex_unlock(&(rio->lock)); mutex_unlock(&rio500_mutex); return -EBUSY; } @@ -71,6 +74,7 @@ static int open_rio(struct inode *inode, struct file *file) init_waitqueue_head(&rio->wait_q); + mutex_unlock(&(rio->lock)); dev_info(&rio->rio_dev->dev, "Rio opened.\n"); mutex_unlock(&rio500_mutex); @@ -84,6 +88,7 @@ static int close_rio(struct inode *inode, struct file *file) /* against disconnect() */ mutex_lock(&rio500_mutex); + mutex_lock(&(rio->lock)); rio->isopen = 0; if (!rio->present) { @@ -95,6 +100,7 @@ static int close_rio(struct inode *inode, struct file *file) } else { dev_info(&rio->rio_dev->dev, "Rio closed.\n"); } + mutex_unlock(&(rio->lock)); mutex_unlock(&rio500_mutex); return 0; } @@ -109,7 +115,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) int retries; int retval=0; - mutex_lock(&rio500_mutex); + mutex_lock(&(rio->lock)); /* Sanity check to make sure rio is connected, 
powered, etc */ if (rio->present == 0 || rio->rio_dev == NULL) { retval = -ENODEV; @@ -253,7 +259,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) err_out: - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return retval; } @@ -273,12 +279,12 @@ write_rio(struct file *file, const char __user *buffer, int errn = 0; int intr; - intr = mutex_lock_interruptible(&rio500_mutex); + intr = mutex_lock_interruptible(&(rio->lock)); if (intr) return -EINTR; /* Sanity check to make sure rio is connected, powered, etc */ if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return -ENODEV; } @@ -301,7 +307,7 @@ write_rio(struct file *file, const char __user *buffer, goto error; } if (signal_pending(current)) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return bytes_written ? bytes_written : -EINTR; } @@ -339,12 +345,12 @@ write_rio(struct file *file, const char __user *buffer, buffer += copy_size; } while (count > 0); - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return bytes_written ? bytes_written : -EIO; error: - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return errn; } @@ -361,12 +367,12 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) char *ibuf; int intr; - intr = mutex_lock_interruptible(&rio500_mutex); + intr = mutex_lock_interruptible(&(rio->lock)); if (intr) return -EINTR; /* Sanity check to make sure rio is connected, powered, etc */ if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return -ENODEV; } @@ -377,11 +383,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) while (count > 0) { if (signal_pending(current)) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return read_count ? read_count : -EINTR; } if (!rio->rio_dev) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return -ENODEV; } this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; @@ -399,7 +405,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) count = this_read = partial; } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? 
*/ if (!maxretry--) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); dev_err(&rio->rio_dev->dev, "read_rio: maxretry timeout\n"); return -ETIME; @@ -409,19 +415,19 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) finish_wait(&rio->wait_q, &wait); continue; } else if (result != -EREMOTEIO) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); dev_err(&rio->rio_dev->dev, "Read Whoops - result:%d partial:%u this_read:%u\n", result, partial, this_read); return -EIO; } else { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return (0); } if (this_read) { if (copy_to_user(buffer, ibuf, this_read)) { - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return -EFAULT; } count -= this_read; @@ -429,7 +435,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) buffer += this_read; } } - mutex_unlock(&rio500_mutex); + mutex_unlock(&(rio->lock)); return read_count; } @@ -494,6 +500,8 @@ static int probe_rio(struct usb_interface *intf, } dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf); + mutex_init(&(rio->lock)); + usb_set_intfdata (intf, rio); rio->present = 1; bail_out: @@ -511,10 +519,12 @@ static void disconnect_rio(struct usb_interface *intf) if (rio) { usb_deregister_dev(intf, &usb_rio_class); + mutex_lock(&(rio->lock)); if (rio->isopen) { rio->isopen = 0; /* better let it finish - the release will do whats needed */ rio->rio_dev = NULL; + mutex_unlock(&(rio->lock)); mutex_unlock(&rio500_mutex); return; } @@ -524,6 +534,7 @@ static void disconnect_rio(struct usb_interface *intf) dev_info(&intf->dev, "USB Rio disconnected.\n"); rio->present = 0; + mutex_unlock(&(rio->lock)); } mutex_unlock(&rio500_mutex); } diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 7b306aa22d25..6715a128e6c8 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref) dev_dbg(&dev->interface->dev, "%s\n", __func__); - usb_put_dev(dev->udev); if (dev->cntl_urb) { usb_kill_urb(dev->cntl_urb); kfree(dev->cntl_req); @@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref) dev->int_buffer, dev->urb->transfer_dma); usb_free_urb(dev->urb); } + usb_put_dev(dev->udev); kfree(dev); } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c1582fbd1150..38e920ac7f82 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) }, + /* Motorola devices */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, @@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 
0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, @@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */ + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */ + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index fba32d84e578..15abe1d9958f 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -379,7 +379,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port) return SNK_UNATTACHED; else if (port->try_role == TYPEC_SOURCE) return SRC_UNATTACHED; - else if (port->tcpc->config->default_role == TYPEC_SINK) + else if (port->tcpc->config && + port->tcpc->config->default_role == TYPEC_SINK) return SNK_UNATTACHED; /* Fall through to return SRC_UNATTACHED */ } else if (port->port_type == TYPEC_PORT_SNK) { @@ -586,7 +587,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port) static void tcpm_debugfs_exit(struct tcpm_port *port) { + int i; + + mutex_lock(&port->logbuffer_lock); + for (i = 0; i < LOG_BUFFER_ENTRIES; i++) { + kfree(port->logbuffer[i]); + port->logbuffer[i] = NULL; + } + mutex_unlock(&port->logbuffer_lock); + debugfs_remove(port->dentry); + if (list_empty(&rootdir->d_subdirs)) { + debugfs_remove(rootdir); + rootdir = NULL; + } } #else @@ -1095,7 +1109,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, break; case CMD_ATTENTION: /* Attention command does not have response */ - typec_altmode_attention(adev, p[1]); + if (adev) + typec_altmode_attention(adev, p[1]); return 0; default: break; @@ -1147,20 +1162,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, } break; case CMD_ENTER_MODE: - typec_altmode_update_active(pdev, true); - - if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) { - response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE); - response[0] |= VDO_OPOS(adev->mode); - return 1; + if (adev && pdev) { + typec_altmode_update_active(pdev, true); + + if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) { + response[0] = VDO(adev->svid, 1, + CMD_EXIT_MODE); + response[0] |= VDO_OPOS(adev->mode); + return 1; + } } return 0; case CMD_EXIT_MODE: - typec_altmode_update_active(pdev, false); + if (adev && pdev) { + typec_altmode_update_active(pdev, false); - /* Back to USB Operation */ - WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, - NULL)); + /* Back to USB Operation */ + WARN_ON(typec_altmode_notify(adev, + TYPEC_STATE_USB, + NULL)); + } break; default: break; @@ -1170,8 +1191,10 @@ 
static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, switch (cmd) { case CMD_ENTER_MODE: /* Back to USB Operation */ - WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, - NULL)); + if (adev) + WARN_ON(typec_altmode_notify(adev, + TYPEC_STATE_USB, + NULL)); break; default: break; @@ -1182,7 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, } /* Informing the alternate mode drivers about everything */ - typec_altmode_vdm(adev, p[0], &p[1], cnt); + if (adev) + typec_altmode_vdm(adev, p[0], &p[1], cnt); return rlen; } @@ -4114,7 +4138,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role) mutex_lock(&port->lock); if (tcpc->try_role) ret = tcpc->try_role(tcpc, role); - if (!ret && !tcpc->config->try_role_hw) + if (!ret && (!tcpc->config || !tcpc->config->try_role_hw)) port->try_role = role; port->try_src_count = 0; port->try_snk_count = 0; @@ -4701,7 +4725,7 @@ static int tcpm_copy_caps(struct tcpm_port *port, port->typec_caps.prefer_role = tcfg->default_role; port->typec_caps.type = tcfg->type; port->typec_caps.data = tcfg->data; - port->self_powered = port->tcpc->config->self_powered; + port->self_powered = tcfg->self_powered; return 0; } diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index f7a79a23ebed..8e9f8fba55af 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -1018,7 +1018,7 @@ release_fw: ******************************************************************************/ static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode) { - int err; + int err = 0; while (flash_mode != FLASH_NOT_NEEDED) { err = do_flash(uc, flash_mode); diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 90eca64e3144..702cca59bda1 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -447,6 +447,7 @@ static int set_color_mode(struct omapfb_plane_struct *plane, return 0; case 12: var->bits_per_pixel = 16; + /* fall through */ case 16: if (plane->fbdev->panel->bpp == 12) plane->color_mode = OMAPFB_COLOR_RGB444; @@ -1534,20 +1535,27 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state) case OMAPFB_ACTIVE: for (i = 0; i < fbdev->mem_desc.region_cnt; i++) unregister_framebuffer(fbdev->fb_info[i]); + /* fall through */ case 7: omapfb_unregister_sysfs(fbdev); + /* fall through */ case 6: if (fbdev->panel->disable) fbdev->panel->disable(fbdev->panel); + /* fall through */ case 5: omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED); + /* fall through */ case 4: planes_cleanup(fbdev); + /* fall through */ case 3: ctrl_cleanup(fbdev); + /* fall through */ case 2: if (fbdev->panel->cleanup) fbdev->panel->cleanup(fbdev->panel); + /* fall through */ case 1: dev_set_drvdata(fbdev->dev, NULL); kfree(fbdev); diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index b9b2d06b3879..668a1c704f28 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c @@ -235,6 +235,7 @@ static long ar7_wdt_ioctl(struct file *file, ar7_wdt_update_margin(new_margin); ar7_wdt_kick(1); spin_unlock(&wdt_lock); + /* Fall through */ case WDIOC_GETTIMEOUT: if (put_user(margin, (int *)arg)) diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c index 1b2cf5b95a89..c3c93e00b320 100644 --- a/drivers/watchdog/pcwd.c +++ b/drivers/watchdog/pcwd.c @@ -651,7 +651,7 @@ static long pcwd_ioctl(struct file *file, unsigned int 
cmd, unsigned long arg) return -EINVAL; pcwd_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(heartbeat, argp); diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c index 41a2a11535a6..b35f7be20c00 100644 --- a/drivers/watchdog/riowd.c +++ b/drivers/watchdog/riowd.c @@ -134,7 +134,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EINVAL; riowd_timeout = (new_margin + 59) / 60; riowd_writereg(p, riowd_timeout, WDTO_INDEX); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(riowd_timeout * 60, (int __user *)argp); diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index 5a6ced7a7e8f..202fc8d8ca5f 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c @@ -202,6 +202,7 @@ static long sbwdog_ioctl(struct file *file, unsigned int cmd, timeout = time; sbwdog_set(user_dog, timeout); sbwdog_pet(user_dog); + /* Fall through */ case WDIOC_GETTIMEOUT: /* diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c index efd7996694de..46268309ee9b 100644 --- a/drivers/watchdog/scx200_wdt.c +++ b/drivers/watchdog/scx200_wdt.c @@ -186,6 +186,7 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd, margin = new_margin; scx200_wdt_update_margin(); scx200_wdt_ping(); + /* Fall through */ case WDIOC_GETTIMEOUT: if (put_user(margin, p)) return -EFAULT; diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c index 0650100fad00..7d278b37e083 100644 --- a/drivers/watchdog/wdt.c +++ b/drivers/watchdog/wdt.c @@ -389,7 +389,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (wdt_set_heartbeat(new_heartbeat)) return -EINVAL; wdt_ping(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); default: diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c index 567005d7598e..5c52c73e1839 100644 --- a/drivers/watchdog/wdt977.c +++ b/drivers/watchdog/wdt977.c @@ -398,7 +398,7 @@ static long wdt977_ioctl(struct file *file, unsigned int cmd, return -EINVAL; wdt977_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 4f1b6f466ff5..b86195e4dc6c 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work) struct afs_call *call = container_of(work, struct afs_call, work); struct afs_uuid *r = call->request; - struct { - __be32 match; - } reply; - _enter(""); if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) - reply.match = htonl(0); + afs_send_empty_reply(call); else - reply.match = htonl(1); + rxrpc_kernel_abort_call(call->net->socket, call->rxcall, + 1, 1, "K-1"); - afs_send_simple_reply(call, &reply, sizeof(reply)); afs_put_call(call); _leave(""); } diff --git a/fs/afs/dir.c b/fs/afs/dir.c index e640d67274be..81207dc3c997 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode, * iterate through the data blob that lists the contents of an AFS directory */ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, - struct key *key) + struct key *key, afs_dataversion_t *_dir_version) { struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_xdr_dir_page *dbuf; @@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, req = afs_read_dir(dvnode, key); if 
(IS_ERR(req)) return PTR_ERR(req); + *_dir_version = req->data_version; /* round the file position up to the next entry boundary */ ctx->pos += sizeof(union afs_xdr_dirent) - 1; @@ -514,7 +515,10 @@ out: */ static int afs_readdir(struct file *file, struct dir_context *ctx) { - return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file)); + afs_dataversion_t dir_version; + + return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file), + &dir_version); } /* @@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name, * - just returns the FID the dentry name maps to if found */ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, - struct afs_fid *fid, struct key *key) + struct afs_fid *fid, struct key *key, + afs_dataversion_t *_dir_version) { struct afs_super_info *as = dir->i_sb->s_fs_info; struct afs_lookup_one_cookie cookie = { @@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); /* search the directory */ - ret = afs_dir_iterate(dir, &cookie.ctx, key); + ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; @@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, struct afs_server *server; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; struct inode *inode = NULL, *ti; + afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version); int ret, i; _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); @@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, cookie->fids[i].vid = as->volume->vid; /* search the directory */ - ret = afs_dir_iterate(dir, &cookie->ctx, key); + ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version); if (ret < 0) { inode = ERR_PTR(ret); goto out; } + dentry->d_fsdata = (void *)(unsigned long)data_version; + inode = ERR_PTR(-ENOENT); if (!cookie->found) goto out; @@ -968,7 +976,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) struct dentry *parent; struct inode *inode; struct key *key; - long dir_version, de_version; + afs_dataversion_t dir_version; + long de_version; int ret; if (flags & LOOKUP_RCU) @@ -1014,20 +1023,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) * on a 32-bit system, we only have 32 bits in the dentry to store the * version. 
*/ - dir_version = (long)dir->status.data_version; + dir_version = dir->status.data_version; de_version = (long)dentry->d_fsdata; - if (de_version == dir_version) - goto out_valid; + if (de_version == (long)dir_version) + goto out_valid_noupdate; - dir_version = (long)dir->invalid_before; - if (de_version - dir_version >= 0) + dir_version = dir->invalid_before; + if (de_version - (long)dir_version >= 0) goto out_valid; _debug("dir modified"); afs_stat_v(dir, n_reval); /* search the directory for this vnode */ - ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key); + ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version); switch (ret) { case 0: /* the filename maps to something */ @@ -1080,7 +1089,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) } out_valid: - dentry->d_fsdata = (void *)dir_version; + dentry->d_fsdata = (void *)(unsigned long)dir_version; +out_valid_noupdate: dput(parent); key_put(key); _leave(" = 1 [valid]"); @@ -1186,6 +1196,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc, } /* + * Note that a dentry got changed. We need to set d_fsdata to the data version + * number derived from the result of the operation. It doesn't matter if + * d_fsdata goes backwards as we'll just revalidate. + */ +static void afs_update_dentry_version(struct afs_fs_cursor *fc, + struct dentry *dentry, + struct afs_status_cb *scb) +{ + if (fc->ac.error == 0) + dentry->d_fsdata = + (void *)(unsigned long)scb->status.data_version; +} + +/* * create a directory on an AFS filesystem */ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) @@ -1227,6 +1251,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) afs_check_for_remote_deletion(&fc, dvnode); afs_vnode_commit_status(&fc, dvnode, fc.cb_break, &data_version, &scb[0]); + afs_update_dentry_version(&fc, dentry, &scb[0]); afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); ret = afs_end_vnode_operation(&fc); if (ret < 0) @@ -1319,6 +1344,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) afs_vnode_commit_status(&fc, dvnode, fc.cb_break, &data_version, scb); + afs_update_dentry_version(&fc, dentry, scb); ret = afs_end_vnode_operation(&fc); if (ret == 0) { afs_dir_remove_subdir(dentry); @@ -1458,6 +1484,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) &data_version, &scb[0]); afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, &data_version_2, &scb[1]); + afs_update_dentry_version(&fc, dentry, &scb[0]); ret = afs_end_vnode_operation(&fc); if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) ret = afs_dir_remove_link(dvnode, dentry, key); @@ -1526,6 +1553,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, afs_check_for_remote_deletion(&fc, dvnode); afs_vnode_commit_status(&fc, dvnode, fc.cb_break, &data_version, &scb[0]); + afs_update_dentry_version(&fc, dentry, &scb[0]); afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); ret = afs_end_vnode_operation(&fc); if (ret < 0) @@ -1607,6 +1635,7 @@ static int afs_link(struct dentry *from, struct inode *dir, afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, NULL, &scb[1]); ihold(&vnode->vfs_inode); + afs_update_dentry_version(&fc, dentry, &scb[0]); d_instantiate(dentry, &vnode->vfs_inode); mutex_unlock(&vnode->io_lock); @@ -1686,6 +1715,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, afs_check_for_remote_deletion(&fc, dvnode); afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 
&data_version, &scb[0]); + afs_update_dentry_version(&fc, dentry, &scb[0]); afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); ret = afs_end_vnode_operation(&fc); if (ret < 0) @@ -1791,6 +1821,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, } } + /* This bit is potentially nasty as there's a potential race with + * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry + * to reflect it's new parent's new data_version after the op, but + * d_revalidate may see old_dentry between the op having taken place + * and the version being updated. + * + * So drop the old_dentry for now to make other threads go through + * lookup instead - which we hold a lock against. + */ + d_drop(old_dentry); + ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { afs_dataversion_t orig_data_version; @@ -1802,9 +1843,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, if (orig_dvnode != new_dvnode) { if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) { afs_end_vnode_operation(&fc); - goto error_rehash; + goto error_rehash_old; } - new_data_version = new_dvnode->status.data_version; + new_data_version = new_dvnode->status.data_version + 1; } else { new_data_version = orig_data_version; new_scb = &scb[0]; @@ -1827,7 +1868,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, } ret = afs_end_vnode_operation(&fc); if (ret < 0) - goto error_rehash; + goto error_rehash_old; } if (ret == 0) { @@ -1853,10 +1894,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, drop_nlink(new_inode); spin_unlock(&new_inode->i_lock); } + + /* Now we can update d_fsdata on the dentries to reflect their + * new parent's data_version. + * + * Note that if we ever implement RENAME_EXCHANGE, we'll have + * to update both dentries with opposing dir versions. 
+ */ + if (new_dvnode != orig_dvnode) { + afs_update_dentry_version(&fc, old_dentry, &scb[1]); + afs_update_dentry_version(&fc, new_dentry, &scb[1]); + } else { + afs_update_dentry_version(&fc, old_dentry, &scb[0]); + afs_update_dentry_version(&fc, new_dentry, &scb[0]); + } d_move(old_dentry, new_dentry); goto error_tmp; } +error_rehash_old: + d_rehash(new_dentry); error_rehash: if (rehash) d_rehash(rehash); diff --git a/fs/afs/file.c b/fs/afs/file.c index 56b69576274d..dd3c55c9101c 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req) int i; if (refcount_dec_and_test(&req->usage)) { - for (i = 0; i < req->nr_pages; i++) - if (req->pages[i]) - put_page(req->pages[i]); - if (req->pages != req->array) - kfree(req->pages); + if (req->pages) { + for (i = 0; i < req->nr_pages; i++) + if (req->pages[i]) + put_page(req->pages[i]); + if (req->pages != req->array) + kfree(req->pages); + } kfree(req); } } diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index d7e0fd3c00df..cfb0ac4bd039 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) struct afs_uuid__xdr *xdr; struct afs_uuid *uuid; int j; + int n = entry->nr_servers; tmp = ntohl(uvldb->serverFlags[i]); if (tmp & AFS_VLSF_DONTUSE || (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) continue; if (tmp & AFS_VLSF_RWVOL) { - entry->fs_mask[i] |= AFS_VOL_VTM_RW; + entry->fs_mask[n] |= AFS_VOL_VTM_RW; if (vlflags & AFS_VLF_BACKEXISTS) - entry->fs_mask[i] |= AFS_VOL_VTM_BAK; + entry->fs_mask[n] |= AFS_VOL_VTM_BAK; } if (tmp & AFS_VLSF_ROVOL) - entry->fs_mask[i] |= AFS_VOL_VTM_RO; - if (!entry->fs_mask[i]) + entry->fs_mask[n] |= AFS_VOL_VTM_RO; + if (!entry->fs_mask[n]) continue; xdr = &uvldb->serverNumber[i]; - uuid = (struct afs_uuid *)&entry->fs_server[i]; + uuid = (struct afs_uuid *)&entry->fs_server[n]; uuid->time_low = xdr->time_low; uuid->time_mid = htons(ntohl(xdr->time_mid)); uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version)); diff --git a/fs/block_dev.c b/fs/block_dev.c index a6f7c892cb4a..677cb364d33f 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) struct bio *bio; bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; bool is_read = (iov_iter_rw(iter) == READ), is_sync; - bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0; loff_t pos = iocb->ki_pos; blk_qc_t qc = BLK_QC_T_NONE; - gfp_t gfp; - ssize_t ret; + int ret = 0; if ((pos | iov_iter_alignment(iter)) & (bdev_logical_block_size(bdev) - 1)) return -EINVAL; - if (nowait) - gfp = GFP_NOWAIT; - else - gfp = GFP_KERNEL; - - bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool); - if (!bio) - return -EAGAIN; + bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); dio->is_sync = is_sync = is_sync_kiocb(iocb); @@ -384,10 +375,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) if (!is_poll) blk_start_plug(&plug); - ret = 0; for (;;) { - int err; - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = pos >> 9; bio->bi_write_hint = iocb->ki_hint; @@ -395,10 +383,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) bio->bi_end_io = blkdev_bio_end_io; bio->bi_ioprio = iocb->ki_ioprio; - err = bio_iov_iter_get_pages(bio, iter); - if (unlikely(err)) { - if (!ret) - ret = err; + ret = bio_iov_iter_get_pages(bio, iter); + if (unlikely(ret)) { 
bio->bi_status = BLK_STS_IOERR; bio_endio(bio); break; @@ -413,14 +399,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) task_io_account_write(bio->bi_iter.bi_size); } - /* - * Tell underlying layer to not block for resource shortage. - * And if we would have blocked, return error inline instead - * of through the bio->bi_end_io() callback. - */ - if (nowait) - bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE); - dio->size += bio->bi_iter.bi_size; pos += bio->bi_iter.bi_size; @@ -434,12 +412,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) } qc = submit_bio(bio); - if (qc == BLK_QC_T_EAGAIN) { - if (!ret) - ret = -EAGAIN; - goto error; - } - ret = dio->size; if (polled) WRITE_ONCE(iocb->ki_cookie, qc); @@ -460,20 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) atomic_inc(&dio->ref); } - qc = submit_bio(bio); - if (qc == BLK_QC_T_EAGAIN) { - if (!ret) - ret = -EAGAIN; - goto error; - } - ret = dio->size; - - bio = bio_alloc(gfp, nr_pages); - if (!bio) { - if (!ret) - ret = -EAGAIN; - goto error; - } + submit_bio(bio); + bio = bio_alloc(GFP_KERNEL, nr_pages); } if (!is_poll) @@ -493,16 +453,13 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) } __set_current_state(TASK_RUNNING); -out: if (!ret) ret = blk_status_to_errno(dio->bio.bi_status); + if (likely(!ret)) + ret = dio->size; bio_put(&dio->bio); return ret; -error: - if (!is_poll) - blk_finish_plug(&plug); - goto out; } static ssize_t @@ -1754,7 +1711,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) /* finish claiming */ mutex_lock(&bdev->bd_mutex); - bd_finish_claiming(bdev, whole, holder); + if (!res) + bd_finish_claiming(bdev, whole, holder); + else + bd_abort_claiming(bdev, whole, holder); /* * Block event polling for write claims if requested. 
Any * write holder makes the write_holder state stick until diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 299e11e6c554..94660063a162 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -401,7 +401,6 @@ struct btrfs_dev_replace { struct raid_kobject { u64 flags; struct kobject kobj; - struct list_head list; }; /* @@ -915,8 +914,6 @@ struct btrfs_fs_info { u32 thread_pool_size; struct kobject *space_info_kobj; - struct list_head pending_raid_kobjs; - spinlock_t pending_raid_kobjs_lock; /* uncontended */ u64 total_pinned; @@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); -void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info); struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( struct btrfs_fs_info *fs_info, const u64 chunk_offset); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5f7ee70b3d1a..97beb351a10c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->delayed_iputs); INIT_LIST_HEAD(&fs_info->delalloc_roots); INIT_LIST_HEAD(&fs_info->caching_block_groups); - INIT_LIST_HEAD(&fs_info->pending_raid_kobjs); - spin_lock_init(&fs_info->pending_raid_kobjs_lock); spin_lock_init(&fs_info->delalloc_root_lock); spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d3b58e388535..8b7eb22d508a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4,6 +4,7 @@ */ #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/pagemap.h> #include <linux/writeback.h> @@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) return 0; } -/* link_block_group will queue up kobjects to add when we're reclaim-safe */ -void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info) -{ - struct btrfs_space_info *space_info; - struct raid_kobject *rkobj; - LIST_HEAD(list); - int ret = 0; - - spin_lock(&fs_info->pending_raid_kobjs_lock); - list_splice_init(&fs_info->pending_raid_kobjs, &list); - spin_unlock(&fs_info->pending_raid_kobjs_lock); - - list_for_each_entry(rkobj, &list, list) { - space_info = btrfs_find_space_info(fs_info, rkobj->flags); - - ret = kobject_add(&rkobj->kobj, &space_info->kobj, - "%s", btrfs_bg_type_to_raid_name(rkobj->flags)); - if (ret) { - kobject_put(&rkobj->kobj); - break; - } - } - if (ret) - btrfs_warn(fs_info, - "failed to add kobject for block cache, ignoring"); -} - static void link_block_group(struct btrfs_block_group_cache *cache) { struct btrfs_space_info *space_info = cache->space_info; @@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache) up_write(&space_info->groups_sem); if (first) { - struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); + struct raid_kobject *rkobj; + unsigned int nofs_flag; + int ret; + + /* + * Setup a NOFS context because kobject_add(), deep in its call + * chain, does GFP_KERNEL allocations, and we are often called + * in a context where if reclaim is triggered we can deadlock + * (we are either holding a transaction handle or some lock + * required for a transaction commit). 
+ */ + nofs_flag = memalloc_nofs_save(); + rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL); if (!rkobj) { + memalloc_nofs_restore(nofs_flag); btrfs_warn(cache->fs_info, "couldn't alloc memory for raid level kobject"); return; } rkobj->flags = cache->flags; kobject_init(&rkobj->kobj, &btrfs_raid_ktype); - - spin_lock(&fs_info->pending_raid_kobjs_lock); - list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs); - spin_unlock(&fs_info->pending_raid_kobjs_lock); + ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s", + btrfs_bg_type_to_raid_name(rkobj->flags)); + memalloc_nofs_restore(nofs_flag); + if (ret) { + kobject_put(&rkobj->kobj); + btrfs_warn(fs_info, + "failed to add kobject for block cache, ignoring"); + return; + } space_info->block_group_kobjs[index] = &rkobj->kobj; } } @@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) inc_block_group_ro(cache, 1); } - btrfs_add_raid_kobjects(info); btrfs_init_global_block_rsv(info); ret = check_chunk_block_group_mappings(info); error: @@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) struct btrfs_device *device; struct list_head *devices; u64 group_trimmed; + u64 range_end = U64_MAX; u64 start; u64 end; u64 trimmed = 0; @@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) int dev_ret = 0; int ret = 0; + /* + * Check range overflow if range->len is set. + * The default range->len is U64_MAX. + */ + if (range->len != U64_MAX && + check_add_overflow(range->start, range->len, &range_end)) + return -EINVAL; + cache = btrfs_lookup_first_block_group(fs_info, range->start); for (; cache; cache = next_block_group(cache)) { - if (cache->key.objectid >= (range->start + range->len)) { + if (cache->key.objectid >= range_end) { btrfs_put_block_group(cache); break; } start = max(range->start, cache->key.objectid); - end = min(range->start + range->len, - cache->key.objectid + cache->key.offset); + end = min(range_end, cache->key.objectid + cache->key.offset); if (end - start >= range->minlen) { if (!block_group_cache_done(cache)) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d74b74ca07af..a447d3ec48d5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) if (ret) return ret; - /* - * We add the kobjects here (and after forcing data chunk creation) - * since relocation is the only place we'll create chunks of a new - * type at runtime. The only place where we'll remove the last - * chunk of a type is the call immediately below this one. Even - * so, we're protected against races with the cleaner thread since - * we're covered by the delete_unused_bgs_mutex. 
- */ - btrfs_add_raid_kobjects(fs_info); - trans = btrfs_start_trans_remove_block_group(root->fs_info, chunk_offset); if (IS_ERR(trans)) { @@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, btrfs_end_transaction(trans); if (ret < 0) return ret; - - btrfs_add_raid_kobjects(fs_info); - return 1; } } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a4830ced0f98..a15a6e738eb5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1113,6 +1113,7 @@ cifs_demultiplex_thread(void *p) mempool_resize(cifs_req_poolp, length + cifs_min_rcv); set_freezable(); + allow_signal(SIGKILL); while (server->tcpStatus != CifsExiting) { if (try_to_freeze()) continue; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index a5bc1b671c12..64a5864127be 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -3489,7 +3489,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len, static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { - sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); + void *addr; + /* + * VMAP_STACK (at least) puts stack into the vmalloc address space + */ + if (is_vmalloc_addr(buf)) + addr = vmalloc_to_page(buf); + else + addr = virt_to_page(buf); + sg_set_page(sg, addr, buflen, offset_in_page(buf)); } /* Assumes the first rqst has a transform header as the first iov. @@ -4070,7 +4078,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server, { int ret, length; char *buf = server->smallbuf; - char *tmpbuf; struct smb2_sync_hdr *shdr; unsigned int pdu_length = server->pdu_size; unsigned int buf_size; @@ -4100,18 +4107,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server, return length; next_is_large = server->large_buf; - one_more: +one_more: shdr = (struct smb2_sync_hdr *)buf; if (shdr->NextCommand) { - if (next_is_large) { - tmpbuf = server->bigbuf; + if (next_is_large) next_buffer = (char *)cifs_buf_get(); - } else { - tmpbuf = server->smallbuf; + else next_buffer = (char *)cifs_small_buf_get(); - } memcpy(next_buffer, - tmpbuf + le32_to_cpu(shdr->NextCommand), + buf + le32_to_cpu(shdr->NextCommand), pdu_length - le32_to_cpu(shdr->NextCommand)); } @@ -4140,12 +4144,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server, pdu_length -= le32_to_cpu(shdr->NextCommand); server->large_buf = next_is_large; if (next_is_large) - server->bigbuf = next_buffer; + server->bigbuf = buf = next_buffer; else - server->smallbuf = next_buffer; - - buf += le32_to_cpu(shdr->NextCommand); + server->smallbuf = buf = next_buffer; goto one_more; + } else if (ret != 0) { + /* + * ret != 0 here means that we didn't get to handle_mid() thus + * server->smallbuf and server->bigbuf are still valid. We need + * to free next_buffer because it is not going to be used + * anywhere. 
+ */ + if (next_is_large) + free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer); + else + free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer); } return ret; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index c8cd7b6cdda2..31e4a1b0b170 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) if (tcon == NULL) return 0; - if (smb2_command == SMB2_TREE_CONNECT) + if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) return 0; if (tcon->tidStatus == CifsExiting) { @@ -1196,7 +1196,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) else req->SecurityMode = 0; +#ifdef CONFIG_CIFS_DFS_UPCALL + req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS); +#else req->Capabilities = 0; +#endif /* DFS_UPCALL */ + req->Channel = 0; /* MBZ */ sess_data->iov[0].iov_base = (char *)req; @@ -600,7 +600,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping) * guaranteed to either see new references or prevent new * references from being established. */ - unmap_mapping_range(mapping, 0, 0, 1); + unmap_mapping_range(mapping, 0, 0, 0); xas_lock_irq(&xas); xas_for_each(&xas, entry, ULONG_MAX) { diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 4df26ef2b2b1..4f8b5fd6c81f 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h) return mp->mp_aheight - x - 1; } +static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp) +{ + sector_t factor = 1, block = 0; + int hgt; + + for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) { + if (hgt < mp->mp_aheight) + block += mp->mp_list[hgt] * factor; + factor *= sdp->sd_inptrs; + } + return block; +} + static void release_metapath(struct metapath *mp) { int i; @@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt return ptr - first; } -typedef const __be64 *(*gfs2_metadata_walker)( - struct metapath *mp, - const __be64 *start, const __be64 *end, - u64 factor, void *data); +enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE }; -#define WALK_STOP ((__be64 *)0) -#define WALK_NEXT ((__be64 *)1) +/* + * gfs2_metadata_walker - walk an indirect block + * @mp: Metapath to indirect block + * @ptrs: Number of pointers to look at + * + * When returning WALK_FOLLOW, the walker must update @mp to point at the right + * indirect block to follow. + */ +typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp, + unsigned int ptrs); -static int gfs2_walk_metadata(struct inode *inode, sector_t lblock, - u64 len, struct metapath *mp, gfs2_metadata_walker walker, - void *data) +/* + * gfs2_walk_metadata - walk a tree of indirect blocks + * @inode: The inode + * @mp: Starting point of walk + * @max_len: Maximum number of blocks to walk + * @walker: Called during the walk + * + * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or + * past the end of metadata, and a negative error code otherwise. + */ + +static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp, + u64 max_len, gfs2_metadata_walker walker) { - struct metapath clone; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); - const __be64 *start, *end, *ptr; u64 factor = 1; unsigned int hgt; - int ret = 0; + int ret; - for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--) + /* + * The walk starts in the lowest allocated indirect block, which may be + * before the position indicated by @mp. 
Adjust @max_len accordingly + * to avoid a short walk. + */ + for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) { + max_len += mp->mp_list[hgt] * factor; + mp->mp_list[hgt] = 0; factor *= sdp->sd_inptrs; + } for (;;) { - u64 step; + u16 start = mp->mp_list[hgt]; + enum walker_status status; + unsigned int ptrs; + u64 len; /* Walk indirect block. */ - start = metapointer(hgt, mp); - end = metaend(hgt, mp); - - step = (end - start) * factor; - if (step > len) - end = start + DIV_ROUND_UP_ULL(len, factor); - - ptr = walker(mp, start, end, factor, data); - if (ptr == WALK_STOP) + ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start; + len = ptrs * factor; + if (len > max_len) + ptrs = DIV_ROUND_UP_ULL(max_len, factor); + status = walker(mp, ptrs); + switch (status) { + case WALK_STOP: + return 1; + case WALK_FOLLOW: + BUG_ON(mp->mp_aheight == mp->mp_fheight); + ptrs = mp->mp_list[hgt] - start; + len = ptrs * factor; break; - if (step >= len) + case WALK_CONTINUE: break; - len -= step; - if (ptr != WALK_NEXT) { - BUG_ON(!*ptr); - mp->mp_list[hgt] += ptr - start; - goto fill_up_metapath; } + if (len >= max_len) + break; + max_len -= len; + if (status == WALK_FOLLOW) + goto fill_up_metapath; lower_metapath: /* Decrease height of metapath. */ - if (mp != &clone) { - clone_metapath(&clone, mp); - mp = &clone; - } brelse(mp->mp_bh[hgt]); mp->mp_bh[hgt] = NULL; + mp->mp_list[hgt] = 0; if (!hgt) break; hgt--; @@ -491,10 +528,7 @@ lower_metapath: /* Advance in metadata tree. */ (mp->mp_list[hgt])++; - start = metapointer(hgt, mp); - end = metaend(hgt, mp); - if (start >= end) { - mp->mp_list[hgt] = 0; + if (mp->mp_list[hgt] >= sdp->sd_inptrs) { if (!hgt) break; goto lower_metapath; @@ -502,44 +536,36 @@ lower_metapath: fill_up_metapath: /* Increase height of metapath. 
*/ - if (mp != &clone) { - clone_metapath(&clone, mp); - mp = &clone; - } ret = fillup_metapath(ip, mp, ip->i_height - 1); if (ret < 0) - break; + return ret; hgt += ret; for (; ret; ret--) do_div(factor, sdp->sd_inptrs); mp->mp_aheight = hgt + 1; } - if (mp == &clone) - release_metapath(mp); - return ret; + return 0; } -struct gfs2_hole_walker_args { - u64 blocks; -}; - -static const __be64 *gfs2_hole_walker(struct metapath *mp, - const __be64 *start, const __be64 *end, - u64 factor, void *data) +static enum walker_status gfs2_hole_walker(struct metapath *mp, + unsigned int ptrs) { - struct gfs2_hole_walker_args *args = data; - const __be64 *ptr; + const __be64 *start, *ptr, *end; + unsigned int hgt; + + hgt = mp->mp_aheight - 1; + start = metapointer(hgt, mp); + end = start + ptrs; for (ptr = start; ptr < end; ptr++) { if (*ptr) { - args->blocks += (ptr - start) * factor; + mp->mp_list[hgt] += ptr - start; if (mp->mp_aheight == mp->mp_fheight) return WALK_STOP; - return ptr; /* increase height */ + return WALK_FOLLOW; } } - args->blocks += (end - start) * factor; - return WALK_NEXT; + return WALK_CONTINUE; } /** @@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp, static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len, struct metapath *mp, struct iomap *iomap) { - struct gfs2_hole_walker_args args = { }; - int ret = 0; + struct metapath clone; + u64 hole_size; + int ret; - ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args); - if (!ret) - iomap->length = args.blocks << inode->i_blkbits; + clone_metapath(&clone, mp); + ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker); + if (ret < 0) + goto out; + + if (ret == 1) + hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock; + else + hole_size = len; + iomap->length = hole_size << inode->i_blkbits; + ret = 0; + +out: + release_metapath(&clone); return ret; } diff --git a/fs/io_uring.c b/fs/io_uring.c index d542f1cf4428..24bbe3cb7ad4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1097,10 +1097,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw, iter->bvec = bvec + seg_skip; iter->nr_segs -= seg_skip; - iter->count -= (seg_skip << PAGE_SHIFT); + iter->count -= bvec->bv_len + offset; iter->iov_offset = offset & ~PAGE_MASK; - if (iter->iov_offset) - iter->count -= iter->iov_offset; } } @@ -2025,6 +2023,15 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, { int ret; + ret = io_req_defer(ctx, req, s->sqe); + if (ret) { + if (ret != -EIOCBQUEUED) { + io_free_req(req); + io_cqring_add_event(ctx, s->sqe->user_data, ret); + } + return 0; + } + ret = __io_submit_sqe(ctx, req, s, true); if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { struct io_uring_sqe *sqe_copy; @@ -2097,13 +2104,6 @@ err: return; } - ret = io_req_defer(ctx, req, s->sqe); - if (ret) { - if (ret != -EIOCBQUEUED) - goto err_req; - return; - } - /* * If we already have a head request, queue this one for async * submittal once the head completes. 
If we don't have a head but diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 0ff3facf81da..071b90a45933 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -153,7 +153,7 @@ again: /* Block nfs4_proc_unlck */ mutex_lock(&sp->so_delegreturn_mutex); seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); - err = nfs4_open_delegation_recall(ctx, state, stateid, type); + err = nfs4_open_delegation_recall(ctx, state, stateid); if (!err) err = nfs_delegation_claim_locks(state, stateid); if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) @@ -1046,6 +1046,22 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } +static void +nfs_delegation_test_free_expired(struct inode *inode, + nfs4_stateid *stateid, + const struct cred *cred) +{ + struct nfs_server *server = NFS_SERVER(inode); + const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops; + int status; + + if (!cred) + return; + status = ops->test_and_free_expired(server, stateid, cred); + if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) + nfs_remove_bad_delegation(inode, stateid); +} + /** * nfs_reap_expired_delegations - reap expired delegations * @clp: nfs_client to process @@ -1057,7 +1073,6 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp) */ void nfs_reap_expired_delegations(struct nfs_client *clp) { - const struct nfs4_minor_version_ops *ops = clp->cl_mvops; struct nfs_delegation *delegation; struct nfs_server *server; struct inode *inode; @@ -1088,11 +1103,7 @@ restart: nfs4_stateid_copy(&stateid, &delegation->stateid); clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); rcu_read_unlock(); - if (cred != NULL && - ops->test_and_free_expired(server, &stateid, cred) < 0) { - nfs_revoke_delegation(inode, &stateid); - nfs_inode_find_state_and_recover(inode, &stateid); - } + nfs_delegation_test_free_expired(inode, &stateid, cred); put_cred(cred); if (nfs4_server_rebooted(clp)) { nfs_inode_mark_test_expired_delegation(server,inode); diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 5799777df5ec..9eb87ae4c982 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -63,7 +63,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp); /* NFSv4 delegation-related procedures */ int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync); -int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type); +int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid); int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred); bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode); diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 53507aa96b0b..3800ab6f08fa 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -114,6 +114,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int struct rb_node **p, *parent; int diff; + nfss->fscache_key = NULL; + nfss->fscache = NULL; + if (!(nfss->options & NFS_OPTION_FSCACHE)) + return; if (!uniq) { uniq = ""; ulen = 1; @@ -226,10 +230,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb) void nfs_fscache_init_inode(struct inode *inode) { struct 
nfs_fscache_inode_auxdata auxdata; + struct nfs_server *nfss = NFS_SERVER(inode); struct nfs_inode *nfsi = NFS_I(inode); nfsi->fscache = NULL; - if (!S_ISREG(inode->i_mode)) + if (!(nfss->fscache && S_ISREG(inode->i_mode))) return; memset(&auxdata, 0, sizeof(auxdata)); diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index 25a75e40d91d..ad041cfbf9ec 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -182,7 +182,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode) */ static inline const char *nfs_server_fscache_state(struct nfs_server *server) { - if (server->fscache && (server->options & NFS_OPTION_FSCACHE)) + if (server->fscache) return "yes"; return "no "; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index d778dad9a75e..3564da1ba8a1 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -465,7 +465,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t); extern void nfs4_put_state_owner(struct nfs4_state_owner *); -extern void nfs4_purge_state_owners(struct nfs_server *); +extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *); +extern void nfs4_free_state_owners(struct list_head *head); extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); extern void nfs4_put_open_state(struct nfs4_state *); extern void nfs4_close_state(struct nfs4_state *, fmode_t); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 616393a01c06..da6204025a2d 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -758,9 +758,12 @@ out: static void nfs4_destroy_server(struct nfs_server *server) { + LIST_HEAD(freeme); + nfs_server_return_all_delegations(server); unset_pnfs_layoutdriver(server); - nfs4_purge_state_owners(server); + nfs4_purge_state_owners(server, &freeme); + nfs4_free_state_owners(&freeme); } /* diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 39896afc6edf..1406858bae6c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1683,6 +1683,14 @@ static void nfs_state_set_open_stateid(struct nfs4_state *state, write_sequnlock(&state->seqlock); } +static void nfs_state_clear_open_state_flags(struct nfs4_state *state) +{ + clear_bit(NFS_O_RDWR_STATE, &state->flags); + clear_bit(NFS_O_WRONLY_STATE, &state->flags); + clear_bit(NFS_O_RDONLY_STATE, &state->flags); + clear_bit(NFS_OPEN_STATE, &state->flags); +} + static void nfs_state_set_delegation(struct nfs4_state *state, const nfs4_stateid *deleg_stateid, fmode_t fmode) @@ -1907,8 +1915,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) if (data->o_res.delegation_type != 0) nfs4_opendata_check_deleg(data, state); update: - update_open_stateid(state, &data->o_res.stateid, NULL, - data->o_arg.fmode); + if (!update_open_stateid(state, &data->o_res.stateid, + NULL, data->o_arg.fmode)) + return ERR_PTR(-EAGAIN); refcount_inc(&state->count); return state; @@ -1973,8 +1982,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) if (data->o_res.delegation_type != 0) nfs4_opendata_check_deleg(data, state); - update_open_stateid(state, &data->o_res.stateid, NULL, - data->o_arg.fmode); + if (!update_open_stateid(state, &data->o_res.stateid, + NULL, data->o_arg.fmode)) { + nfs4_put_open_state(state); + state = ERR_PTR(-EAGAIN); + } out: nfs_release_seqid(data->o_arg.seqid); return state; @@ -2074,13 +2086,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * { int 
ret; - /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ - clear_bit(NFS_O_RDWR_STATE, &state->flags); - clear_bit(NFS_O_WRONLY_STATE, &state->flags); - clear_bit(NFS_O_RDONLY_STATE, &state->flags); /* memory barrier prior to reading state->n_* */ - clear_bit(NFS_DELEGATED_STATE, &state->flags); - clear_bit(NFS_OPEN_STATE, &state->flags); smp_rmb(); ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); if (ret != 0) @@ -2156,6 +2162,8 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta ctx = nfs4_state_find_open_context(state); if (IS_ERR(ctx)) return -EAGAIN; + clear_bit(NFS_DELEGATED_STATE, &state->flags); + nfs_state_clear_open_state_flags(state); ret = nfs4_do_open_reclaim(ctx, state); put_nfs_open_context(ctx); return ret; @@ -2171,18 +2179,17 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct case -ENOENT: case -EAGAIN: case -ESTALE: + case -ETIMEDOUT: break; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: - set_bit(NFS_DELEGATED_STATE, &state->flags); nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); return -EAGAIN; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: - set_bit(NFS_DELEGATED_STATE, &state->flags); /* Don't recall a delegation if it was lost */ nfs4_schedule_lease_recovery(server->nfs_client); return -EAGAIN; @@ -2203,7 +2210,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct return -EAGAIN; case -NFS4ERR_DELAY: case -NFS4ERR_GRACE: - set_bit(NFS_DELEGATED_STATE, &state->flags); ssleep(1); return -EAGAIN; case -ENOMEM: @@ -2219,8 +2225,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct } int nfs4_open_delegation_recall(struct nfs_open_context *ctx, - struct nfs4_state *state, const nfs4_stateid *stateid, - fmode_t type) + struct nfs4_state *state, const nfs4_stateid *stateid) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_opendata *opendata; @@ -2231,20 +2236,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, if (IS_ERR(opendata)) return PTR_ERR(opendata); nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); - nfs_state_clear_delegation(state); - switch (type & (FMODE_READ|FMODE_WRITE)) { - case FMODE_READ|FMODE_WRITE: - case FMODE_WRITE: + if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); if (err) - break; + goto out; + } + if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { err = nfs4_open_recover_helper(opendata, FMODE_WRITE); if (err) - break; - /* Fall through */ - case FMODE_READ: + goto out; + } + if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { err = nfs4_open_recover_helper(opendata, FMODE_READ); + if (err) + goto out; } + nfs_state_clear_delegation(state); +out: nfs4_opendata_put(opendata); return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); } @@ -2492,6 +2500,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, if (!ctx) { nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); data->is_recover = true; + task_setup_data.flags |= RPC_TASK_TIMEOUT; } else { nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); pnfs_lgopen_prepare(data, ctx); @@ -2698,6 +2707,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st { /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 
nfs40_clear_delegation_stateid(state); + nfs_state_clear_open_state_flags(state); return nfs4_open_expired(sp, state); } @@ -2740,13 +2750,13 @@ out_free: return -NFS4ERR_EXPIRED; } -static void nfs41_check_delegation_stateid(struct nfs4_state *state) +static int nfs41_check_delegation_stateid(struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(state->inode); nfs4_stateid stateid; struct nfs_delegation *delegation; const struct cred *cred = NULL; - int status; + int status, ret = NFS_OK; /* Get the delegation credential for use by test/free_stateid */ rcu_read_lock(); @@ -2754,20 +2764,15 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) if (delegation == NULL) { rcu_read_unlock(); nfs_state_clear_delegation(state); - return; + return NFS_OK; } nfs4_stateid_copy(&stateid, &delegation->stateid); - if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { - rcu_read_unlock(); - nfs_state_clear_delegation(state); - return; - } if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) { rcu_read_unlock(); - return; + return NFS_OK; } if (delegation->cred) @@ -2777,9 +2782,24 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) trace_nfs4_test_delegation_stateid(state, NULL, status); if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) nfs_finish_clear_delegation_stateid(state, &stateid); + else + ret = status; - if (delegation->cred) - put_cred(cred); + put_cred(cred); + return ret; +} + +static void nfs41_delegation_recover_stateid(struct nfs4_state *state) +{ + nfs4_stateid tmp; + + if (test_bit(NFS_DELEGATED_STATE, &state->flags) && + nfs4_copy_delegation_stateid(state->inode, state->state, + &tmp, NULL) && + nfs4_stateid_match_other(&state->stateid, &tmp)) + nfs_state_set_delegation(state, &tmp, state->state); + else + nfs_state_clear_delegation(state); } /** @@ -2849,21 +2869,12 @@ static int nfs41_check_open_stateid(struct nfs4_state *state) const struct cred *cred = state->owner->so_cred; int status; - if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) { - if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) { - if (nfs4_have_delegation(state->inode, state->state)) - return NFS_OK; - return -NFS4ERR_OPENMODE; - } + if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) return -NFS4ERR_BAD_STATEID; - } status = nfs41_test_and_free_expired_stateid(server, stateid, cred); trace_nfs4_test_open_stateid(state, NULL, status); if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { - clear_bit(NFS_O_RDONLY_STATE, &state->flags); - clear_bit(NFS_O_WRONLY_STATE, &state->flags); - clear_bit(NFS_O_RDWR_STATE, &state->flags); - clear_bit(NFS_OPEN_STATE, &state->flags); + nfs_state_clear_open_state_flags(state); stateid->type = NFS4_INVALID_STATEID_TYPE; return status; } @@ -2876,7 +2887,11 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st { int status; - nfs41_check_delegation_stateid(state); + status = nfs41_check_delegation_stateid(state); + if (status != NFS_OK) + return status; + nfs41_delegation_recover_stateid(state); + status = nfs41_check_expired_locks(state); if (status != NFS_OK) return status; @@ -3201,7 +3216,7 @@ static int _nfs4_do_setattr(struct inode *inode, if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ - } else if (ctx != NULL) { + } else if (ctx != NULL && ctx->state) { struct nfs_lock_context *l_ctx; if (!nfs4_valid_open_stateid(ctx->state)) return -EBADF; diff --git a/fs/nfs/nfs4state.c 
b/fs/nfs/nfs4state.c index 9afd051a4876..cad4e064b328 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -624,24 +624,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp) /** * nfs4_purge_state_owners - Release all cached state owners * @server: nfs_server with cached state owners to release + * @head: resulting list of state owners * * Called at umount time. Remaining state owners will be on * the LRU with ref count of zero. + * Note that the state owners are not freed, but are added + * to the list @head, which can later be used as an argument + * to nfs4_free_state_owners. */ -void nfs4_purge_state_owners(struct nfs_server *server) +void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *tmp; - LIST_HEAD(doomed); spin_lock(&clp->cl_lock); list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { - list_move(&sp->so_lru, &doomed); + list_move(&sp->so_lru, head); nfs4_remove_state_owner_locked(sp); } spin_unlock(&clp->cl_lock); +} - list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { +/** + * nfs4_free_state_owners - Release all cached state owners + * @head: resulting list of state owners + * + * Frees a list of state owners that was generated by + * nfs4_purge_state_owners + */ +void nfs4_free_state_owners(struct list_head *head) +{ + struct nfs4_state_owner *sp, *tmp; + + list_for_each_entry_safe(sp, tmp, head, so_lru) { list_del(&sp->so_lru); nfs4_free_state_owner(sp); } @@ -1463,7 +1478,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode, nfs4_schedule_state_manager(clp); } -static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) +static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err) { struct inode *inode = state->inode; struct nfs_inode *nfsi = NFS_I(inode); @@ -1474,6 +1489,8 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) if (ctx->state != state) continue; set_bit(NFS_CONTEXT_BAD, &ctx->flags); + pr_warn("NFSv4: state recovery failed for open file %pd2, " + "error = %d\n", ctx->dentry, err); } rcu_read_unlock(); } @@ -1481,7 +1498,7 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error) { set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags); - nfs4_state_mark_open_context_bad(state); + nfs4_state_mark_open_context_bad(state, error); } @@ -1512,6 +1529,7 @@ restart: switch (status) { case 0: break; + case -ETIMEDOUT: case -ESTALE: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: @@ -1605,6 +1623,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops) { struct nfs4_state *state; + unsigned int loop = 0; int status = 0; /* Note: we rely on the sp->so_states list being ordered @@ -1631,8 +1650,10 @@ restart: switch (status) { default: - if (status >= 0) + if (status >= 0) { + loop = 0; break; + } printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status); /* Fall through */ case -ENOENT: @@ -1646,6 +1667,10 @@ restart: break; case -EAGAIN: ssleep(1); + if (loop++ < 10) { + set_bit(ops->state_flag_bit, &state->flags); + break; + } /* Fall through */ case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: @@ -1658,11 +1683,13 @@ restart: case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE:
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); + /* Fall through */ case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + case -ETIMEDOUT: goto out_err; } nfs4_put_open_state(state); @@ -1856,12 +1883,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov struct nfs4_state_owner *sp; struct nfs_server *server; struct rb_node *pos; + LIST_HEAD(freeme); int status = 0; restart: rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { - nfs4_purge_state_owners(server); + nfs4_purge_state_owners(server, &freeme); spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; @@ -1890,6 +1918,7 @@ restart: spin_unlock(&clp->cl_lock); } rcu_read_unlock(); + nfs4_free_state_owners(&freeme); return 0; } @@ -1945,7 +1974,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status) return -EPERM; case -EACCES: case -NFS4ERR_DELAY: - case -ETIMEDOUT: case -EAGAIN: ssleep(1); break; @@ -2574,7 +2602,7 @@ static void nfs4_state_manager(struct nfs_client *clp) } /* Now recover expired state... */ - if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { + if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { section = "reclaim nograce"; status = nfs4_do_reclaim(clp, clp->cl_mvops->nograce_recovery_ops); @@ -2582,6 +2610,7 @@ static void nfs4_state_manager(struct nfs_client *clp) continue; if (status < 0) goto out_error; + clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); } nfs4_end_drain_session(clp); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 75bd5b552ba4..4525d5acae38 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1903,12 +1903,6 @@ lookup_again: goto out_unlock; } - if (!nfs4_valid_open_stateid(ctx->state)) { - trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, - PNFS_UPDATE_LAYOUT_INVALID_OPEN); - goto out_unlock; - } - /* * Choose a stateid for the LAYOUTGET. If we don't have a layout * stateid, or it has been invalidated, then we must use the open @@ -1939,6 +1933,7 @@ lookup_again: iomode == IOMODE_RW ? 
FMODE_WRITE : FMODE_READ, NULL, &stateid, NULL); if (status != 0) { + lseg = ERR_PTR(status); trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, PNFS_UPDATE_LAYOUT_INVALID_OPEN); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 628631e2e34f..703f595dce90 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2260,6 +2260,7 @@ nfs_compare_remount_data(struct nfs_server *nfss, data->acdirmin != nfss->acdirmin / HZ || data->acdirmax != nfss->acdirmax / HZ || data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) || + (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) || data->nfs_server.port != nfss->port || data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen || !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address, diff --git a/fs/seq_file.c b/fs/seq_file.c index 04f09689cd6d..1600034a929b 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset) } if (seq_has_overflowed(m)) goto Eoverflow; + p = m->op->next(m, p, &m->index); if (pos + m->count > offset) { m->from = offset - pos; m->count -= m->from; @@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset) } pos += m->count; m->count = 0; - p = m->op->next(m, p, &m->index); if (pos == offset) break; } diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index baf0b72c0a37..07aad70f3931 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3835,15 +3835,28 @@ xfs_bmapi_read( XFS_STATS_INC(mp, xs_blk_mapr); ifp = XFS_IFORK_PTR(ip, whichfork); + if (!ifp) { + /* No CoW fork? Return a hole. */ + if (whichfork == XFS_COW_FORK) { + mval->br_startoff = bno; + mval->br_startblock = HOLESTARTBLOCK; + mval->br_blockcount = len; + mval->br_state = XFS_EXT_NORM; + *nmap = 1; + return 0; + } - /* No CoW fork? Return a hole. */ - if (whichfork == XFS_COW_FORK && !ifp) { - mval->br_startoff = bno; - mval->br_startblock = HOLESTARTBLOCK; - mval->br_blockcount = len; - mval->br_state = XFS_EXT_NORM; - *nmap = 1; - return 0; + /* + * A missing attr ifork implies that the inode says we're in + * extents or btree format but failed to pass the inode fork + * verifier while trying to load it. Treat that as a file + * corruption too. 
+ */ +#ifdef DEBUG + xfs_alert(mp, "%s: inode %llu missing fork %d", + __func__, ip->i_ino, whichfork); +#endif /* DEBUG */ + return -EFSCORRUPTED; } if (!(ifp->if_flags & XFS_IFEXTENTS)) { diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c index d1c77fd0815d..0bf56e94bfe9 100644 --- a/fs/xfs/libxfs/xfs_da_btree.c +++ b/fs/xfs/libxfs/xfs_da_btree.c @@ -487,10 +487,8 @@ xfs_da3_split( ASSERT(state->path.active == 0); oldblk = &state->path.blk[0]; error = xfs_da3_root_split(state, oldblk, addblk); - if (error) { - addblk->bp = NULL; - return error; /* GROT: dir is inconsistent */ - } + if (error) + goto out; /* * Update pointers to the node which used to be block 0 and just got @@ -505,7 +503,10 @@ xfs_da3_split( */ node = oldblk->bp->b_addr; if (node->hdr.info.forw) { - ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno); + if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) { + error = -EFSCORRUPTED; + goto out; + } node = addblk->bp->b_addr; node->hdr.info.back = cpu_to_be32(oldblk->blkno); xfs_trans_log_buf(state->args->trans, addblk->bp, @@ -514,15 +515,19 @@ xfs_da3_split( } node = oldblk->bp->b_addr; if (node->hdr.info.back) { - ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno); + if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) { + error = -EFSCORRUPTED; + goto out; + } node = addblk->bp->b_addr; node->hdr.info.forw = cpu_to_be32(oldblk->blkno); xfs_trans_log_buf(state->args->trans, addblk->bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); } +out: addblk->bp = NULL; - return 0; + return error; } /* diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index afcc6642690a..1fc44efc344d 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry( ents = dp->d_ops->leaf_ents_p(leaf); xfs_dir3_leaf_check(dp, bp); - ASSERT(leafhdr.count > 0); + if (leafhdr.count <= 0) + return -EFSCORRUPTED; /* * Look up the hash value in the leaf entries. diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 00e9f5c388d3..7fc3c1ad36bc 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -429,10 +429,7 @@ xfs_log_reserve( ASSERT(*ticp == NULL); tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, - KM_SLEEP | KM_MAYFAIL); - if (!tic) - return -ENOMEM; - + KM_SLEEP); *ticp = tic; xlog_grant_push_ail(log, tic->t_cnt ? 
tic->t_unit_res * tic->t_cnt diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h index bb6cb347018c..f6947da70d71 100644 --- a/include/asm-generic/5level-fixup.h +++ b/include/asm-generic/5level-fixup.h @@ -19,9 +19,24 @@ #define p4d_alloc(mm, pgd, address) (pgd) #define p4d_offset(pgd, start) (pgd) -#define p4d_none(p4d) 0 -#define p4d_bad(p4d) 0 -#define p4d_present(p4d) 1 + +#ifndef __ASSEMBLY__ +static inline int p4d_none(p4d_t p4d) +{ + return 0; +} + +static inline int p4d_bad(p4d_t p4d) +{ + return 0; +} + +static inline int p4d_present(p4d_t p4d) +{ + return 1; +} +#endif + #define p4d_ERROR(p4d) do { } while (0) #define p4d_clear(p4d) pgd_clear(p4d) #define p4d_val(p4d) pgd_val(p4d) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 16c769a7f979..6db030439e29 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -34,6 +34,7 @@ struct kvm_pmu { u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val); @@ -71,6 +72,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { return 0; } +static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 46bbc949c20a..7a30524a80ee 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); void kvm_vgic_load(struct kvm_vcpu *vcpu); void kvm_vgic_put(struct kvm_vcpu *vcpu); +void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) ((k)->arch.vgic.initialized) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 1b1fa1557e68..feff3fe4467e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -311,7 +311,6 @@ enum req_flag_bits { __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ - __REQ_NOWAIT_INLINE, /* Return would-block error inline */ /* * When a shared kthread needs to issue a bio for a cgroup, doing * so synchronously can lead to priority inversions as the kthread @@ -346,7 +345,6 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) -#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) @@ -420,13 +418,12 @@ static inline int op_stat_group(unsigned int op) typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U -#define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { - return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; + return cookie != BLK_QC_T_NONE; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) diff --git a/include/linux/ccp.h 
b/include/linux/ccp.h index 55cb455cfcb0..a5dfbaf2470d 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -170,6 +170,8 @@ struct ccp_aes_engine { enum ccp_aes_mode mode; enum ccp_aes_action action; + u32 authsize; + struct scatterlist *key; u32 key_len; /* In bytes */ diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 3813211a9aad..0bff3d7fac92 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, dma_addr_t dma_addr); - -#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); + +#ifdef CONFIG_MMU +pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); #else -# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) -#endif +static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, + unsigned long attrs) +{ + return prot; /* no protection bits supported without page tables */ +} +#endif /* CONFIG_MMU */ #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/include/linux/gfp.h b/include/linux/gfp.h index fb07b503dc45..f33881688f42 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, - int node, bool hugepage); -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ - alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) + int node); #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) -#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ - alloc_pages(gfp_mask, order) -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ +#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) + alloc_pages_vma(gfp_mask, 0, vma, addr, node) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); diff --git a/include/linux/key.h b/include/linux/key.h index 91f391cd272e..50028338a4cc 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -94,11 +94,11 @@ struct keyring_index_key { union { struct { #ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */ - u8 desc_len; - char desc[sizeof(long) - 1]; /* First few chars of description */ + u16 desc_len; + char desc[sizeof(long) - 2]; /* First few chars of description */ #else - char desc[sizeof(long) - 1]; /* First few chars of description */ - u8 desc_len; + char desc[sizeof(long) - 2]; /* First few chars of description */ + u16 desc_len; #endif }; unsigned long x; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5c5b5867024c..fcb46b3374c6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -861,8 +861,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void 
kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); -bool kvm_arch_has_vcpu_debugfs(void); -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +#endif int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); @@ -872,6 +873,7 @@ int kvm_arch_check_processor_compat(void); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 44c41462be33..2cd4359cb38c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); +void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) @@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page, mod_node_page_state(page_pgdat(page), idx, val); } +static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + struct page *page = virt_to_head_page(p); + + __mod_node_page_state(page_pgdat(page), idx, val); +} + static inline unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, @@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page, __mod_lruvec_page_state(page, idx, -1); } +static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx) +{ + __mod_lruvec_slab_state(p, idx, 1); +} + +static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx) +{ + __mod_lruvec_slab_state(p, idx, -1); +} + /* idx can be of type enum memcg_stat_item or node_stat_item */ static inline void inc_memcg_state(struct mem_cgroup *memcg, int idx) diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5228c62af416..bac395f1d00a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr); +struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index ce9839c8bc1a..c2f056b5766d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -446,11 +446,11 @@ enum { }; enum { - MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20, + MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, }; enum { - MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20, + MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index da5e7eaed438..a66ed0abe40e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -10078,9 +10078,8 @@ struct mlx5_ifc_tls_static_params_bits { }; struct mlx5_ifc_tls_progress_params_bits { - u8 valid[0x1]; - u8 reserved_at_1[0x7]; - u8 pd[0x18]; + u8 reserved_at_0[0x8]; + u8 tisn[0x18]; u8 
next_record_tcp_sn[0x20]; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3a37a89eb7a7..6a7a1083b6fb 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -159,7 +159,16 @@ struct page { /** @pgmap: Points to the hosting device page map. */ struct dev_pagemap *pgmap; void *zone_device_data; - unsigned long _zd_pad_1; /* uses mapping */ + /* + * ZONE_DEVICE private pages are counted as being + * mapped so the next 3 words hold the mapping, index, + * and private fields from the source anonymous or + * page cache page while the page is migrated to device + * private memory. + * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also + * use the mapping, index, and private fields when + * pmem backed DAX files are mapped. + */ }; /** @rcu_head: You can use this to free a page by RCU. */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 9e700d9f9f28..82e4cd1b7ac3 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1567,8 +1567,10 @@ extern bool pcie_ports_native; #ifdef CONFIG_PCIEASPM bool pcie_aspm_support_enabled(void); +bool pcie_aspm_enabled(struct pci_dev *pdev); #else static inline bool pcie_aspm_support_enabled(void) { return false; } +static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } #endif #ifdef CONFIG_PCIEAER diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7eb28b72d9ba..77c6dc88e95d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1362,6 +1362,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; +static inline void skb_copy_decrypted(struct sk_buff *to, + const struct sk_buff *from) +{ +#ifdef CONFIG_TLS_DEVICE + to->decrypted = from->decrypted; +#endif +} + #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { diff --git a/include/linux/socket.h b/include/linux/socket.h index 97523818cb14..fc0bed59fc84 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -292,6 +292,9 @@ struct ucred { #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ +#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry + * plain text and require encryption + */ #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ diff --git a/include/linux/usb.h b/include/linux/usb.h index 83d35d993e8c..e87826e23d59 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1457,7 +1457,7 @@ typedef void (*usb_complete_t)(struct urb *); * field rather than determining a dma address themselves. * * Note that transfer_buffer must still be set if the controller - * does not support DMA (as indicated by bus.uses_dma) and when talking + * does not support DMA (as indicated by hcd_uses_dma()) and when talking * to root hub. If you have to trasfer between highmem zone and the device * on such controller, create a bounce buffer or bail out with an error. 
* If transfer_buffer cannot be set (is in highmem) and the controller is DMA diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index bab27ccc8ff5..a20e7815d814 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -422,6 +422,9 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd, return hcd->high_prio_bh.completing_ep == ep; } +#define hcd_uses_dma(hcd) \ + (IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma) + extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, int status); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ded574b32c20..ffc95b382eb5 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -278,6 +278,7 @@ struct hci_dev { __u16 conn_info_min_age; __u16 conn_info_max_age; __u16 auth_payload_timeout; + __u8 min_enc_key_size; __u8 ssp_debug_mode; __u8 hw_error_code; __u32 clock; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 010f26b31c89..bac79e817776 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -171,7 +171,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb, void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, struct sk_buff *parent); void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, - void *reasm_data); + void *reasm_data, bool try_coalesce); struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); #endif diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 4a9da951a794..cb668bc2692d 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -61,7 +61,6 @@ struct net { spinlock_t rules_mod_lock; u32 hash_mix; - atomic64_t cookie_gen; struct list_head list; /* list of network namespaces */ struct list_head exit_list; /* To linked to call pernet exit diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index dc301e3d6739..e73d16f8b870 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -423,8 +423,7 @@ struct nft_set { unsigned char *udata; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; - u16 flags:13, - bound:1, + u16 flags:14, genmask:2; u8 klen; u8 dlen; @@ -1360,12 +1359,15 @@ struct nft_trans_rule { struct nft_trans_set { struct nft_set *set; u32 set_id; + bool bound; }; #define nft_trans_set(trans) \ (((struct nft_trans_set *)trans->data)->set) #define nft_trans_set_id(trans) \ (((struct nft_trans_set *)trans->data)->set_id) +#define nft_trans_set_bound(trans) \ + (((struct nft_trans_set *)trans->data)->bound) struct nft_trans_chain { bool update; @@ -1396,12 +1398,15 @@ struct nft_trans_table { struct nft_trans_elem { struct nft_set *set; struct nft_set_elem elem; + bool bound; }; #define nft_trans_elem_set(trans) \ (((struct nft_trans_elem *)trans->data)->set) #define nft_trans_elem(trans) \ (((struct nft_trans_elem *)trans->data)->elem) +#define nft_trans_elem_set_bound(trans) \ + (((struct nft_trans_elem *)trans->data)->bound) struct nft_trans_obj { struct nft_object *obj; diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 8a5969d9b80b..db104665a9e4 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -78,4 +78,6 @@ void nft_indr_block_get_and_ing_cmd(struct net_device 
*dev, (__reg)->key = __key; \ memset(&(__reg)->mask, 0xff, (__reg)->len); +int nft_chain_offload_priority(struct nft_base_chain *basechain); + #endif diff --git a/include/net/netlink.h b/include/net/netlink.h index e4650e5b64a1..b140c8f1be22 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -684,9 +684,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, const struct nla_policy *policy, struct netlink_ext_ack *extack) { - return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), - nlmsg_attrlen(nlh, hdrlen), policy, - NL_VALIDATE_STRICT, extack); + return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, + NL_VALIDATE_STRICT, extack); } /** diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0790a4ed909c..64999ffcb486 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -611,7 +611,7 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common, { cls_common->chain_index = tp->chain->index; cls_common->protocol = tp->protocol; - cls_common->prio = tp->prio; + cls_common->prio = tp->prio >> 16; if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) cls_common->extack = extack; } diff --git a/include/net/sock.h b/include/net/sock.h index 228db3998e46..2c53f1a1d905 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2482,6 +2482,7 @@ static inline bool sk_fullsock(const struct sock *sk) /* Checks if this SKB belongs to an HW offloaded socket * and whether any SW fallbacks are required based on dev. + * Check decrypted mark in case skb_orphan() cleared socket. */ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) @@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, #ifdef CONFIG_SOCK_VALIDATE_XMIT struct sock *sk = skb->sk; - if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) + if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { skb = sk->sk_validate_xmit_skb(sk, dev, skb); +#ifdef CONFIG_TLS_DEVICE + } else if (unlikely(skb->decrypted)) { + pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n"); + kfree_skb(skb); + skb = NULL; +#endif + } #endif return skb; diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index 954563ee2277..985a5f583de4 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -141,6 +141,10 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, { struct device *dev = simple_priv_to_dev(priv); + /* dai might be NULL */ + if (!dai) + return; + if (dai->name) dev_dbg(dev, "%s dai name = %s\n", name, dai->name); diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index cc1d060cbf13..fa06b528c73c 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -498,10 +498,10 @@ rxrpc_tx_points; #define E_(a, b) { a, b } TRACE_EVENT(rxrpc_local, - TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, + TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op, int usage, const void *where), - TP_ARGS(local, op, usage, where), + TP_ARGS(local_debug_id, op, usage, where), TP_STRUCT__entry( __field(unsigned int, local ) @@ -511,7 +511,7 @@ TRACE_EVENT(rxrpc_local, ), TP_fast_assign( - __entry->local = local->debug_id; + __entry->local = local_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4393bd4b2419..0e66371bea13 100644 --- a/include/uapi/linux/bpf.h 
+++ b/include/uapi/linux/bpf.h @@ -1467,8 +1467,8 @@ union bpf_attr { * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket - * networking traffic statistics as it provides a unique socket - * identifier per namespace. + * networking traffic statistics as it provides a global socket + * identifier that can be assumed unique. * Return * A 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 070d1bc7e725..20917c59f39c 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -410,21 +410,6 @@ struct kfd_ioctl_unmap_memory_from_gpu_args { __u32 n_success; /* to/from KFD */ }; -/* Allocate GWS for specific queue - * - * @gpu_id: device identifier - * @queue_id: queue's id that GWS is allocated for - * @num_gws: how many GWS to allocate - * @first_gws: index of the first GWS allocated. - * only support contiguous GWS allocation - */ -struct kfd_ioctl_alloc_queue_gws_args { - __u32 gpu_id; /* to KFD */ - __u32 queue_id; /* to KFD */ - __u32 num_gws; /* to KFD */ - __u32 first_gws; /* from KFD */ -}; - struct kfd_ioctl_get_dmabuf_info_args { __u64 size; /* from KFD */ __u64 metadata_ptr; /* to KFD */ @@ -544,10 +529,7 @@ enum kfd_mmio_remap { #define AMDKFD_IOC_IMPORT_DMABUF \ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args) -#define AMDKFD_IOC_ALLOC_QUEUE_GWS \ - AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args) - #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x1F +#define AMDKFD_COMMAND_END 0x1E #endif diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h index 7de68f1dc707..af735f55b291 100644 --- a/include/uapi/rdma/siw-abi.h +++ b/include/uapi/rdma/siw-abi.h @@ -180,6 +180,7 @@ struct siw_cqe { * to control CQ arming. 
*/ struct siw_cq_ctrl { - __aligned_u64 notify; + __u32 flags; + __u32 pad; }; #endif diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h index 1afca973eb09..e9f697467a86 100644 --- a/include/uapi/sound/sof/fw.h +++ b/include/uapi/sound/sof/fw.h @@ -13,6 +13,8 @@ #ifndef __INCLUDE_UAPI_SOF_FW_H__ #define __INCLUDE_UAPI_SOF_FW_H__ +#include <linux/types.h> + #define SND_SOF_FW_SIG_SIZE 4 #define SND_SOF_FW_ABI 1 #define SND_SOF_FW_SIG "Reef" @@ -46,8 +48,8 @@ enum snd_sof_fw_blk_type { struct snd_sof_blk_hdr { enum snd_sof_fw_blk_type type; - uint32_t size; /* bytes minus this header */ - uint32_t offset; /* offset from base */ + __u32 size; /* bytes minus this header */ + __u32 offset; /* offset from base */ } __packed; /* @@ -61,8 +63,8 @@ enum snd_sof_fw_mod_type { struct snd_sof_mod_hdr { enum snd_sof_fw_mod_type type; - uint32_t size; /* bytes minus this header */ - uint32_t num_blocks; /* number of blocks */ + __u32 size; /* bytes minus this header */ + __u32 num_blocks; /* number of blocks */ } __packed; /* @@ -70,9 +72,9 @@ struct snd_sof_mod_hdr { */ struct snd_sof_fw_header { unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */ - uint32_t file_size; /* size of file minus this header */ - uint32_t num_modules; /* number of modules */ - uint32_t abi; /* version of header format */ + __u32 file_size; /* size of file minus this header */ + __u32 num_modules; /* number of modules */ + __u32 abi; /* version of header format */ } __packed; #endif diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h index 7868990b0d6f..5f4518e7a972 100644 --- a/include/uapi/sound/sof/header.h +++ b/include/uapi/sound/sof/header.h @@ -9,6 +9,8 @@ #ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ #define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ +#include <linux/types.h> + /* * Header for all non IPC ABI data. * @@ -16,12 +18,12 @@ * Used by any bespoke component data structures or binary blobs. */ struct sof_abi_hdr { - uint32_t magic; /**< 'S', 'O', 'F', '\0' */ - uint32_t type; /**< component specific type */ - uint32_t size; /**< size in bytes of data excl. this struct */ - uint32_t abi; /**< SOF ABI version */ - uint32_t reserved[4]; /**< reserved for future use */ - uint32_t data[0]; /**< Component data - opaque to core */ + __u32 magic; /**< 'S', 'O', 'F', '\0' */ + __u32 type; /**< component specific type */ + __u32 size; /**< size in bytes of data excl. this struct */ + __u32 abi; /**< SOF ABI version */ + __u32 reserved[4]; /**< reserved for future use */ + __u32 data[0]; /**< Component data - opaque to core */ } __packed; #endif diff --git a/kernel/configs.c b/kernel/configs.c index b062425ccf8d..c09ea4c995e1 100644 --- a/kernel/configs.c +++ b/kernel/configs.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * kernel/configs.c * Echo the kernel .config file used to build the kernel @@ -6,21 +7,6 @@ * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net> * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com> * Copyright (C) 2002 Hewlett-Packard Company - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. 
See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 59bdceea3737..795c9b095d75 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev) { u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); - if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma) - max_dma = dev->bus_dma_mask; - return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } @@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, if (!page) return NULL; - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && + !force_dma_unencrypted(dev)) { /* remove any dirty cache lines on the kernel alias */ if (!PageHighMem(page)) arch_dma_prep_coherent(page, size); + *dma_handle = phys_to_dma(dev, page_to_phys(page)); /* return the page pointer as the opaque cookie */ return page; } @@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, { unsigned int page_order = get_order(size); - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && + !force_dma_unencrypted(dev)) { /* cpu_addr is a struct page cookie, not a kernel address */ __dma_direct_free_pages(dev, size, cpu_addr); return; diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index b945239621d8..b0038ca3aa92 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, } EXPORT_SYMBOL(dma_get_sgtable_attrs); +#ifdef CONFIG_MMU +/* + * Return the page attributes used for mapping dma_alloc_* memory, either in + * kernel space if remapping is needed, or to userspace through dma_mmap_*. + */ +pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) +{ + if (dev_is_dma_coherent(dev) || + (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && + (attrs & DMA_ATTR_NON_CONSISTENT))) + return prot; + if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT)) + return arch_dma_mmap_pgprot(dev, prot, attrs); + return pgprot_noncached(prot); +} +#endif /* CONFIG_MMU */ + /* * Create userspace mapping for the DMA-coherent memory. 
*/ @@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long pfn; int ret = -ENXIO; - vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index a594aec07882..ffe78f0b2fe4 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, /* create a coherent mapping */ ret = dma_common_contiguous_remap(page, size, VM_USERMAP, - arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs), + dma_pgprot(dev, PAGE_KERNEL, attrs), __builtin_return_address(0)); if (!ret) { __dma_direct_free_pages(dev, size, page); diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index 4352b08ae48d..6fef48033f96 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -251,11 +251,9 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd) * Determine the number of vectors which need interrupt affinities * assigned. If the pre/post request exhausts the available vectors * then nothing to do here except for invoking the calc_sets() - * callback so the device driver can adjust to the situation. If there - * is only a single vector, then managing the queue is pointless as - * well. + * callback so the device driver can adjust to the situation. */ - if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors) + if (nvecs > affd->pre_vectors + affd->post_vectors) affvecs = nvecs - affd->pre_vectors - affd->post_vectors; else affvecs = 0; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 636ca6f88c8e..867b4bb6d4be 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -40,6 +40,7 @@ struct sugov_policy { struct task_struct *thread; bool work_in_progress; + bool limits_changed; bool need_freq_update; }; @@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) !cpufreq_this_cpu_can_update(sg_policy->policy)) return false; - if (unlikely(sg_policy->need_freq_update)) + if (unlikely(sg_policy->limits_changed)) { + sg_policy->limits_changed = false; + sg_policy->need_freq_update = true; return true; + } delta_ns = time - sg_policy->last_freq_update_time; @@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) { if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) - sg_policy->need_freq_update = true; + sg_policy->limits_changed = true; } static void sugov_update_single(struct update_util_data *hook, u64 time, @@ -457,7 +461,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, if (!sugov_should_update_freq(sg_policy, time)) return; - busy = sugov_cpu_is_busy(sg_cpu); + /* Limits may have changed, don't skip frequency update */ + busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu); util = sugov_get_util(sg_cpu); max = sg_cpu->max; @@ -831,6 +836,7 @@ static int sugov_start(struct cpufreq_policy *policy) sg_policy->last_freq_update_time = 0; sg_policy->next_freq = 0; sg_policy->work_in_progress = false; + sg_policy->limits_changed = false; sg_policy->need_freq_update = false; sg_policy->cached_raw_freq = 0; @@ -879,7 +885,7 @@ static void sugov_limits(struct cpufreq_policy 
*policy) mutex_unlock(&sg_policy->work_lock); } - sg_policy->need_freq_update = true; + sg_policy->limits_changed = true; } struct cpufreq_governor schedutil_gov = { diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index ef5b9f6b1d42..46122edd8552 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2088,17 +2088,13 @@ retry: } deactivate_task(rq, next_task, 0); - sub_running_bw(&next_task->dl, &rq->dl); - sub_rq_bw(&next_task->dl, &rq->dl); set_task_cpu(next_task, later_rq->cpu); - add_rq_bw(&next_task->dl, &later_rq->dl); /* * Update the later_rq clock here, because the clock is used * by the cpufreq_update_util() inside __add_running_bw(). */ update_rq_clock(later_rq); - add_running_bw(&next_task->dl, &later_rq->dl); activate_task(later_rq, next_task, ENQUEUE_NOCLOCK); ret = 1; @@ -2186,11 +2182,7 @@ static void pull_dl_task(struct rq *this_rq) resched = true; deactivate_task(src_rq, p, 0); - sub_running_bw(&p->dl, &src_rq->dl); - sub_rq_bw(&p->dl, &src_rq->dl); set_task_cpu(p, this_cpu); - add_rq_bw(&p->dl, &this_rq->dl); - add_running_bw(&p->dl, &this_rq->dl); activate_task(this_rq, p, 0); dmin = p->dl.deadline; diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 7acc632c3b82..23fbbcc414d5 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1051,7 +1051,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, if (!rcu_access_pointer(group->poll_kworker)) { struct sched_param param = { - .sched_priority = MAX_RT_PRIO - 1, + .sched_priority = 1, }; struct kthread_worker *kworker; @@ -1061,7 +1061,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, mutex_unlock(&group->trigger_lock); return ERR_CAST(kworker); } - sched_setscheduler(kworker->task, SCHED_FIFO, &param); + sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); kthread_init_delayed_work(&group->poll_work, psi_poll_work); rcu_assign_pointer(group->poll_kworker, kworker); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1334ede667a8..738065f765ab 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -644,30 +644,40 @@ release: * available * never: never stall for any thp allocation */ -static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) +static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) { const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); + gfp_t this_node = 0; + +#ifdef CONFIG_NUMA + struct mempolicy *pol; + /* + * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not + * specified, to express a general desire to stay on the current + * node for optimistic allocation attempts. If the defrag mode + * and/or madvise hint requires the direct reclaim then we prefer + * to fallback to other node rather than node reclaim because that + * can lead to excessive reclaim even though there is free memory + * on other nodes. We expect that NUMA preferences are specified + * by memory policies. + */ + pol = get_vma_policy(vma, addr); + if (pol->mode != MPOL_BIND) + this_node = __GFP_THISNODE; + mpol_cond_put(pol); +#endif - /* Always do synchronous compaction */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE | (vma_madvised ?
0 : __GFP_NORETRY); - - /* Kick kcompactd and fail quickly */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; - - /* Synchronous compaction if madvised, otherwise kick kcompactd */ + return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node; if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE_LIGHT | - (vma_madvised ? __GFP_DIRECT_RECLAIM : - __GFP_KSWAPD_RECLAIM); - - /* Only do synchronous compaction if madvised */ + return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : + __GFP_KSWAPD_RECLAIM | this_node); if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE_LIGHT | - (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); - - return GFP_TRANSHUGE_LIGHT; + return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : + this_node); + return GFP_TRANSHUGE_LIGHT | this_node; } /* Caller must hold page table lock. */ @@ -739,8 +749,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) pte_free(vma->vm_mm, pgtable); return ret; } - gfp = alloc_hugepage_direct_gfpmask(vma); - page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); + gfp = alloc_hugepage_direct_gfpmask(vma, haddr); + page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id()); if (unlikely(!page)) { count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; @@ -1347,8 +1357,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) alloc: if (__transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) { - huge_gfp = alloc_hugepage_direct_gfpmask(vma); - new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); + huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr); + new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma, + haddr, numa_node_id()); } else new_page = NULL; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ede7e7f5d1ab..6d7296dd11b8 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3856,6 +3856,25 @@ retry: page = alloc_huge_page(vma, haddr, 0); if (IS_ERR(page)) { + /* + * Returning error will result in faulting task being + * sent SIGBUS. The hugetlb fault mutex prevents two + * tasks from racing to fault in the same page which + * could result in false unable to allocate errors. + * Page migration does not take the fault mutex, but + * does a clear then write of pte's under page table + * lock. Page fault code could race with migration, + * notice the clear pte and try to allocate a page + * here. Before returning error, get ptl and make + * sure there really is no pte entry. 
+ */ + ptl = huge_pte_lock(h, mm, ptep); + if (!huge_pte_none(huge_ptep_get(ptep))) { + ret = 0; + spin_unlock(ptl); + goto out; + } + spin_unlock(ptl); ret = vmf_error(PTR_ERR(page)); goto out; } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 6e9e8cca663e..f6e602918dac 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1966,6 +1966,7 @@ static void kmemleak_disable(void) /* stop any memory operation tracing */ kmemleak_enabled = 0; + kmemleak_early_log = 0; /* check whether it is too early for a kernel thread */ if (kmemleak_initialized) @@ -2009,7 +2010,6 @@ void __init kmemleak_init(void) #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF if (!kmemleak_skip_disable) { - kmemleak_early_log = 0; kmemleak_disable(); return; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cdbb7a84cb6e..6f5c0c517c49 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -768,6 +768,26 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); } +void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) +{ + struct page *page = virt_to_head_page(p); + pg_data_t *pgdat = page_pgdat(page); + struct mem_cgroup *memcg; + struct lruvec *lruvec; + + rcu_read_lock(); + memcg = memcg_from_slab_page(page); + + /* Untracked pages have no memcg, no lruvec. Update only the node */ + if (!memcg || memcg == root_mem_cgroup) { + __mod_node_page_state(pgdat, idx, val); + } else { + lruvec = mem_cgroup_lruvec(pgdat, memcg); + __mod_lruvec_state(lruvec, idx, val); + } + rcu_read_unlock(); +} + /** * __count_memcg_events - account VM events in a cgroup * @memcg: the memory cgroup @@ -1130,26 +1150,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root, css_put(&prev->css); } -static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) +static void __invalidate_reclaim_iterators(struct mem_cgroup *from, + struct mem_cgroup *dead_memcg) { - struct mem_cgroup *memcg = dead_memcg; struct mem_cgroup_reclaim_iter *iter; struct mem_cgroup_per_node *mz; int nid; int i; - for (; memcg; memcg = parent_mem_cgroup(memcg)) { - for_each_node(nid) { - mz = mem_cgroup_nodeinfo(memcg, nid); - for (i = 0; i <= DEF_PRIORITY; i++) { - iter = &mz->iter[i]; - cmpxchg(&iter->position, - dead_memcg, NULL); - } + for_each_node(nid) { + mz = mem_cgroup_nodeinfo(from, nid); + for (i = 0; i <= DEF_PRIORITY; i++) { + iter = &mz->iter[i]; + cmpxchg(&iter->position, + dead_memcg, NULL); } } } +static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) +{ + struct mem_cgroup *memcg = dead_memcg; + struct mem_cgroup *last; + + do { + __invalidate_reclaim_iterators(memcg, dead_memcg); + last = memcg; + } while ((memcg = parent_mem_cgroup(memcg))); + + /* + * When cgruop1 non-hierarchy mode is used, + * parent_mem_cgroup() does not walk all the way up to the + * cgroup root (root_mem_cgroup). So we have to handle + * dead_memcg from cgroup root separately. 
+ */ + if (last != root_mem_cgroup) + __invalidate_reclaim_iterators(root_mem_cgroup, + dead_memcg); +} + /** * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy * @memcg: hierarchy root diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f48693f75b37..65e0874fce17 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { }, }; -static void migrate_page_add(struct page *page, struct list_head *pagelist, +static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); struct queue_pages { @@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page, } /* - * queue_pages_pmd() has three possible return values: - * 1 - pages are placed on the right node or queued successfully. - * 0 - THP was split. - * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing - * page was already on a node that does not follow the policy. + * queue_pages_pmd() has four possible return values: + * 0 - pages are placed on the right node or queued successfully. + * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were + * specified. + * 2 - THP was split. + * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an + * existing page was already on a node that does not follow the + * policy. */ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, if (is_huge_zero_page(page)) { spin_unlock(ptl); __split_huge_pmd(walk->vma, pmd, addr, false, NULL); + ret = 2; goto out; } - if (!queue_pages_required(page, qp)) { - ret = 1; + if (!queue_pages_required(page, qp)) goto unlock; - } - ret = 1; flags = qp->flags; /* go to thp migration */ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { - if (!vma_migratable(walk->vma)) { - ret = -EIO; + if (!vma_migratable(walk->vma) || + migrate_page_add(page, qp->pagelist, flags)) { + ret = 1; goto unlock; } - - migrate_page_add(page, qp->pagelist, flags); } else ret = -EIO; unlock: @@ -479,6 +479,13 @@ out: /* * Scan through pages checking if pages follow certain conditions, * and move them to the pagelist if they do. + * + * queue_pages_pte_range() has three possible return values: + * 0 - pages are placed on the right node or queued successfully. + * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were + * specified. + * -EIO - only MPOL_MF_STRICT was specified and an existing page was already + * on a node that does not follow the policy. 
*/ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, struct queue_pages *qp = walk->private; unsigned long flags = qp->flags; int ret; + bool has_unmovable = false; pte_t *pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { ret = queue_pages_pmd(pmd, ptl, addr, end, walk); - if (ret > 0) - return 0; - else if (ret < 0) + if (ret != 2) return ret; } + /* THP was split, fall through to pte walk */ if (pmd_trans_unstable(pmd)) return 0; @@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, if (!queue_pages_required(page, qp)) continue; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { - if (!vma_migratable(vma)) + /* MPOL_MF_STRICT must be specified if we get here */ + if (!vma_migratable(vma)) { + has_unmovable = true; break; - migrate_page_add(page, qp->pagelist, flags); + } + + /* + * Do not abort immediately since there may be + * temporary off LRU pages in the range. Still + * need migrate other LRU pages. + */ + if (migrate_page_add(page, qp->pagelist, flags)) + has_unmovable = true; } else break; } pte_unmap_unlock(pte - 1, ptl); cond_resched(); + + if (has_unmovable) + return 1; + return addr != end ? -EIO : 0; } @@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, * * If pages found in a given range are on a set of nodes (determined by * @nodes and @flags,) it's isolated and queued to the pagelist which is - * passed via @private.) + * passed via @private. + * + * queue_pages_range() has three possible return values: + * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were + * specified. + * 0 - queue pages successfully or no misplaced page. + * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified. */ static int queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, @@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, /* * page migration, thp tail pages can be passed. */ -static void migrate_page_add(struct page *page, struct list_head *pagelist, +static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { struct page *head = compound_head(page); @@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist, mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_cache(head), hpage_nr_pages(head)); + } else if (flags & MPOL_MF_STRICT) { + /* + * Non-movable page may reach here. And, there may be + * temporary off LRU pages or non-LRU movable pages. + * Treat them as unmovable pages since they can't be + * isolated, so they can't be moved at the moment. It + * should return -EIO for this case too. 
+ */ + return -EIO; } } + + return 0; } /* page allocation callback for NUMA node migration */ @@ -1142,8 +1180,8 @@ static struct page *new_page(struct page *page, unsigned long start) } else if (PageTransHuge(page)) { struct page *thp; - thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, - HPAGE_PMD_ORDER); + thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, + address, numa_node_id()); if (!thp) return NULL; prep_transhuge_page(thp); @@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start) } #else -static void migrate_page_add(struct page *page, struct list_head *pagelist, +static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { + return -EIO; } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, @@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len, struct mempolicy *new; unsigned long end; int err; + int ret; LIST_HEAD(pagelist); if (flags & ~(unsigned long)MPOL_MF_VALID) @@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len, if (err) goto mpol_out; - err = queue_pages_range(mm, start, end, nmask, + ret = queue_pages_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); - if (!err) - err = mbind_range(mm, start, end, new); + + if (ret < 0) { + err = -EIO; + goto up_out; + } + + err = mbind_range(mm, start, end, new); if (!err) { int nr_failed = 0; @@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len, putback_movable_pages(&pagelist); } - if (nr_failed && (flags & MPOL_MF_STRICT)) + if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) err = -EIO; } else putback_movable_pages(&pagelist); +up_out: up_write(&mm->mmap_sem); - mpol_out: +mpol_out: mpol_put(new); return err; } @@ -1688,7 +1734,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. */ -static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, +struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = __get_vma_policy(vma, addr); @@ -2037,7 +2083,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual Address of the allocation. Must be inside the VMA. * @node: Which node to prefer for allocation (modulo policy). - * @hugepage: for hugepages try only the preferred node if possible * * This function allocates a page from the kernel page pool and applies * a NUMA policy associated with the VMA or the current process. @@ -2048,7 +2093,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, */ struct page * alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, - unsigned long addr, int node, bool hugepage) + unsigned long addr, int node) { struct mempolicy *pol; struct page *page; @@ -2066,31 +2111,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, goto out; } - if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { - int hpage_node = node; - - /* - * For hugepage allocation and non-interleave policy which - * allows the current node (or other explicitly preferred - * node) we only try to allocate from the current/preferred - * node and don't fall back to other nodes, as the cost of - * remote accesses would likely offset THP benefits. 
- * - * If the policy is interleave, or does not allow the current - * node in its nodemask, we allocate the standard way. - */ - if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL)) - hpage_node = pol->v.preferred_node; - - nmask = policy_nodemask(gfp, pol); - if (!nmask || node_isset(hpage_node, *nmask)) { - mpol_cond_put(pol); - page = __alloc_pages_node(hpage_node, - gfp | __GFP_THISNODE, order); - goto out; - } - } - nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); diff --git a/mm/memremap.c b/mm/memremap.c index 6ee03a816d67..ed70c4e8e52a 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -91,6 +91,12 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap) wait_for_completion(&pgmap->done); percpu_ref_exit(pgmap->ref); } + /* + * Undo the pgmap ref assignment for the internal case as the + * caller may re-enable the same pgmap. + */ + if (pgmap->ref == &pgmap->internal_ref) + pgmap->ref = NULL; } static void devm_memremap_pages_release(void *data) @@ -397,6 +403,30 @@ void __put_devmap_managed_page(struct page *page) mem_cgroup_uncharge(page); + /* + * When a device_private page is freed, the page->mapping field + * may still contain a (stale) mapping value. For example, the + * lower bits of page->mapping may still identify the page as + * an anonymous page. Ultimately, this entire field is just + * stale and wrong, and it will cause errors if not cleared. + * One example is: + * + * migrate_vma_pages() + * migrate_vma_insert_page() + * page_add_new_anon_rmap() + * __page_set_anon_rmap() + * ...checks page->mapping, via PageAnon(page) call, + * and incorrectly concludes that the page is an + * anonymous page. Therefore, it incorrectly, + * silently fails to set up the new anon rmap. + * + * For other types of ZONE_DEVICE pages, migration is either + * handled differently or not done at all, so there is no need + * to clear page->mapping. + */ + if (is_device_private_page(page)) + page->mapping = NULL; + page->pgmap->ops->page_free(page); } else if (!count) __put_page(page); diff --git a/mm/rmap.c b/mm/rmap.c index e5dfe2ae6b0d..003377e24232 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, /* * No need to invalidate here it will synchronize on * against the special swap migration pte. + * + * The assignment to subpage above was computed from a + * swap PTE which results in an invalid pointer. + * Since only PAGE_SIZE pages can currently be + * migrated, just set it to page. This will need to be + * changed when hugepage migrations to device private + * memory are supported. */ + subpage = page; goto discard; } diff --git a/mm/shmem.c b/mm/shmem.c index 626d8c74b973..2bed4761f279 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1466,7 +1466,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, shmem_pseudo_vma_init(&pvma, info, hindex); page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); + HPAGE_PMD_ORDER, &pvma, 0, numa_node_id()); shmem_pseudo_vma_destroy(&pvma); if (page) prep_transhuge_page(page); diff --git a/mm/usercopy.c b/mm/usercopy.c index 2a09796edef8..98e924864554 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n, bool to_user) { /* Reject if object wraps past end of memory. 
*/ - if (ptr + n < ptr) + if (ptr + (n - 1) < ptr) usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); /* Reject if NULL or ZERO-allocation. */ diff --git a/mm/vmalloc.c b/mm/vmalloc.c index e0fc963acc41..7ba11e12a11f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3279,9 +3279,19 @@ retry: goto overflow; /* + * If required width exeeds current VA block, move + * base downwards and then recheck. + */ + if (base + end > va->va_end) { + base = pvm_determine_end_from_reverse(&va, align) - end; + term_area = area; + continue; + } + + /* * If this VA does not fit, move base downwards and recheck. */ - if (base + start < va->va_start || base + end > va->va_end) { + if (base + start < va->va_start) { va = node_to_va(rb_prev(&va->rb_node)); base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; diff --git a/mm/vmscan.c b/mm/vmscan.c index dbdc46a84f63..c77d1e3761a7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -88,9 +88,6 @@ struct scan_control { /* Can pages be swapped as part of reclaim? */ unsigned int may_swap:1; - /* e.g. boosted watermark reclaim leaves slabs alone */ - unsigned int may_shrinkslab:1; - /* * Cgroups are not reclaimed below their configured memory.low, * unless we threaten to OOM. If any cgroups are skipped due to @@ -2714,10 +2711,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) shrink_node_memcg(pgdat, memcg, sc, &lru_pages); node_lru_pages += lru_pages; - if (sc->may_shrinkslab) { - shrink_slab(sc->gfp_mask, pgdat->node_id, - memcg, sc->priority); - } + shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, + sc->priority); /* Record the group's reclaim efficiency */ vmpressure(sc->gfp_mask, memcg, false, @@ -3194,7 +3189,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = 1, - .may_shrinkslab = 1, }; /* @@ -3238,7 +3232,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, .may_unmap = 1, .reclaim_idx = MAX_NR_ZONES - 1, .may_swap = !noswap, - .may_shrinkslab = 1, }; unsigned long lru_pages; @@ -3286,7 +3279,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = may_swap, - .may_shrinkslab = 1, }; set_task_reclaim_state(current, &sc.reclaim_state); @@ -3598,7 +3590,6 @@ restart: */ sc.may_writepage = !laptop_mode && !nr_boost_reclaim; sc.may_swap = !nr_boost_reclaim; - sc.may_shrinkslab = !nr_boost_reclaim; /* * Do some background aging of the anon list, to give diff --git a/mm/workingset.c b/mm/workingset.c index e0b4edcb88c8..c963831d354f 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -380,14 +380,12 @@ void workingset_update_node(struct xa_node *node) if (node->count && node->count == node->nr_values) { if (list_empty(&node->private_list)) { list_lru_add(&shadow_nodes, &node->private_list); - __inc_lruvec_page_state(virt_to_page(node), - WORKINGSET_NODES); + __inc_lruvec_slab_state(node, WORKINGSET_NODES); } } else { if (!list_empty(&node->private_list)) { list_lru_del(&shadow_nodes, &node->private_list); - __dec_lruvec_page_state(virt_to_page(node), - WORKINGSET_NODES); + __dec_lruvec_slab_state(node, WORKINGSET_NODES); } } } @@ -480,7 +478,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, } list_lru_isolate(lru, item); - __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES); + __dec_lruvec_slab_state(node, WORKINGSET_NODES); spin_unlock(lru_lock); @@ -503,7 +501,7 @@ static enum lru_status shadow_lru_isolate(struct 
list_head *item, * shadow entries we were tracking ... */ xas_store(&xas, NULL); - __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM); + __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM); out_invalid: xa_unlock_irq(&mapping->i_pages); diff --git a/mm/z3fold.c b/mm/z3fold.c index 1a029a7432ee..ed19d98c9dcd 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -817,9 +817,19 @@ out: static void z3fold_destroy_pool(struct z3fold_pool *pool) { kmem_cache_destroy(pool->c_handle); - z3fold_unregister_migration(pool); - destroy_workqueue(pool->release_wq); + + /* + * We need to destroy pool->compact_wq before pool->release_wq, + * as any pending work on pool->compact_wq will call + * queue_work(pool->release_wq, &pool->work). + * + * There are still outstanding pages until both workqueues are drained, + * so we cannot unregister migration until then. + */ + destroy_workqueue(pool->compact_wq); + destroy_workqueue(pool->release_wq); + z3fold_unregister_migration(pool); kfree(pool); } diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 67d7f83009ae..1d5bdf3a4b65 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -2303,7 +2303,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, while (bucket_tmp < hash->size) { if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, - *bucket, &idx_tmp)) + bucket_tmp, &idx_tmp)) break; bucket_tmp++; @@ -2420,8 +2420,10 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig) batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); - batadv_mcast_want_rtr4_update(bat_priv, orig, BATADV_NO_FLAGS); - batadv_mcast_want_rtr6_update(bat_priv, orig, BATADV_NO_FLAGS); + batadv_mcast_want_rtr4_update(bat_priv, orig, + BATADV_MCAST_WANT_NO_RTR4); + batadv_mcast_want_rtr6_update(bat_priv, orig, + BATADV_MCAST_WANT_NO_RTR6); spin_unlock_bh(&orig->mcast_handler_lock); } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index b9585e7d9d2e..04bc79359a17 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3202,6 +3202,7 @@ struct hci_dev *hci_alloc_dev(void) hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; + hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index bb67f4a5479a..402e2cc54044 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -433,6 +433,35 @@ static int auto_accept_delay_set(void *data, u64 val) return 0; } +static int min_encrypt_key_size_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val < 1 || val > 16) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->min_enc_key_size = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int min_encrypt_key_size_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->min_enc_key_size; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops, + min_encrypt_key_size_get, + min_encrypt_key_size_set, "%llu\n"); + static int auto_accept_delay_get(void *data, u64 *val) { struct hci_dev *hdev = data; @@ -545,6 +574,8 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev) if (lmp_ssp_capable(hdev)) { 
debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs, hdev, &ssp_debug_mode_fops); + debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs, + hdev, &min_encrypt_key_size_fops); debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, hdev, &auto_accept_delay_fops); } diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 5abd423b55fa..8d889969ae7e 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -101,6 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock, { struct sk_buff *skb; struct sock *sk = sock->sk; + int ret; BT_DBG("session %p data %p size %d", session, data, size); @@ -114,13 +115,17 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock, } skb_put_u8(skb, hdr); - if (data && size > 0) + if (data && size > 0) { skb_put_data(skb, data, size); + ret = size; + } else { + ret = 0; + } skb_queue_tail(transmit, skb); wake_up_interruptible(sk_sleep(sk)); - return 0; + return ret; } static int hidp_send_ctrl_message(struct hidp_session *session, diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index cc506fe99b4d..dfc1edb168b7 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1361,7 +1361,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) * actually encrypted before enforcing a key size. */ return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || - hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE); + hcon->enc_key_size >= hcon->hdev->min_enc_key_size); } static void l2cap_do_start(struct l2cap_chan *chan) diff --git a/net/core/sock.c b/net/core/sock.c index d57b0cc995a0..6d08553f885c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1992,6 +1992,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) } EXPORT_SYMBOL(skb_set_owner_w); +static bool can_skb_orphan_partial(const struct sk_buff *skb) +{ +#ifdef CONFIG_TLS_DEVICE + /* Drivers depend on in-order delivery for crypto offload, + * partial orphan breaks out-of-order-OK logic. + */ + if (skb->decrypted) + return false; +#endif + return (skb->destructor == sock_wfree || + (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); +} + /* This helper is used by netem, as it can hold packets in its * delay queue. We want to allow the owner socket to send more * packets, as if they were already TX completed by a typical driver. 
@@ -2003,11 +2016,7 @@ void skb_orphan_partial(struct sk_buff *skb) if (skb_is_tcp_pure_ack(skb)) return; - if (skb->destructor == sock_wfree -#ifdef CONFIG_INET - || skb->destructor == tcp_wfree -#endif - ) { + if (can_skb_orphan_partial(skb)) { struct sock *sk = skb->sk; if (refcount_inc_not_zero(&sk->sk_refcnt)) { diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 3312a5849a97..c13ffbd33d8d 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -19,6 +19,7 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); static struct workqueue_struct *broadcast_wq; +static atomic64_t cookie_gen; u64 sock_gen_cookie(struct sock *sk) { @@ -27,7 +28,7 @@ u64 sock_gen_cookie(struct sock *sk) if (res) return res; - res = atomic64_inc_return(&sock_net(sk)->cookie_gen); + res = atomic64_inc_return(&cookie_gen); atomic64_cmpxchg(&sk->sk_cookie, 0, res); } } diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 4ec5b7f85d51..09d9286b27cc 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds, { int port; + if (!ds->ops->port_mdb_add) + return; + for_each_set_bit(port, bitmap, ds->num_ports) ds->ops->port_mdb_add(ds, port, mdb); } diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index e4aba5d485be..bbe9b3b2d395 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -170,7 +170,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb, reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); if (!reasm_data) goto out_oom; - inet_frag_reasm_finish(&fq->q, skb, reasm_data); + inet_frag_reasm_finish(&fq->q, skb, reasm_data, false); skb->dev = ldev; skb->tstamp = fq->q.stamp; diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index a999451345f9..10d31733297d 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -475,11 +475,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, EXPORT_SYMBOL(inet_frag_reasm_prepare); void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, - void *reasm_data) + void *reasm_data, bool try_coalesce) { struct sk_buff **nextp = (struct sk_buff **)reasm_data; struct rb_node *rbn; struct sk_buff *fp; + int sum_truesize; skb_push(head, head->data - skb_network_header(head)); @@ -487,25 +488,41 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, fp = FRAG_CB(head)->next_frag; rbn = rb_next(&head->rbnode); rb_erase(&head->rbnode, &q->rb_fragments); + + sum_truesize = head->truesize; while (rbn || fp) { /* fp points to the next sk_buff in the current run; * rbn points to the next run. */ /* Go through the current run. 
*/ while (fp) { - *nextp = fp; - nextp = &fp->next; - fp->prev = NULL; - memset(&fp->rbnode, 0, sizeof(fp->rbnode)); - fp->sk = NULL; - head->data_len += fp->len; - head->len += fp->len; + struct sk_buff *next_frag = FRAG_CB(fp)->next_frag; + bool stolen; + int delta; + + sum_truesize += fp->truesize; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; - fp = FRAG_CB(fp)->next_frag; + + if (try_coalesce && skb_try_coalesce(head, fp, &stolen, + &delta)) { + kfree_skb_partial(fp, stolen); + } else { + fp->prev = NULL; + memset(&fp->rbnode, 0, sizeof(fp->rbnode)); + fp->sk = NULL; + + head->data_len += fp->len; + head->len += fp->len; + head->truesize += fp->truesize; + + *nextp = fp; + nextp = &fp->next; + } + + fp = next_frag; } /* Move to the next run. */ if (rbn) { @@ -516,7 +533,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, rbn = rbnext; } } - sub_frag_mem_limit(q->fqdir, head->truesize); + sub_frag_mem_limit(q->fqdir, sum_truesize); *nextp = NULL; skb_mark_not_on_list(head); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 4385eb9e781f..cfeb8890f94e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -393,6 +393,11 @@ err: return err; } +static bool ip_frag_coalesce_ok(const struct ipq *qp) +{ + return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER; +} + /* Build a new IP datagram from all its fragments. */ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct sk_buff *prev_tail, struct net_device *dev) @@ -421,7 +426,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, if (len > 65535) goto out_oversize; - inet_frag_reasm_finish(&qp->q, skb, reasm_data); + inet_frag_reasm_finish(&qp->q, skb, reasm_data, + ip_frag_coalesce_ok(qp)); skb->dev = dev; IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f8fa1686f7f3..051ef10374f6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -984,6 +984,9 @@ new_segment: if (!skb) goto wait_for_memory; +#ifdef CONFIG_TLS_DEVICE + skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); +#endif skb_entail(sk, skb); copy = size_goal; } diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 3d1e15401384..8a56e09cfb0e 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -398,10 +398,14 @@ more_data: static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_msg tmp, *msg_tx = NULL; - int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS; int copied = 0, err = 0; struct sk_psock *psock; long timeo; + int flags; + + /* Don't let internal do_tcp_sendpages() flags through */ + flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED); + flags |= MSG_NO_SHARED_FRAGS; psock = sk_psock_get(sk); if (unlikely(!psock)) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e6d02e05bb1c..5c46bc4c7e8d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1320,6 +1320,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, buff = sk_stream_alloc_skb(sk, nsize, gfp, true); if (!buff) return -ENOMEM; /* We'll just try again later. 
*/ + skb_copy_decrypted(buff, skb); sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); @@ -1874,6 +1875,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, buff = sk_stream_alloc_skb(sk, 0, gfp, true); if (unlikely(!buff)) return -ENOMEM; + skb_copy_decrypted(buff, skb); sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); @@ -2143,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk) sk_mem_charge(sk, nskb->truesize); skb = tcp_send_head(sk); + skb_copy_decrypted(nskb, skb); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 0f82c150543b..fed9666a2f7d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -348,7 +348,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb, skb_reset_transport_header(skb); - inet_frag_reasm_finish(&fq->q, skb, reasm_data); + inet_frag_reasm_finish(&fq->q, skb, reasm_data, false); skb->ignore_df = 1; skb->dev = dev; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index ca05b16f1bb9..1f5d4d196dcc 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -282,7 +282,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, skb_reset_transport_header(skb); - inet_frag_reasm_finish(&fq->q, skb, reasm_data); + inet_frag_reasm_finish(&fq->q, skb, reasm_data, true); skb->dev = dev; ipv6_hdr(skb)->payload_len = htons(payload_len); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index a542761e90d1..81a8ef42b88d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); * table location, we assume id gets exposed to userspace. * * Following nf_conn items do not change throughout lifetime - * of the nf_conn after it has been committed to main hash table: + * of the nf_conn: * * 1. nf_conn address - * 2. nf_conn->ext address - * 3. nf_conn->master address (normally NULL) - * 4. tuple - * 5. the associated net namespace + * 2. nf_conn->master address (normally NULL) + * 3. the associated net namespace + * 4. 
the original direction tuple */ u32 nf_ct_get_id(const struct nf_conn *ct) { @@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct) net_get_random_once(&ct_id_seed, sizeof(ct_id_seed)); a = (unsigned long)ct; - b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct)); - c = (unsigned long)ct->ext; - d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash), + b = (unsigned long)ct->master; + c = (unsigned long)nf_ct_net(ct); + d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple), &ct_id_seed); #ifdef CONFIG_64BIT return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed); diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index e3d797252a98..80a8f9ae4c93 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -111,15 +111,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp) #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) -static void flow_offload_fixup_ct_state(struct nf_conn *ct) +static inline __s32 nf_flow_timeout_delta(unsigned int timeout) +{ + return (__s32)(timeout - (u32)jiffies); +} + +static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) { const struct nf_conntrack_l4proto *l4proto; + int l4num = nf_ct_protonum(ct); unsigned int timeout; - int l4num; - - l4num = nf_ct_protonum(ct); - if (l4num == IPPROTO_TCP) - flow_offload_fixup_tcp(&ct->proto.tcp); l4proto = nf_ct_l4proto_find(l4num); if (!l4proto) @@ -132,7 +133,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct) else return; - ct->timeout = nfct_time_stamp + timeout; + if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout) + ct->timeout = nfct_time_stamp + timeout; +} + +static void flow_offload_fixup_ct_state(struct nf_conn *ct) +{ + if (nf_ct_protonum(ct) == IPPROTO_TCP) + flow_offload_fixup_tcp(&ct->proto.tcp); +} + +static void flow_offload_fixup_ct(struct nf_conn *ct) +{ + flow_offload_fixup_ct_state(ct); + flow_offload_fixup_ct_timeout(ct); } void flow_offload_free(struct flow_offload *flow) @@ -208,6 +222,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) } EXPORT_SYMBOL_GPL(flow_offload_add); +static inline bool nf_flow_has_expired(const struct flow_offload *flow) +{ + return nf_flow_timeout_delta(flow->timeout) <= 0; +} + static void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow) { @@ -223,6 +242,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table, e = container_of(flow, struct flow_offload_entry, flow); clear_bit(IPS_OFFLOAD_BIT, &e->ct->status); + if (nf_flow_has_expired(flow)) + flow_offload_fixup_ct(e->ct); + else if (flow->flags & FLOW_OFFLOAD_TEARDOWN) + flow_offload_fixup_ct_timeout(e->ct); + flow_offload_free(flow); } @@ -298,11 +322,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table, return err; } -static inline bool nf_flow_has_expired(const struct flow_offload *flow) -{ - return (__s32)(flow->timeout - (u32)jiffies) <= 0; -} - static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data) { struct nf_flowtable *flow_table = data; diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index cdfc33517e85..d68c801dd614 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -214,6 +214,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) return true; } +static int 
nf_flow_offload_dst_check(struct dst_entry *dst) +{ + if (unlikely(dst_xfrm(dst))) + return dst_check(dst, 0) ? 0 : -1; + + return 0; +} + +static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb, + const struct nf_hook_state *state, + struct dst_entry *dst) +{ + skb_orphan(skb); + skb_dst_set_noref(skb, dst); + skb->tstamp = 0; + dst_output(state->net, state->sk, skb); + return NF_STOLEN; +} + unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -254,6 +273,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff)) return NF_ACCEPT; + if (nf_flow_offload_dst_check(&rt->dst)) { + flow_offload_teardown(flow); + return NF_ACCEPT; + } + if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) return NF_DROP; @@ -261,6 +285,13 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, iph = ip_hdr(skb); ip_decrease_ttl(iph); + if (unlikely(dst_xfrm(&rt->dst))) { + memset(skb->cb, 0, sizeof(struct inet_skb_parm)); + IPCB(skb)->iif = skb->dev->ifindex; + IPCB(skb)->flags = IPSKB_FORWARDED; + return nf_flow_xmit_xfrm(skb, state, &rt->dst); + } + skb->dev = outdev; nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); skb_dst_set_noref(skb, &rt->dst); @@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, sizeof(*ip6h))) return NF_ACCEPT; + if (nf_flow_offload_dst_check(&rt->dst)) { + flow_offload_teardown(flow); + return NF_ACCEPT; + } + if (skb_try_make_writable(skb, sizeof(*ip6h))) return NF_DROP; @@ -477,6 +513,13 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, ip6h = ipv6_hdr(skb); ip6h->hop_limit--; + if (unlikely(dst_xfrm(&rt->dst))) { + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + IP6CB(skb)->iif = skb->dev->ifindex; + IP6CB(skb)->flags = IP6SKB_FORWARDED; + return nf_flow_xmit_xfrm(skb, state, &rt->dst); + } + skb->dev = outdev; nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); skb_dst_set_noref(skb, &rt->dst); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fe3b7b0c6c66..6d00bef023c4 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -138,9 +138,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) return; list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { - if (trans->msg_type == NFT_MSG_NEWSET && - nft_trans_set(trans) == set) { - set->bound = true; + switch (trans->msg_type) { + case NFT_MSG_NEWSET: + if (nft_trans_set(trans) == set) + nft_trans_set_bound(trans) = true; + break; + case NFT_MSG_NEWSETELEM: + if (nft_trans_elem_set(trans) == set) + nft_trans_elem_set_bound(trans) = true; break; } } @@ -1662,6 +1667,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, chain->flags |= NFT_BASE_CHAIN | flags; basechain->policy = NF_ACCEPT; + if (chain->flags & NFT_CHAIN_HW_OFFLOAD && + nft_chain_offload_priority(basechain) < 0) + return -EOPNOTSUPP; + flow_block_init(&basechain->flow_block); } else { chain = kzalloc(sizeof(*chain), GFP_KERNEL); @@ -6906,7 +6915,7 @@ static int __nf_tables_abort(struct net *net) break; case NFT_MSG_NEWSET: trans->ctx.table->use--; - if (nft_trans_set(trans)->bound) { + if (nft_trans_set_bound(trans)) { nft_trans_destroy(trans); break; } @@ -6918,7 +6927,7 @@ static int __nf_tables_abort(struct net *net) nft_trans_destroy(trans); break; case NFT_MSG_NEWSETELEM: - if (nft_trans_elem_set(trans)->bound) { + if 
(nft_trans_elem_set_bound(trans)) { nft_trans_destroy(trans); break; } diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index d3c4c9c88bc8..3c2725ade61b 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -103,10 +103,11 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx, } static void nft_flow_offload_common_init(struct flow_cls_common_offload *common, - __be16 proto, - struct netlink_ext_ack *extack) + __be16 proto, int priority, + struct netlink_ext_ack *extack) { common->protocol = proto; + common->prio = priority; common->extack = extack; } @@ -124,6 +125,15 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain, return 0; } +int nft_chain_offload_priority(struct nft_base_chain *basechain) +{ + if (basechain->ops.priority <= 0 || + basechain->ops.priority > USHRT_MAX) + return -1; + + return 0; +} + static int nft_flow_offload_rule(struct nft_trans *trans, enum flow_cls_command command) { @@ -142,7 +152,8 @@ static int nft_flow_offload_rule(struct nft_trans *trans, if (flow) proto = flow->proto; - nft_flow_offload_common_init(&cls_flow.common, proto, &extack); + nft_flow_offload_common_init(&cls_flow.common, proto, + basechain->ops.priority, &extack); cls_flow.command = command; cls_flow.cookie = (unsigned long) rule; if (flow) diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index aa5f571d4361..060a4ed46d5e 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, { struct nft_flow_offload *priv = nft_expr_priv(expr); struct nf_flowtable *flowtable = &priv->flowtable->data; + struct tcphdr _tcph, *tcph = NULL; enum ip_conntrack_info ctinfo; struct nf_flow_route route; struct flow_offload *flow; enum ip_conntrack_dir dir; - bool is_tcp = false; struct nf_conn *ct; int ret; @@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { case IPPROTO_TCP: - is_tcp = true; + tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, + sizeof(_tcph), &_tcph); + if (unlikely(!tcph || tcph->fin || tcph->rst)) + goto out; break; case IPPROTO_UDP: break; @@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (!flow) goto err_flow_alloc; - if (is_tcp) { + if (tcph) { ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8d54f3047768..e2742b006d25 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2618,6 +2618,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) mutex_lock(&po->pg_vec_lock); + /* packet_sendmsg() check on tx_ring.pg_vec was lockless, + * we need to confirm it under protection of pg_vec_lock. 
+ */ + if (unlikely(!po->tx_ring.pg_vec)) { + err = -EBUSY; + goto out; + } if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index d09eaf153544..0dbbfd1b6487 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) service_in_use: write_unlock(&local->services_lock); - rxrpc_put_local(local); + rxrpc_unuse_local(local); ret = -EADDRINUSE; error_unlock: release_sock(&rx->sk); @@ -402,7 +402,7 @@ EXPORT_SYMBOL(rxrpc_kernel_check_life); */ void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call) { - rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, rxrpc_propose_ack_ping_for_check_life); rxrpc_send_ack_packet(call, true, NULL); } @@ -901,7 +901,7 @@ static int rxrpc_release_sock(struct sock *sk) rxrpc_queue_work(&rxnet->service_conn_reaper); rxrpc_queue_work(&rxnet->client_conn_reaper); - rxrpc_put_local(rx->local); + rxrpc_unuse_local(rx->local); rx->local = NULL; key_put(rx->key); rx->key = NULL; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 63b26baa108a..fa5b030acaa8 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -257,7 +257,8 @@ struct rxrpc_security { */ struct rxrpc_local { struct rcu_head rcu; - atomic_t usage; + atomic_t active_users; /* Number of users of the local endpoint */ + atomic_t usage; /* Number of references to the structure */ struct rxrpc_net *rxnet; /* The network ns in which this resides */ struct list_head link; struct socket *socket; /* my UDP socket */ @@ -653,7 +654,6 @@ struct rxrpc_call { /* receive-phase ACK management */ u8 ackr_reason; /* reason to ACK */ - u16 ackr_skew; /* skew on packet being ACK'd */ rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ rxrpc_serial_t ackr_first_seq; /* first sequence number received */ rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ @@ -747,7 +747,7 @@ int rxrpc_reject_call(struct rxrpc_sock *); /* * call_event.c */ -void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, +void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool, enum rxrpc_propose_ack_trace); void rxrpc_process_call(struct work_struct *); @@ -1006,6 +1006,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *); struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *); void rxrpc_put_local(struct rxrpc_local *); +struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *); +void rxrpc_unuse_local(struct rxrpc_local *); void rxrpc_queue_local(struct rxrpc_local *); void rxrpc_destroy_all_locals(struct rxrpc_net *); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index bc2adeb3acb9..c767679bfa5d 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -43,8 +43,7 @@ static void rxrpc_propose_ping(struct rxrpc_call *call, * propose an ACK be sent */ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, - u16 skew, u32 serial, bool immediate, - bool background, + u32 serial, bool immediate, bool background, enum rxrpc_propose_ack_trace why) { enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use; @@ -69,14 +68,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) { outcome = 
rxrpc_propose_ack_update; call->ackr_serial = serial; - call->ackr_skew = skew; } if (!immediate) goto trace; } else if (prior > rxrpc_ack_priority[call->ackr_reason]) { call->ackr_reason = ack_reason; call->ackr_serial = serial; - call->ackr_skew = skew; } else { outcome = rxrpc_propose_ack_subsume; } @@ -137,11 +134,11 @@ trace: * propose an ACK be sent, locking the call structure */ void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, - u16 skew, u32 serial, bool immediate, bool background, + u32 serial, bool immediate, bool background, enum rxrpc_propose_ack_trace why) { spin_lock_bh(&call->lock); - __rxrpc_propose_ACK(call, ack_reason, skew, serial, + __rxrpc_propose_ACK(call, ack_reason, serial, immediate, background, why); spin_unlock_bh(&call->lock); } @@ -239,7 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) ack_ts = ktime_sub(now, call->acks_latest_ts); if (ktime_to_ns(ack_ts) < call->peer->rtt) goto out; - rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, rxrpc_propose_ack_ping_for_lost_ack); rxrpc_send_ack_packet(call, true, NULL); goto out; @@ -372,7 +369,7 @@ recheck_state: if (time_after_eq(now, t)) { trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now); cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET); - rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true, rxrpc_propose_ack_ping_for_keepalive); set_bit(RXRPC_CALL_EV_PING, &call->events); } @@ -407,7 +404,7 @@ recheck_state: send_ack = NULL; if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) { call->acks_lost_top = call->tx_top; - rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, rxrpc_propose_ack_ping_for_lost_ack); send_ack = &call->acks_lost_ping; } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 5bd6f1546e5c..dd47d465d1d3 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -196,15 +196,14 @@ send_extra_data: * Ping the other end to fill our RTT cache and to retrieve the rwind * and MTU parameters. */ -static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, - int skew) +static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); ktime_t now = skb->tstamp; if (call->peer->rtt_usage < 3 || ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) - rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, true, true, rxrpc_propose_ack_ping_for_params); } @@ -419,8 +418,7 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, /* * Process a DATA packet, adding the packet to the Rx ring. 
*/ -static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, - u16 skew) +static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); enum rxrpc_call_state state; @@ -600,11 +598,11 @@ skip: ack: if (ack) - rxrpc_propose_ACK(call, ack, skew, ack_serial, + rxrpc_propose_ACK(call, ack, ack_serial, immediate_ack, true, rxrpc_propose_ack_input_data); else - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true, rxrpc_propose_ack_input_data); @@ -822,8 +820,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks, * soft-ACK means that the packet may be discarded and retransmission * requested. A phase is complete when all packets are hard-ACK'd. */ -static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, - u16 skew) +static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_ack_summary summary = { 0 }; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); @@ -867,11 +864,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, if (buf.ack.reason == RXRPC_ACK_PING) { _proto("Rx ACK %%%u PING Request", sp->hdr.serial); rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, - skew, sp->hdr.serial, true, true, + sp->hdr.serial, true, true, rxrpc_propose_ack_respond_to_ping); } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, - skew, sp->hdr.serial, true, true, + sp->hdr.serial, true, true, rxrpc_propose_ack_respond_to_ack); } @@ -948,7 +945,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, RXRPC_TX_ANNO_LAST && summary.nr_acks == call->tx_top - hard_ack && rxrpc_is_client_call(call)) - rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, false, true, rxrpc_propose_ack_ping_for_lost_reply); @@ -1004,7 +1001,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb) * Process an incoming call packet. 
*/ static void rxrpc_input_call_packet(struct rxrpc_call *call, - struct sk_buff *skb, u16 skew) + struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); unsigned long timo; @@ -1023,11 +1020,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, switch (sp->hdr.type) { case RXRPC_PACKET_TYPE_DATA: - rxrpc_input_data(call, skb, skew); + rxrpc_input_data(call, skb); break; case RXRPC_PACKET_TYPE_ACK: - rxrpc_input_ack(call, skb, skew); + rxrpc_input_ack(call, skb); break; case RXRPC_PACKET_TYPE_BUSY: @@ -1108,8 +1105,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, { _enter("%p,%p", local, skb); - skb_queue_tail(&local->event_queue, skb); - rxrpc_queue_local(local); + if (rxrpc_get_local_maybe(local)) { + skb_queue_tail(&local->event_queue, skb); + rxrpc_queue_local(local); + } else { + rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + } } /* @@ -1119,8 +1120,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) { CHECK_SLAB_OKAY(&local->usage); - skb_queue_tail(&local->reject_queue, skb); - rxrpc_queue_local(local); + if (rxrpc_get_local_maybe(local)) { + skb_queue_tail(&local->reject_queue, skb); + rxrpc_queue_local(local); + } else { + rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + } } /* @@ -1173,7 +1178,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) struct rxrpc_peer *peer = NULL; struct rxrpc_sock *rx = NULL; unsigned int channel; - int skew = 0; _enter("%p", udp_sk); @@ -1301,15 +1305,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) goto out; } - /* Note the serial number skew here */ - skew = (int)sp->hdr.serial - (int)conn->hi_serial; - if (skew >= 0) { - if (skew > 0) - conn->hi_serial = sp->hdr.serial; - } else { - skew = -skew; - skew = min(skew, 65535); - } + if ((int)sp->hdr.serial - (int)conn->hi_serial > 0) + conn->hi_serial = sp->hdr.serial; /* Call-bound packets are routed by connection channel. */ channel = sp->hdr.cid & RXRPC_CHANNELMASK; @@ -1372,11 +1369,11 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) call = rxrpc_new_incoming_call(local, rx, skb); if (!call) goto reject_packet; - rxrpc_send_ping(call, skb, skew); + rxrpc_send_ping(call, skb); mutex_unlock(&call->user_mutex); } - rxrpc_input_call_packet(call, skb, skew); + rxrpc_input_call_packet(call, skb); goto discard; discard: diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index b1c71bad510b..72a6e12a9304 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); if (local) { atomic_set(&local->usage, 1); + atomic_set(&local->active_users, 1); local->rxnet = rxnet; INIT_LIST_HEAD(&local->link); INIT_WORK(&local->processor, rxrpc_local_processor); @@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, local->debug_id = atomic_inc_return(&rxrpc_debug_id); memcpy(&local->srx, srx, sizeof(*srx)); local->srx.srx_service = 0; - trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); + trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL); } _leave(" = %p", local); @@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, * bind the transport socket may still fail if we're attempting * to use a local address that the dying object is still using. 
*/ - if (!rxrpc_get_local_maybe(local)) { - cursor = cursor->next; - list_del_init(&local->link); + if (!rxrpc_use_local(local)) break; - } age = "old"; goto found; @@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, if (ret < 0) goto sock_error; - list_add_tail(&local->link, cursor); + if (cursor != &rxnet->local_endpoints) + list_replace_init(cursor, &local->link); + else + list_add_tail(&local->link, cursor); age = "new"; found: @@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) int n; n = atomic_inc_return(&local->usage); - trace_rxrpc_local(local, rxrpc_local_got, n, here); + trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here); return local; } @@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) if (local) { int n = atomic_fetch_add_unless(&local->usage, 1, 0); if (n > 0) - trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); + trace_rxrpc_local(local->debug_id, rxrpc_local_got, + n + 1, here); else local = NULL; } @@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) } /* - * Queue a local endpoint. + * Queue a local endpoint and pass the caller's reference to the work item. */ void rxrpc_queue_local(struct rxrpc_local *local) { const void *here = __builtin_return_address(0); + unsigned int debug_id = local->debug_id; + int n = atomic_read(&local->usage); if (rxrpc_queue_work(&local->processor)) - trace_rxrpc_local(local, rxrpc_local_queued, - atomic_read(&local->usage), here); -} - -/* - * A local endpoint reached its end of life. - */ -static void __rxrpc_put_local(struct rxrpc_local *local) -{ - _enter("%d", local->debug_id); - rxrpc_queue_work(&local->processor); + trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here); + else + rxrpc_put_local(local); } /* @@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local) if (local) { n = atomic_dec_return(&local->usage); - trace_rxrpc_local(local, rxrpc_local_put, n, here); + trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here); if (n == 0) - __rxrpc_put_local(local); + call_rcu(&local->rcu, rxrpc_local_rcu); + } +} + +/* + * Start using a local endpoint. + */ +struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local) +{ + unsigned int au; + + local = rxrpc_get_local_maybe(local); + if (!local) + return NULL; + + au = atomic_fetch_add_unless(&local->active_users, 1, 0); + if (au == 0) { + rxrpc_put_local(local); + return NULL; + } + + return local; +} + +/* + * Cease using a local endpoint. Once the number of active users reaches 0, we + * start the closure of the transport in the work processor. + */ +void rxrpc_unuse_local(struct rxrpc_local *local) +{ + unsigned int au; + + if (local) { + au = atomic_dec_return(&local->active_users); + if (au == 0) + rxrpc_queue_local(local); + else + rxrpc_put_local(local); } } @@ -393,16 +426,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) _enter("%d", local->debug_id); - /* We can get a race between an incoming call packet queueing the - * processor again and the work processor starting the destruction - * process which will shut down the UDP socket. 
- */ - if (local->dead) { - _leave(" [already dead]"); - return; - } - local->dead = true; - mutex_lock(&rxnet->local_mutex); list_del_init(&local->link); mutex_unlock(&rxnet->local_mutex); @@ -422,13 +445,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) */ rxrpc_purge_queue(&local->reject_queue); rxrpc_purge_queue(&local->event_queue); - - _debug("rcu local %d", local->debug_id); - call_rcu(&local->rcu, rxrpc_local_rcu); } /* - * Process events on an endpoint + * Process events on an endpoint. The work item carries a ref which + * we must release. */ static void rxrpc_local_processor(struct work_struct *work) { @@ -436,13 +457,15 @@ static void rxrpc_local_processor(struct work_struct *work) container_of(work, struct rxrpc_local, processor); bool again; - trace_rxrpc_local(local, rxrpc_local_processing, + trace_rxrpc_local(local->debug_id, rxrpc_local_processing, atomic_read(&local->usage), NULL); do { again = false; - if (atomic_read(&local->usage) == 0) - return rxrpc_local_destroyer(local); + if (atomic_read(&local->active_users) == 0) { + rxrpc_local_destroyer(local); + break; + } if (!skb_queue_empty(&local->reject_queue)) { rxrpc_reject_packets(local); @@ -454,6 +477,8 @@ static void rxrpc_local_processor(struct work_struct *work) again = true; } } while (again); + + rxrpc_put_local(local); } /* diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 948e3fe249ec..369e516c4bdf 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -87,7 +87,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, *_top = top; pkt->ack.bufferSpace = htons(8); - pkt->ack.maxSkew = htons(call->ackr_skew); + pkt->ack.maxSkew = htons(0); pkt->ack.firstPacket = htonl(hard_ack + 1); pkt->ack.previousPacket = htonl(call->ackr_prev_seq); pkt->ack.serial = htonl(serial); @@ -228,7 +228,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, if (ping) clear_bit(RXRPC_CALL_PINGING, &call->flags); rxrpc_propose_ACK(call, pkt->ack.reason, - ntohs(pkt->ack.maxSkew), ntohl(pkt->ack.serial), false, true, rxrpc_propose_ack_retry_tx); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 5abf46cf9e6c..9a7e1bc9791d 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -141,7 +141,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { - rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true, + rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true, rxrpc_propose_ack_terminal_ack); //rxrpc_send_ack_packet(call, false, NULL); } @@ -159,7 +159,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) call->state = RXRPC_CALL_SERVER_ACK_REQUEST; call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; write_unlock_bh(&call->state_lock); - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true, rxrpc_propose_ack_processing_op); break; default: @@ -212,7 +212,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) if (after_eq(hard_ack, call->ackr_consumed + 2) || after_eq(top, call->ackr_seen + 2) || (hard_ack == top && after(hard_ack, call->ackr_consumed))) - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, true, true, rxrpc_propose_ack_rotate_rx); if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) diff --git a/net/sched/act_skbedit.c 
b/net/sched/act_skbedit.c index b100870f02a6..37dced00b63d 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) +{ + return nla_total_size(sizeof(struct tc_skbedit)) + + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */ + + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */ + + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */ + + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */ + + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */ + + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */ +} + static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .id = TCA_ID_SKBEDIT, @@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = { .init = tcf_skbedit_init, .cleanup = tcf_skbedit_cleanup, .walk = tcf_skbedit_walker, + .get_fill_size = tcf_skbedit_get_fill_size, .lookup = tcf_skbedit_search, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 046fd2c102b4..540bde009ea5 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1191,7 +1191,8 @@ unlock: spin_unlock_bh(qdisc_lock(sch)); free_sched: - kfree(new_admin); + if (new_admin) + call_rcu(&new_admin->rcu, taprio_free_sched_cb); return err; } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index a554d6d15d1b..1cf5bb5b73c4 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands, */ if (net->sctp.pf_enable && (transport->state == SCTP_ACTIVE) && - (asoc->pf_retrans < transport->pathmaxrxt) && + (transport->error_count < transport->pathmaxrxt) && (transport->error_count > asoc->pf_retrans)) { sctp_assoc_control_transport(asoc, transport, diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 25946604af85..e83cdaa2ab76 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc, nstr_list[i] = htons(str_list[i]); if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) { + kfree(nstr_list); retval = -EAGAIN; goto out; } diff --git a/net/tipc/addr.c b/net/tipc/addr.c index b88d48d00913..0f1eaed1bd1b 100644 --- a/net/tipc/addr.c +++ b/net/tipc/addr.c @@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr) tipc_set_node_id(net, node_id); } tn->trial_addr = addr; + tn->addr_trial_end = jiffies; pr_info("32-bit node address hash set to %x\n", addr); } diff --git a/net/tipc/link.c b/net/tipc/link.c index 289e848084ac..6cc75ffd9e2c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -106,8 +106,6 @@ struct tipc_stats { * @transmitq: queue for sent, non-acked messages * @backlogq: queue for messages waiting to be sent * @snt_nxt: next sequence number to use for outbound messages - * @prev_from: sequence number of most previous retransmission request - * @stale_limit: time when repeated identical retransmits must force link reset * @ackers: # of peers that needs to ack each packet before it can be released * @acked: # last packet acked by a certain peer. Used for broadcast. 
* @rcv_nxt: next sequence number to expect for inbound messages @@ -164,9 +162,7 @@ struct tipc_link { u16 limit; } backlog[5]; u16 snd_nxt; - u16 prev_from; u16 window; - unsigned long stale_limit; /* Reception */ u16 rcv_nxt; @@ -1063,47 +1059,53 @@ static void tipc_link_advance_backlog(struct tipc_link *l, * link_retransmit_failure() - Detect repeated retransmit failures * @l: tipc link sender * @r: tipc link receiver (= l in case of unicast) - * @from: seqno of the 1st packet in retransmit request * @rc: returned code * * Return: true if the repeated retransmit failures happens, otherwise * false */ static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, - u16 from, int *rc) + int *rc) { struct sk_buff *skb = skb_peek(&l->transmq); struct tipc_msg *hdr; if (!skb) return false; - hdr = buf_msg(skb); - /* Detect repeated retransmit failures on same packet */ - if (r->prev_from != from) { - r->prev_from = from; - r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance); - } else if (time_after(jiffies, r->stale_limit)) { - pr_warn("Retransmission failure on link <%s>\n", l->name); - link_print(l, "State of link "); - pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", - msg_user(hdr), msg_type(hdr), msg_size(hdr), - msg_errcode(hdr)); - pr_info("sqno %u, prev: %x, src: %x\n", - msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr)); - - trace_tipc_list_dump(&l->transmq, true, "retrans failure!"); - trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!"); - trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!"); + if (!TIPC_SKB_CB(skb)->retr_cnt) + return false; - if (link_is_bc_sndlink(l)) - *rc = TIPC_LINK_DOWN_EVT; + if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp + + msecs_to_jiffies(r->tolerance))) + return false; + + hdr = buf_msg(skb); + if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr))) + return false; + pr_warn("Retransmission failure on link <%s>\n", l->name); + link_print(l, "State of link "); + pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", + msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr)); + pr_info("sqno %u, prev: %x, dest: %x\n", + msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr)); + pr_info("retr_stamp %d, retr_cnt %d\n", + jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp), + TIPC_SKB_CB(skb)->retr_cnt); + + trace_tipc_list_dump(&l->transmq, true, "retrans failure!"); + trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!"); + trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!"); + + if (link_is_bc_sndlink(l)) { + r->state = LINK_RESET; + *rc = TIPC_LINK_DOWN_EVT; + } else { *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); - return true; } - return false; + return true; } /* tipc_link_bc_retrans() - retransmit zero or more packets @@ -1129,7 +1131,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r, trace_tipc_link_retrans(r, from, to, &l->transmq); - if (link_retransmit_failure(l, r, from, &rc)) + if (link_retransmit_failure(l, r, &rc)) return rc; skb_queue_walk(&l->transmq, skb) { @@ -1138,11 +1140,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r, continue; if (more(msg_seqno(hdr), to)) break; - if (link_is_bc_sndlink(l)) { - if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) - continue; - TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; - } + + if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) + continue; + TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC); if 
(!_skb) return 0; @@ -1152,6 +1153,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r, _skb->priority = TC_PRIO_CONTROL; __skb_queue_tail(xmitq, _skb); l->stats.retransmitted++; + + /* Increase actual retrans counter & mark first time */ + if (!TIPC_SKB_CB(skb)->retr_cnt++) + TIPC_SKB_CB(skb)->retr_stamp = jiffies; } return 0; } @@ -1393,12 +1398,10 @@ static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap, struct tipc_msg *hdr; u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; u16 ack = l->rcv_nxt - 1; + bool passed = false; u16 seqno, n = 0; int rc = 0; - if (gap && link_retransmit_failure(l, l, acked + 1, &rc)) - return rc; - skb_queue_walk_safe(&l->transmq, skb, tmp) { seqno = buf_seqno(skb); @@ -1408,12 +1411,17 @@ next_gap_ack: __skb_unlink(skb, &l->transmq); kfree_skb(skb); } else if (less_eq(seqno, acked + gap)) { - /* retransmit skb */ + /* First, check if repeated retrans failures occurs? */ + if (!passed && link_retransmit_failure(l, l, &rc)) + return rc; + passed = true; + + /* retransmit skb if unrestricted*/ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) continue; TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; - - _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); + _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, + GFP_ATOMIC); if (!_skb) continue; hdr = buf_msg(_skb); @@ -1422,6 +1430,10 @@ next_gap_ack: _skb->priority = TC_PRIO_CONTROL; __skb_queue_tail(xmitq, _skb); l->stats.retransmitted++; + + /* Increase actual retrans counter & mark first time */ + if (!TIPC_SKB_CB(skb)->retr_cnt++) + TIPC_SKB_CB(skb)->retr_stamp = jiffies; } else { /* retry with Gap ACK blocks if any */ if (!ga || n >= ga->gack_cnt) @@ -2681,7 +2693,7 @@ int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf) i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); - i += scnprintf(buf + i, sz - i, " %u", l->prev_from); + i += scnprintf(buf + i, sz - i, " %u", 0); i += scnprintf(buf + i, sz - i, " %u", 0); i += scnprintf(buf + i, sz - i, " %u", l->acked); diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 1c8c8dd32a4e..0daa6f04ca81 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -102,13 +102,15 @@ struct plist; #define TIPC_MEDIA_INFO_OFFSET 5 struct tipc_skb_cb { - u32 bytes_read; - u32 orig_member; struct sk_buff *tail; unsigned long nxt_retr; - bool validated; + unsigned long retr_stamp; + u32 bytes_read; + u32 orig_member; u16 chain_imp; u16 ackers; + u16 retr_cnt; + bool validated; }; #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index d184230665eb..a470df7ffcf9 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -373,9 +373,9 @@ static int tls_push_data(struct sock *sk, struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); - int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); struct tls_record_info *record = ctx->open_record; + int tls_push_record_flags; struct page_frag *pfrag; size_t orig_size = size; u32 max_open_record_len; @@ -390,6 +390,9 @@ static int tls_push_data(struct sock *sk, if (sk->sk_err) return -sk->sk_err; + flags |= MSG_SENDPAGE_DECRYPTED; + tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); if 
(tls_is_partially_sent_record(tls_ctx)) { rc = tls_push_partial_record(sk, tls_ctx, flags); @@ -576,7 +579,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx) gfp_t sk_allocation = sk->sk_allocation; sk->sk_allocation = GFP_ATOMIC; - tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL); + tls_push_partial_record(sk, ctx, + MSG_DONTWAIT | MSG_NOSIGNAL | + MSG_SENDPAGE_DECRYPTED); sk->sk_allocation = sk_allocation; } } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 9cbbae606ced..43252a801c3f 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -308,6 +308,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) if (free_ctx) icsk->icsk_ulp_data = NULL; sk->sk_prot = ctx->sk_proto; + if (sk->sk_write_space == tls_write_space) + sk->sk_write_space = ctx->sk_write_space; write_unlock_bh(&sk->sk_callback_lock); release_sock(sk); if (ctx->tx_conf == TLS_SW) diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c index 85571e90191f..bfeab44f81d0 100644 --- a/samples/auxdisplay/cfag12864b-example.c +++ b/samples/auxdisplay/cfag12864b-example.c @@ -245,7 +245,7 @@ int main(int argc, char *argv[]) if (argc != 2) { printf( - "Sintax: %s fbdev\n" + "Syntax: %s fbdev\n" "Usually: /dev/fb0, /dev/fb1...\n", argv[0]); return -1; } diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 0d434d0afc0b..2f66ed388d1c 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -52,6 +52,13 @@ ifndef obj $(warning kbuild: Makefile.build is included improperly) endif +ifeq ($(MAKECMDGOALS)$(need-modorder),) +ifneq ($(obj-m),) +$(warning $(patsubst %.o,'%.ko',$(obj-m)) will not be built even though obj-m is specified.) +$(warning You cannot use subdir-y/m to visit a module Makefile. Use obj-y/m instead.) +endif +endif + # =========================================================================== ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),) @@ -487,7 +494,9 @@ targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \ PHONY += $(subdir-ym) $(subdir-ym): - $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1) + $(Q)$(MAKE) $(build)=$@ \ + need-builtin=$(if $(filter $@/built-in.a, $(subdir-obj-y)),1) \ + need-modorder=$(if $(need-modorder),$(if $(filter $@/modules.order, $(modorder)),1)) # Add FORCE to the prequisites of a target to force it to be always rebuilt. # --------------------------------------------------------------------------- diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 92ed02d7cd5e..26e6574ecd08 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -85,10 +85,8 @@ endif include scripts/Makefile.lib -modorder := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order - # find all modules listed in modules.order -modules := $(sort $(shell cat $(modorder))) +modules := $(sort $(shell cat $(MODORDER))) # Stop after building .o files if NOFINAL is set. Makes compile tests quicker __modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules)) @@ -98,7 +96,7 @@ MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux) # We can go over command line length here, so be careful. 
quiet_cmd_modpost = MODPOST $(words $(modules)) modules - cmd_modpost = sed 's/ko$$/o/' $(modorder) | $(MODPOST) + cmd_modpost = sed 's/ko$$/o/' $(MODORDER) | $(MODPOST) PHONY += modules-modpost modules-modpost: diff --git a/scripts/coccinelle/api/atomic_as_refcounter.cocci b/scripts/coccinelle/api/atomic_as_refcounter.cocci index 988120e0fd67..0f78d94abc35 100644 --- a/scripts/coccinelle/api/atomic_as_refcounter.cocci +++ b/scripts/coccinelle/api/atomic_as_refcounter.cocci @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only // Check if refcount_t type and API should be used // instead of atomic_t type when dealing with refcounters // diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 9a94672e7adc..ade699131065 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1228,24 +1228,11 @@ hashalg_fail: static int __init init_digests(void) { - u8 digest[TPM_MAX_DIGEST_SIZE]; - int ret; - int i; - - ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE); - if (ret < 0) - return ret; - if (ret < TPM_MAX_DIGEST_SIZE) - return -EFAULT; - digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests), GFP_KERNEL); if (!digests) return -ENOMEM; - for (i = 0; i < chip->nr_allocated_banks; i++) - memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE); - return 0; } diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c index 0d35359d25cd..0ecafd0c6722 100644 --- a/sound/firewire/packets-buffer.c +++ b/sound/firewire/packets-buffer.c @@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit, packets_per_page = PAGE_SIZE / packet_size; if (WARN_ON(!packets_per_page)) { err = -EINVAL; - goto error; + goto err_packets; } pages = DIV_ROUND_UP(count, packets_per_page); diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index c8d1b4316245..48d863736b3c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -598,11 +598,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream) } runtime->private_data = azx_dev; - if (chip->gts_present) - azx_pcm_hw.info = azx_pcm_hw.info | - SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME; - runtime->hw = azx_pcm_hw; + if (chip->gts_present) + runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME; runtime->hw.channels_min = hinfo->channels_min; runtime->hw.channels_max = hinfo->channels_max; runtime->hw.formats = hinfo->formats; @@ -615,6 +613,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream) 20, 178000000); + /* by some reason, the playback stream stalls on PulseAudio with + * tsched=1 when a capture stream triggers. Until we figure out the + * real cause, disable tsched mode by telling the PCM info flag. + */ + if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) + runtime->hw.info |= SNDRV_PCM_INFO_BATCH; + if (chip->align_buffer_size) /* constrain buffer sizes to be multiple of 128 bytes. 
This is more efficient in terms of memory diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index baa15374fbcb..f2a6df5e6bcb 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -31,7 +31,7 @@ /* 14 unused */ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */ #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */ -/* 17 unused */ +#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */ #define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */ #define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */ #define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */ diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 485edaba0037..5bf24fb819d2 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec) } EXPORT_SYMBOL_GPL(snd_hda_gen_free); +/** + * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting + * @codec: the HDA codec + * + * This can be put as patch_ops reboot_notify function. + */ +void snd_hda_gen_reboot_notify(struct hda_codec *codec) +{ + /* Make the codec enter D3 to avoid spurious noises from the internal + * speaker during (and after) reboot + */ + snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); + snd_hda_codec_write(codec, codec->core.afg, 0, + AC_VERB_SET_POWER_STATE, AC_PWRST_D3); + msleep(10); +} +EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify); + #ifdef CONFIG_PM /** * snd_hda_gen_check_power_status - check the loopback power save state @@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = { .init = snd_hda_gen_init, .free = snd_hda_gen_free, .unsol_event = snd_hda_jack_unsol_event, + .reboot_notify = snd_hda_gen_reboot_notify, #ifdef CONFIG_PM .check_power_status = snd_hda_gen_check_power_status, #endif @@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec) err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0); if (err < 0) - return err; + goto error; err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg); if (err < 0) diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 35a670a71c42..5f199dcb0d18 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec, struct auto_pin_cfg *cfg); int snd_hda_gen_build_controls(struct hda_codec *codec); int snd_hda_gen_build_pcms(struct hda_codec *codec); +void snd_hda_gen_reboot_notify(struct hda_codec *codec); /* standard jack event callbacks */ void snd_hda_gen_hp_automute(struct hda_codec *codec, diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1e14d7270adf..99fc0917339b 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -64,6 +64,7 @@ enum { POS_FIX_VIACOMBO, POS_FIX_COMBO, POS_FIX_SKL, + POS_FIX_FIFO, }; /* Defines for ATI HD Audio support in SB450 south bridge */ @@ -135,7 +136,7 @@ module_param_array(model, charp, NULL, 0444); MODULE_PARM_DESC(model, "Use the given board model."); module_param_array(position_fix, int, NULL, 0444); MODULE_PARM_DESC(position_fix, "DMA pointer read method." 
- "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+)."); + "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO)."); module_param_array(bdl_pos_adj, int, NULL, 0644); MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset."); module_param_array(probe_mask, int, NULL, 0444); @@ -332,6 +333,11 @@ enum { #define AZX_DCAPS_PRESET_ATI_HDMI_NS \ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF) +/* quirks for AMD SB */ +#define AZX_DCAPS_PRESET_AMD_SB \ + (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\ + AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME) + /* quirks for Nvidia */ #define AZX_DCAPS_PRESET_NVIDIA \ (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\ @@ -841,6 +847,49 @@ static unsigned int azx_via_get_position(struct azx *chip, return bound_pos + mod_dma_pos; } +#define AMD_FIFO_SIZE 32 + +/* get the current DMA position with FIFO size correction */ +static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + struct snd_pcm_runtime *runtime = substream->runtime; + unsigned int pos, delay; + + pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev)); + if (!runtime) + return pos; + + runtime->delay = AMD_FIFO_SIZE; + delay = frames_to_bytes(runtime, AMD_FIFO_SIZE); + if (azx_dev->insufficient) { + if (pos < delay) { + delay = pos; + runtime->delay = bytes_to_frames(runtime, pos); + } else { + azx_dev->insufficient = 0; + } + } + + /* correct the DMA position for capture stream */ + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { + if (pos < delay) + pos += azx_dev->core.bufsize; + pos -= delay; + } + + return pos; +} + +static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev, + unsigned int pos) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + + /* just read back the calculated value in the above */ + return substream->runtime->delay; +} + static unsigned int azx_skl_get_dpib_pos(struct azx *chip, struct azx_dev *azx_dev) { @@ -1417,6 +1466,7 @@ static int check_position_fix(struct azx *chip, int fix) case POS_FIX_VIACOMBO: case POS_FIX_COMBO: case POS_FIX_SKL: + case POS_FIX_FIFO: return fix; } @@ -1433,6 +1483,10 @@ static int check_position_fix(struct azx *chip, int fix) dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n"); return POS_FIX_VIACOMBO; } + if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) { + dev_dbg(chip->card->dev, "Using FIFO position fix\n"); + return POS_FIX_FIFO; + } if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) { dev_dbg(chip->card->dev, "Using LPIB position fix\n"); return POS_FIX_LPIB; @@ -1453,6 +1507,7 @@ static void assign_position_fix(struct azx *chip, int fix) [POS_FIX_VIACOMBO] = azx_via_get_position, [POS_FIX_COMBO] = azx_get_pos_lpib, [POS_FIX_SKL] = azx_get_pos_skl, + [POS_FIX_FIFO] = azx_get_pos_fifo, }; chip->get_position[0] = chip->get_position[1] = callbacks[fix]; @@ -1467,6 +1522,9 @@ static void assign_position_fix(struct azx *chip, int fix) azx_get_delay_from_lpib; } + if (fix == POS_FIX_FIFO) + chip->get_delay[0] = chip->get_delay[1] = + azx_get_delay_from_fifo; } /* @@ -2447,6 +2505,12 @@ static const struct pci_device_id azx_ids[] = { /* AMD Hudson */ { PCI_DEVICE(0x1022, 0x780d), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, + /* AMD, X370 & co */ + { PCI_DEVICE(0x1022, 0x1457), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, + /* AMD, X570 & co */ + { 
PCI_DEVICE(0x1022, 0x1487), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, /* AMD Stoney */ { PCI_DEVICE(0x1022, 0x157a), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index f299f137eaea..14298ef45b21 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec) { struct conexant_spec *spec = codec->spec; - switch (codec->core.vendor_id) { - case 0x14f12008: /* CX8200 */ - case 0x14f150f2: /* CX20722 */ - case 0x14f150f4: /* CX20724 */ - break; - default: - return; - } - /* Turn the problematic codec into D3 to avoid spurious noises from the internal speaker during (and after) reboot */ cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); - - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); - snd_hda_codec_write(codec, codec->core.afg, 0, - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); - msleep(10); + snd_hda_gen_reboot_notify(codec); } static void cx_auto_free(struct hda_codec *codec) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index de224cbea7a0..e333b3e30e31 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec) alc_shutup(codec); } -/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */ -static void alc_d3_at_reboot(struct hda_codec *codec) -{ - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); - snd_hda_codec_write(codec, codec->core.afg, 0, - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); - msleep(10); -} - #define alc_free snd_hda_gen_free #ifdef CONFIG_PM @@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec, struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { - spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ + spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; codec->power_save_node = 0; /* avoid click noises */ snd_hda_apply_pincfgs(codec, pincfgs); @@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c index a4ade6bb5beb..bc4dfafdfcd1 100644 --- a/sound/soc/amd/raven/acp3x-pcm-dma.c +++ b/sound/soc/amd/raven/acp3x-pcm-dma.c @@ -31,8 +31,8 @@ struct i2s_stream_instance { u16 num_pages; u16 channels; u32 xfer_resolution; - struct page *pg; u64 bytescount; + dma_addr_t dma_addr; void __iomem *acp3x_base; }; @@ -211,9 +211,8 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id) static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction) { u16 page_idx; - u64 addr; u32 low, high, val, acp_fifo_addr; - struct page *pg = rtd->pg; + dma_addr_t addr = rtd->dma_addr; /* 8 scratch registers used to map one 64 bit 
address */ if (direction == SNDRV_PCM_STREAM_PLAYBACK) @@ -229,7 +228,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction) for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) { /* Load the low address of page int ACP SRAM through SRBM */ - addr = page_to_phys(pg); low = lower_32_bits(addr); high = upper_32_bits(addr); @@ -239,7 +237,7 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction) + 4); /* Move to next physically contiguos page */ val += 8; - pg++; + addr += PAGE_SIZE; } if (direction == SNDRV_PCM_STREAM_PLAYBACK) { @@ -341,7 +339,6 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream, { int status; u64 size; - struct page *pg; struct snd_pcm_runtime *runtime = substream->runtime; struct i2s_stream_instance *rtd = runtime->private_data; @@ -354,9 +351,8 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream, return status; memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); - pg = virt_to_page(substream->dma_buffer.area); - if (pg) { - rtd->pg = pg; + if (substream->dma_buffer.area) { + rtd->dma_addr = substream->dma_buffer.addr; rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT); config_acp3x_dma(rtd, substream->stream); status = 0; @@ -385,9 +381,11 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream) static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd) { + struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, + DRV_NAME); + struct device *parent = component->dev->parent; snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV, - rtd->pcm->card->dev, - MIN_BUFFER, MAX_BUFFER); + parent, MIN_BUFFER, MAX_BUFFER); return 0; } diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c index 6203f54d9f25..5b049fcdba20 100644 --- a/sound/soc/codecs/cs42xx8.c +++ b/sound/soc/codecs/cs42xx8.c @@ -47,6 +47,7 @@ struct cs42xx8_priv { unsigned long sysclk; u32 tx_channels; struct gpio_desc *gpiod_reset; + u32 rate[2]; }; /* -127.5dB to 0dB with step of 0.5dB */ @@ -176,21 +177,27 @@ static const struct snd_soc_dapm_route cs42xx8_adc3_dapm_routes[] = { }; struct cs42xx8_ratios { - unsigned int ratio; - unsigned char speed; - unsigned char mclk; + unsigned int mfreq; + unsigned int min_mclk; + unsigned int max_mclk; + unsigned int ratio[3]; }; +/* + * According to reference mannual, define the cs42xx8_ratio struct + * MFreq2 | MFreq1 | MFreq0 | Description | SSM | DSM | QSM | + * 0 | 0 | 0 |1.029MHz to 12.8MHz | 256 | 128 | 64 | + * 0 | 0 | 1 |1.536MHz to 19.2MHz | 384 | 192 | 96 | + * 0 | 1 | 0 |2.048MHz to 25.6MHz | 512 | 256 | 128 | + * 0 | 1 | 1 |3.072MHz to 38.4MHz | 768 | 384 | 192 | + * 1 | x | x |4.096MHz to 51.2MHz |1024 | 512 | 256 | + */ static const struct cs42xx8_ratios cs42xx8_ratios[] = { - { 64, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_256(4) }, - { 96, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_384(4) }, - { 128, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_512(4) }, - { 192, CS42XX8_FM_QUAD, CS42XX8_FUNCMOD_MFREQ_768(4) }, - { 256, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_256(1) }, - { 384, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_384(1) }, - { 512, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_512(1) }, - { 768, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_768(1) }, - { 1024, CS42XX8_FM_SINGLE, CS42XX8_FUNCMOD_MFREQ_1024(1) } + { 0, 1029000, 12800000, {256, 128, 64} }, + { 2, 1536000, 19200000, {384, 192, 96} }, + { 4, 2048000, 25600000, {512, 256, 128} }, + { 6, 3072000, 38400000, {768, 384, 192} }, + { 8, 4096000, 
51200000, {1024, 512, 256} }, }; static int cs42xx8_set_dai_sysclk(struct snd_soc_dai *codec_dai, @@ -257,14 +264,68 @@ static int cs42xx8_hw_params(struct snd_pcm_substream *substream, struct snd_soc_component *component = dai->component; struct cs42xx8_priv *cs42xx8 = snd_soc_component_get_drvdata(component); bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; - u32 ratio = cs42xx8->sysclk / params_rate(params); - u32 i, fm, val, mask; + u32 ratio[2]; + u32 rate[2]; + u32 fm[2]; + u32 i, val, mask; + bool condition1, condition2; if (tx) cs42xx8->tx_channels = params_channels(params); + rate[tx] = params_rate(params); + rate[!tx] = cs42xx8->rate[!tx]; + + ratio[tx] = rate[tx] > 0 ? cs42xx8->sysclk / rate[tx] : 0; + ratio[!tx] = rate[!tx] > 0 ? cs42xx8->sysclk / rate[!tx] : 0; + + /* Get functional mode for tx and rx according to rate */ + for (i = 0; i < 2; i++) { + if (cs42xx8->slave_mode) { + fm[i] = CS42XX8_FM_AUTO; + } else { + if (rate[i] < 50000) { + fm[i] = CS42XX8_FM_SINGLE; + } else if (rate[i] > 50000 && rate[i] < 100000) { + fm[i] = CS42XX8_FM_DOUBLE; + } else if (rate[i] > 100000 && rate[i] < 200000) { + fm[i] = CS42XX8_FM_QUAD; + } else { + dev_err(component->dev, + "unsupported sample rate\n"); + return -EINVAL; + } + } + } + for (i = 0; i < ARRAY_SIZE(cs42xx8_ratios); i++) { - if (cs42xx8_ratios[i].ratio == ratio) + /* Is the ratio[tx] valid ? */ + condition1 = ((fm[tx] == CS42XX8_FM_AUTO) ? + (cs42xx8_ratios[i].ratio[0] == ratio[tx] || + cs42xx8_ratios[i].ratio[1] == ratio[tx] || + cs42xx8_ratios[i].ratio[2] == ratio[tx]) : + (cs42xx8_ratios[i].ratio[fm[tx]] == ratio[tx])) && + cs42xx8->sysclk >= cs42xx8_ratios[i].min_mclk && + cs42xx8->sysclk <= cs42xx8_ratios[i].max_mclk; + + if (!ratio[tx]) + condition1 = true; + + /* Is the ratio[!tx] valid ? */ + condition2 = ((fm[!tx] == CS42XX8_FM_AUTO) ? + (cs42xx8_ratios[i].ratio[0] == ratio[!tx] || + cs42xx8_ratios[i].ratio[1] == ratio[!tx] || + cs42xx8_ratios[i].ratio[2] == ratio[!tx]) : + (cs42xx8_ratios[i].ratio[fm[!tx]] == ratio[!tx])); + + if (!ratio[!tx]) + condition2 = true; + + /* + * Both ratio[tx] and ratio[!tx] is valid, then we get + * a proper MFreq. + */ + if (condition1 && condition2) break; } @@ -273,15 +334,31 @@ static int cs42xx8_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } - mask = CS42XX8_FUNCMOD_MFREQ_MASK; - val = cs42xx8_ratios[i].mclk; + cs42xx8->rate[tx] = params_rate(params); - fm = cs42xx8->slave_mode ? 
CS42XX8_FM_AUTO : cs42xx8_ratios[i].speed; + mask = CS42XX8_FUNCMOD_MFREQ_MASK; + val = cs42xx8_ratios[i].mfreq; regmap_update_bits(cs42xx8->regmap, CS42XX8_FUNCMOD, CS42XX8_FUNCMOD_xC_FM_MASK(tx) | mask, - CS42XX8_FUNCMOD_xC_FM(tx, fm) | val); + CS42XX8_FUNCMOD_xC_FM(tx, fm[tx]) | val); + + return 0; +} + +static int cs42xx8_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct cs42xx8_priv *cs42xx8 = snd_soc_component_get_drvdata(component); + bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; + /* Clear stored rate */ + cs42xx8->rate[tx] = 0; + + regmap_update_bits(cs42xx8->regmap, CS42XX8_FUNCMOD, + CS42XX8_FUNCMOD_xC_FM_MASK(tx), + CS42XX8_FUNCMOD_xC_FM(tx, CS42XX8_FM_AUTO)); return 0; } @@ -302,6 +379,7 @@ static const struct snd_soc_dai_ops cs42xx8_dai_ops = { .set_fmt = cs42xx8_set_dai_fmt, .set_sysclk = cs42xx8_set_dai_sysclk, .hw_params = cs42xx8_hw_params, + .hw_free = cs42xx8_hw_free, .digital_mute = cs42xx8_digital_mute, }; diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c index 6f0e28f903bf..16313b973eaa 100644 --- a/sound/soc/codecs/max98357a.c +++ b/sound/soc/codecs/max98357a.c @@ -20,20 +20,10 @@ #include <sound/soc-dapm.h> struct max98357a_priv { - struct delayed_work enable_sdmode_work; struct gpio_desc *sdmode; unsigned int sdmode_delay; }; -static void max98357a_enable_sdmode_work(struct work_struct *work) -{ - struct max98357a_priv *max98357a = - container_of(work, struct max98357a_priv, - enable_sdmode_work.work); - - gpiod_set_value(max98357a->sdmode, 1); -} - static int max98357a_daiops_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { @@ -46,14 +36,12 @@ static int max98357a_daiops_trigger(struct snd_pcm_substream *substream, case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - queue_delayed_work(system_power_efficient_wq, - &max98357a->enable_sdmode_work, - msecs_to_jiffies(max98357a->sdmode_delay)); + mdelay(max98357a->sdmode_delay); + gpiod_set_value(max98357a->sdmode, 1); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - cancel_delayed_work_sync(&max98357a->enable_sdmode_work); gpiod_set_value(max98357a->sdmode, 0); break; } @@ -112,30 +100,25 @@ static int max98357a_platform_probe(struct platform_device *pdev) int ret; max98357a = devm_kzalloc(&pdev->dev, sizeof(*max98357a), GFP_KERNEL); - if (!max98357a) return -ENOMEM; max98357a->sdmode = devm_gpiod_get_optional(&pdev->dev, "sdmode", GPIOD_OUT_LOW); - if (IS_ERR(max98357a->sdmode)) return PTR_ERR(max98357a->sdmode); ret = device_property_read_u32(&pdev->dev, "sdmode-delay", &max98357a->sdmode_delay); - if (ret) { max98357a->sdmode_delay = 0; dev_dbg(&pdev->dev, - "no optional property 'sdmode-delay' found, default: no delay\n"); + "no optional property 'sdmode-delay' found, " + "default: no delay\n"); } dev_set_drvdata(&pdev->dev, max98357a); - INIT_DELAYED_WORK(&max98357a->enable_sdmode_work, - max98357a_enable_sdmode_work); - return devm_snd_soc_register_component(&pdev->dev, &max98357a_component_driver, &max98357a_dai_driver, 1); diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index 528695cd6a1c..8c601a3ebc27 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c @@ -267,6 +267,12 @@ static int max98373_dai_hw_params(struct snd_pcm_substream *substream, case 48000: sampling_rate = MAX98373_PCM_SR_SET1_SR_48000; 
break; + case 88200: + sampling_rate = MAX98373_PCM_SR_SET1_SR_88200; + break; + case 96000: + sampling_rate = MAX98373_PCM_SR_SET1_SR_96000; + break; default: dev_err(component->dev, "rate %d not supported\n", params_rate(params)); diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h index f6a37aa02f26..a59e51355a84 100644 --- a/sound/soc/codecs/max98373.h +++ b/sound/soc/codecs/max98373.h @@ -130,6 +130,8 @@ #define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0) #define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0) #define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0) +#define MAX98373_PCM_SR_SET1_SR_88200 (0x9 << 0) +#define MAX98373_PCM_SR_SET1_SR_96000 (0xA << 0) /* MAX98373_R2028_PCM_SR_SETUP_2 */ #define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4) diff --git a/sound/soc/codecs/pcm3060-i2c.c b/sound/soc/codecs/pcm3060-i2c.c index cdc8314882bc..abcdeb922201 100644 --- a/sound/soc/codecs/pcm3060-i2c.c +++ b/sound/soc/codecs/pcm3060-i2c.c @@ -2,7 +2,7 @@ // // PCM3060 I2C driver // -// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> +// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com> #include <linux/i2c.h> #include <linux/module.h> @@ -56,5 +56,5 @@ static struct i2c_driver pcm3060_i2c_driver = { module_i2c_driver(pcm3060_i2c_driver); MODULE_DESCRIPTION("PCM3060 I2C driver"); -MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>"); +MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.com>"); MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/pcm3060-spi.c b/sound/soc/codecs/pcm3060-spi.c index f6f19fa80932..3b79734b832b 100644 --- a/sound/soc/codecs/pcm3060-spi.c +++ b/sound/soc/codecs/pcm3060-spi.c @@ -2,7 +2,7 @@ // // PCM3060 SPI driver // -// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> +// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com> #include <linux/module.h> #include <linux/spi/spi.h> @@ -55,5 +55,5 @@ static struct spi_driver pcm3060_spi_driver = { module_spi_driver(pcm3060_spi_driver); MODULE_DESCRIPTION("PCM3060 SPI driver"); -MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>"); +MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.com>"); MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c index 32b26f1c2282..b2358069cf9b 100644 --- a/sound/soc/codecs/pcm3060.c +++ b/sound/soc/codecs/pcm3060.c @@ -2,7 +2,7 @@ // // PCM3060 codec driver // -// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> +// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com> #include <linux/module.h> #include <sound/pcm_params.h> @@ -342,5 +342,5 @@ int pcm3060_probe(struct device *dev) EXPORT_SYMBOL(pcm3060_probe); MODULE_DESCRIPTION("PCM3060 codec driver"); -MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>"); +MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.com>"); MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/pcm3060.h b/sound/soc/codecs/pcm3060.h index 75931c9a9d85..18d51e5dac2c 100644 --- a/sound/soc/codecs/pcm3060.h +++ b/sound/soc/codecs/pcm3060.h @@ -2,7 +2,7 @@ /* * PCM3060 codec driver * - * Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech> + * Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.com> */ #ifndef _SND_SOC_PCM3060_H diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c index 5605b660f4bf..0a6ff13d76e1 100644 --- a/sound/soc/codecs/rt1011.c +++ b/sound/soc/codecs/rt1011.c @@ -39,7 +39,7 @@ static const struct reg_sequence init_list[] = { { 
RT1011_POWER_9, 0xa840 }, { RT1011_ADC_SET_5, 0x0a20 }, - { RT1011_DAC_SET_2, 0xa232 }, + { RT1011_DAC_SET_2, 0xa032 }, { RT1011_ADC_SET_1, 0x2925 }, { RT1011_SPK_PRO_DC_DET_1, 0xb00c }, @@ -1917,7 +1917,7 @@ static int rt1011_set_bias_level(struct snd_soc_component *component, snd_soc_component_write(component, RT1011_SYSTEM_RESET_2, 0x0000); snd_soc_component_write(component, - RT1011_SYSTEM_RESET_3, 0x0000); + RT1011_SYSTEM_RESET_3, 0x0001); snd_soc_component_write(component, RT1011_SYSTEM_RESET_1, 0x003f); snd_soc_component_write(component, diff --git a/sound/soc/codecs/rt1308.c b/sound/soc/codecs/rt1308.c index d673506c7c39..d673506c7c39 100755..100644 --- a/sound/soc/codecs/rt1308.c +++ b/sound/soc/codecs/rt1308.c diff --git a/sound/soc/codecs/rt1308.h b/sound/soc/codecs/rt1308.h index c330aae1d527..c330aae1d527 100755..100644 --- a/sound/soc/codecs/rt1308.h +++ b/sound/soc/codecs/rt1308.h diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c index 30a4e8399ec3..288df245b2f0 100644 --- a/sound/soc/generic/audio-graph-card.c +++ b/sound/soc/generic/audio-graph-card.c @@ -63,6 +63,7 @@ static int graph_get_dai_id(struct device_node *ep) struct device_node *endpoint; struct of_endpoint info; int i, id; + const u32 *reg; int ret; /* use driver specified DAI ID if exist */ @@ -83,8 +84,9 @@ static int graph_get_dai_id(struct device_node *ep) return info.id; node = of_get_parent(ep); + reg = of_get_property(node, "reg", NULL); of_node_put(node); - if (of_get_property(node, "reg", NULL)) + if (reg) return info.port; } node = of_graph_get_port_parent(ep); @@ -208,10 +210,6 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, dev_dbg(dev, "link_of DPCM (%pOF)\n", ep); - of_node_put(ports); - of_node_put(port); - of_node_put(node); - if (li->cpu) { int is_single_links = 0; @@ -229,17 +227,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, ret = asoc_simple_parse_cpu(ep, dai_link, &is_single_links); if (ret) - return ret; + goto out_put_node; ret = asoc_simple_parse_clk_cpu(dev, ep, dai_link, dai); if (ret < 0) - return ret; + goto out_put_node; ret = asoc_simple_set_dailink_name(dev, dai_link, "fe.%s", cpus->dai_name); if (ret < 0) - return ret; + goto out_put_node; /* card->num_links includes Codec */ asoc_simple_canonicalize_cpu(dai_link, is_single_links); @@ -263,17 +261,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, ret = asoc_simple_parse_codec(ep, dai_link); if (ret < 0) - return ret; + goto out_put_node; ret = asoc_simple_parse_clk_codec(dev, ep, dai_link, dai); if (ret < 0) - return ret; + goto out_put_node; ret = asoc_simple_set_dailink_name(dev, dai_link, "be.%s", codecs->dai_name); if (ret < 0) - return ret; + goto out_put_node; /* check "prefix" from top node */ snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, @@ -293,19 +291,23 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, ret = asoc_simple_parse_tdm(ep, dai); if (ret) - return ret; + goto out_put_node; ret = asoc_simple_parse_daifmt(dev, cpu_ep, codec_ep, NULL, &dai_link->dai_fmt); if (ret < 0) - return ret; + goto out_put_node; dai_link->dpcm_playback = 1; dai_link->dpcm_capture = 1; dai_link->ops = &graph_ops; dai_link->init = asoc_simple_dai_init; - return 0; +out_put_node: + of_node_put(ports); + of_node_put(port); + of_node_put(node); + return ret; } static int graph_dai_link_of(struct asoc_simple_priv *priv, diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c 
index ac8678fe55ff..556b1a789629 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -349,6 +349,13 @@ void asoc_simple_canonicalize_platform(struct snd_soc_dai_link *dai_link) /* Assumes platform == cpu */ if (!dai_link->platforms->of_node) dai_link->platforms->of_node = dai_link->cpus->of_node; + + /* + * DPCM BE can be no platform. + * Alloced memory will be waste, but not leak. + */ + if (!dai_link->platforms->of_node) + dai_link->num_platforms = 0; } EXPORT_SYMBOL_GPL(asoc_simple_canonicalize_platform); diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index e5cde0d5e63c..ef849151ba56 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -124,8 +124,6 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, li->link++; - of_node_put(node); - /* For single DAI link & old style of DT node */ if (is_top) prefix = PREFIX; @@ -147,17 +145,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, ret = asoc_simple_parse_cpu(np, dai_link, &is_single_links); if (ret) - return ret; + goto out_put_node; ret = asoc_simple_parse_clk_cpu(dev, np, dai_link, dai); if (ret < 0) - return ret; + goto out_put_node; ret = asoc_simple_set_dailink_name(dev, dai_link, "fe.%s", cpus->dai_name); if (ret < 0) - return ret; + goto out_put_node; asoc_simple_canonicalize_cpu(dai_link, is_single_links); } else { @@ -180,17 +178,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, ret = asoc_simple_parse_codec(np, dai_link); if (ret < 0) - return ret; + goto out_put_node; ret = asoc_simple_parse_clk_codec(dev, np, dai_link, dai); if (ret < 0) - return ret; + goto out_put_node; ret = asoc_simple_set_dailink_name(dev, dai_link, "be.%s", codecs->dai_name); if (ret < 0) - return ret; + goto out_put_node; /* check "prefix" from top node */ snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, @@ -208,19 +206,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, ret = asoc_simple_parse_tdm(np, dai); if (ret) - return ret; + goto out_put_node; ret = asoc_simple_parse_daifmt(dev, node, codec, prefix, &dai_link->dai_fmt); if (ret < 0) - return ret; + goto out_put_node; dai_link->dpcm_playback = 1; dai_link->dpcm_capture = 1; dai_link->ops = &simple_ops; dai_link->init = asoc_simple_dai_init; - return 0; +out_put_node: + of_node_put(node); + return ret; } static int simple_dai_link_of(struct asoc_simple_priv *priv, @@ -364,8 +364,6 @@ static int simple_for_each_link(struct asoc_simple_priv *priv, goto error; } - of_node_put(codec); - /* get convert-xxx property */ memset(&adata, 0, sizeof(adata)); for_each_child_of_node(node, np) @@ -387,11 +385,13 @@ static int simple_for_each_link(struct asoc_simple_priv *priv, ret = func_noml(priv, np, codec, li, is_top); if (ret < 0) { + of_node_put(codec); of_node_put(np); goto error; } } + of_node_put(codec); node = of_get_next_child(top, node); } while (!is_top && node); diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index fac09be3cade..46612331f5ea 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -437,6 +437,14 @@ static const struct acpi_gpio_mapping byt_cht_es8316_gpios[] = { /* Please keep this list alphabetically sorted */ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = { + { /* Irbis NB41 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"), + DMI_MATCH(DMI_PRODUCT_NAME, "NB41"), + }, + 
.driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP + | BYT_CHT_ES8316_JD_INVERTED), + }, { /* Teclast X98 Plus II */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"), diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c index 229e39586868..4a5adae1d785 100644 --- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * soc-apci-intel-bxt-match.c - tables and support for BXT ACPI enumeration. + * soc-acpi-intel-bxt-match.c - tables and support for BXT ACPI enumeration. * * Copyright (c) 2018, Intel Corporation. * diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c index b94b482ac34f..1cc801ba92eb 100644 --- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * soc-apci-intel-byt-match.c - tables and support for BYT ACPI enumeration. + * soc-acpi-intel-byt-match.c - tables and support for BYT ACPI enumeration. * * Copyright (c) 2017, Intel Corporation. */ diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c index b7f11f6be1cf..d0fb43c2b9f6 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * soc-apci-intel-cht-match.c - tables and support for CHT ACPI enumeration. + * soc-acpi-intel-cht-match.c - tables and support for CHT ACPI enumeration. * * Copyright (c) 2017, Intel Corporation. */ diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c index c36c0aa4f683..771b0ef21051 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * soc-apci-intel-cnl-match.c - tables and support for CNL ACPI enumeration. + * soc-acpi-intel-cnl-match.c - tables and support for CNL ACPI enumeration. * * Copyright (c) 2018, Intel Corporation. * diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c index 616eb09e78a0..60dea358fa04 100644 --- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * soc-apci-intel-glk-match.c - tables and support for GLK ACPI enumeration. + * soc-acpi-intel-glk-match.c - tables and support for GLK ACPI enumeration. * * Copyright (c) 2018, Intel Corporation. * diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c index 68ae43f7b4b2..cc972d2ac691 100644 --- a/sound/soc/intel/common/soc-acpi-intel-hda-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c @@ -2,7 +2,7 @@ // Copyright (c) 2018, Intel Corporation. /* - * soc-apci-intel-hda-match.c - tables and support for HDA+ACPI enumeration. + * soc-acpi-intel-hda-match.c - tables and support for HDA+ACPI enumeration. 
* */ diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c index d27853e7a369..34eb0baaa951 100644 --- a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * soc-apci-intel-hsw-bdw-match.c - tables and support for ACPI enumeration. + * soc-acpi-intel-hsw-bdw-match.c - tables and support for ACPI enumeration. * * Copyright (c) 2017, Intel Corporation. */ diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c index 0b430b9b3673..38977669b576 100644 --- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * soc-apci-intel-icl-match.c - tables and support for ICL ACPI enumeration. + * soc-acpi-intel-icl-match.c - tables and support for ICL ACPI enumeration. * * Copyright (c) 2018, Intel Corporation. * diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c index 4b331058e807..e200baa11011 100644 --- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * soc-apci-intel-kbl-match.c - tables and support for KBL ACPI enumeration. + * soc-acpi-intel-kbl-match.c - tables and support for KBL ACPI enumeration. * * Copyright (c) 2018, Intel Corporation. * diff --git a/sound/soc/intel/common/soc-acpi-intel-skl-match.c b/sound/soc/intel/common/soc-acpi-intel-skl-match.c index 0c9c0edd35b3..42fa40a8d932 100644 --- a/sound/soc/intel/common/soc-acpi-intel-skl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-skl-match.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * soc-apci-intel-skl-match.c - tables and support for SKL ACPI enumeration. + * soc-acpi-intel-skl-match.c - tables and support for SKL ACPI enumeration. * * Copyright (c) 2018, Intel Corporation. 
* diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c index f60a71990f66..ac75838bbfab 100644 --- a/sound/soc/qcom/apq8016_sbc.c +++ b/sound/soc/qcom/apq8016_sbc.c @@ -150,17 +150,17 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card) link = data->dai_link; - dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL); - if (!dlc) - return ERR_PTR(-ENOMEM); + for_each_child_of_node(node, np) { + dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL); + if (!dlc) + return ERR_PTR(-ENOMEM); - link->cpus = &dlc[0]; - link->platforms = &dlc[1]; + link->cpus = &dlc[0]; + link->platforms = &dlc[1]; - link->num_cpus = 1; - link->num_platforms = 1; + link->num_cpus = 1; + link->num_platforms = 1; - for_each_child_of_node(node, np) { cpu = of_get_child_by_name(np, "cpu"); codec = of_get_child_by_name(np, "codec"); diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index 0a34d0eb8dba..88ebaf6e1880 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -326,7 +326,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream, val |= I2S_CHN_4; break; case 2: - case 1: val |= I2S_CHN_2; break; default: @@ -459,7 +458,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = { }, .capture = { .stream_name = "Capture", - .channels_min = 1, + .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = (SNDRV_PCM_FMTBIT_S8 | @@ -659,7 +658,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev) } if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) { - if (val >= 1 && val <= 8) + if (val >= 2 && val <= 8) soc_dai->capture.channels_max = val; } diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c index c5fc24675a33..782e534d4c0d 100644 --- a/sound/soc/rockchip/rockchip_max98090.c +++ b/sound/soc/rockchip/rockchip_max98090.c @@ -61,6 +61,37 @@ static const struct snd_kcontrol_new rk_mc_controls[] = { SOC_DAPM_PIN_SWITCH("Speaker"), }; +static int rk_jack_event(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct snd_soc_jack *jack = (struct snd_soc_jack *)data; + struct snd_soc_dapm_context *dapm = &jack->card->dapm; + + if (event & SND_JACK_MICROPHONE) + snd_soc_dapm_force_enable_pin(dapm, "MICBIAS"); + else + snd_soc_dapm_disable_pin(dapm, "MICBIAS"); + + snd_soc_dapm_sync(dapm); + + return 0; +} + +static struct notifier_block rk_jack_nb = { + .notifier_call = rk_jack_event, +}; + +static int rk_init(struct snd_soc_pcm_runtime *runtime) +{ + /* + * The jack has already been created in the rk_98090_headset_init() + * function. 
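Earlier in this hunk, the apq8016_sbc change moves the devm_kzalloc() of the cpu/platform component pair inside the for_each_child_of_node() loop, so every DAI link owns its own pair instead of all links aliasing one shared allocation. A stripped-down sketch of why that matters, with plain calloc standing in for devm_kzalloc and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct component { char name[32]; };
struct link { struct component *cpus, *platforms; };

/* One pair of components per link: hoisting this allocation out of the
 * loop would leave every link pointing at the same two elements, and the
 * last iteration would silently overwrite the names set by the others. */
static int parse_links(struct link *links, int n)
{
	for (int i = 0; i < n; i++) {
		struct component *pair = calloc(2, sizeof(*pair));

		if (!pair)
			return -1;
		links[i].cpus = &pair[0];
		links[i].platforms = &pair[1];
		snprintf(links[i].cpus->name, sizeof(links[i].cpus->name),
			 "cpu-dai-%d", i);
	}
	return 0;
}

int main(void)
{
	struct link links[3] = { 0 };

	if (parse_links(links, 3))
		return 1;
	for (int i = 0; i < 3; i++)
		printf("link %d cpu: %s\n", i, links[i].cpus->name);
	return 0;
}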
+ */ + snd_soc_jack_notifier_register(&headset_jack, &rk_jack_nb); + + return 0; +} + static int rk_aif1_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { @@ -119,6 +150,7 @@ SND_SOC_DAILINK_DEFS(hifi, static struct snd_soc_dai_link rk_dailink = { .name = "max98090", .stream_name = "Audio", + .init = rk_init, .ops = &rk_aif1_ops, /* set max98090 as slave */ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c index dfb6e460e7eb..f0f5fa9c27d3 100644 --- a/sound/soc/samsung/odroid.c +++ b/sound/soc/samsung/odroid.c @@ -284,9 +284,8 @@ static int odroid_audio_probe(struct platform_device *pdev) } of_node_put(cpu); - of_node_put(codec); if (ret < 0) - return ret; + goto err_put_node; ret = snd_soc_of_get_dai_link_codecs(dev, codec, codec_link); if (ret < 0) @@ -309,7 +308,6 @@ static int odroid_audio_probe(struct platform_device *pdev) ret = PTR_ERR(priv->clk_i2s_bus); goto err_put_sclk; } - of_node_put(cpu_dai); ret = devm_snd_soc_register_card(dev, card); if (ret < 0) { @@ -317,6 +315,8 @@ static int odroid_audio_probe(struct platform_device *pdev) goto err_put_clk_i2s; } + of_node_put(cpu_dai); + of_node_put(codec); return 0; err_put_clk_i2s: @@ -326,6 +326,8 @@ err_put_sclk: err_put_cpu_dai: of_node_put(cpu_dai); snd_soc_of_put_dai_link_codecs(codec_link); +err_put_node: + of_node_put(codec); return ret; } diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index fd6eaae6c0ed..44f899b970c2 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1515,8 +1515,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card, } } - if (dai_link->dai_fmt) - snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt); + if (dai_link->dai_fmt) { + ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt); + if (ret) + return ret; + } ret = soc_post_component_init(rtd, dai_link->name); if (ret) diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index f013b24c050a..2790c00735f3 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -1157,8 +1157,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget, list_add_tail(&widget->work_list, list); if (custom_stop_condition && custom_stop_condition(widget, dir)) { - widget->endpoints[dir] = 1; - return widget->endpoints[dir]; + list = NULL; + custom_stop_condition = NULL; } if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) { @@ -1195,8 +1195,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget, * * Optionally, can be supplied with a function acting as a stopping condition. * This function takes the dapm widget currently being examined and the walk - * direction as an arguments, it should return true if the walk should be - * stopped and false otherwise. + * direction as an arguments, it should return true if widgets from that point + * in the graph onwards should not be added to the widget list. 
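The reworded comment above changes the contract of the optional stop callback in the DAPM graph walk: returning true no longer marks the current widget as an endpoint, it only keeps anything beyond that point out of the widget list. A small sketch of a depth-first walk with that kind of prune callback; the graph layout and the prune rule are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

struct node {
	const char *name;
	int nr_out;
	int out[2];
	bool is_ep;
	bool visited;
};

/* Walk the graph; when stop() returns true for a node, the node itself is
 * still examined but nothing beyond it is visited or counted, mirroring
 * the documented stop-condition semantics. */
static int count_endpoints(struct node *g, int id,
			   bool (*stop)(const struct node *))
{
	struct node *n = &g[id];
	int eps = 0;

	if (n->visited)
		return 0;
	n->visited = true;

	if (n->is_ep)
		eps++;

	if (stop && stop(n))
		return eps;

	for (int i = 0; i < n->nr_out; i++)
		eps += count_endpoints(g, n->out[i], stop);
	return eps;
}

static bool stop_at_mixer(const struct node *n)
{
	return n->name[0] == 'M';	/* hypothetical prune rule */
}

int main(void)
{
	struct node g[MAX_NODES] = {
		[0] = { .name = "DAC",   .nr_out = 1, .out = {1} },
		[1] = { .name = "Mixer", .nr_out = 2, .out = {2, 3} },
		[2] = { .name = "HP",    .is_ep = true },
		[3] = { .name = "SPK",   .is_ep = true },
	};

	/* the Mixer prunes the walk, so neither HP nor SPK is counted */
	printf("endpoints seen: %d\n", count_endpoints(g, 0, stop_at_mixer));
	return 0;
}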
*/ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, struct list_head *list, @@ -3706,6 +3706,8 @@ request_failed: dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n", w->name, ret); + kfree_const(w->sname); + kfree(w); return ERR_PTR(ret); } diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c index f2b392998f20..ffd8d4394537 100644 --- a/sound/soc/sof/intel/cnl.c +++ b/sound/soc/sof/intel/cnl.c @@ -101,8 +101,8 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context) /* * This interrupt is not shared so no need to return IRQ_NONE. */ - dev_err_ratelimited(sdev->dev, - "error: nothing to do in IRQ thread\n"); + dev_dbg_ratelimited(sdev->dev, + "nothing to do in IPC IRQ thread\n"); } /* re-enable IPC interrupt */ diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c index 50244b82600c..2ecba91f5219 100644 --- a/sound/soc/sof/intel/hda-ipc.c +++ b/sound/soc/sof/intel/hda-ipc.c @@ -224,8 +224,8 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context) /* * This interrupt is not shared so no need to return IRQ_NONE. */ - dev_err_ratelimited(sdev->dev, - "error: nothing to do in IRQ thread\n"); + dev_dbg_ratelimited(sdev->dev, + "nothing to do in IPC IRQ thread\n"); } /* re-enable IPC interrupt */ diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c index 9b2232908b65..7fa5c61169db 100644 --- a/sound/soc/sunxi/sun4i-i2s.c +++ b/sound/soc/sunxi/sun4i-i2s.c @@ -1002,8 +1002,8 @@ static const struct sun4i_i2s_quirks sun50i_a64_codec_i2s_quirks = { .field_rxchanmap = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31), .field_txchansel = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2), .field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2), - .get_sr = sun8i_i2s_get_sr_wss, - .get_wss = sun8i_i2s_get_sr_wss, + .get_sr = sun4i_i2s_get_sr, + .get_wss = sun4i_i2s_get_wss, }; static int sun4i_i2s_init_regmap_fields(struct device *dev, diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c index ac59b509ead5..bc7bf15ed7a4 100644 --- a/sound/soc/ti/davinci-mcasp.c +++ b/sound/soc/ti/davinci-mcasp.c @@ -195,7 +195,7 @@ static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable) { u32 bit; - for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AFSR) { + for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AMUTE) { if (enable) mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit)); else @@ -223,6 +223,7 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp) if (mcasp_is_synchronous(mcasp)) { mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST); mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST); + mcasp_set_clk_pdir(mcasp, true); } /* Activate serializer(s) */ @@ -1256,6 +1257,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream, return ret; } +static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params, + struct snd_pcm_hw_rule *rule) +{ + struct davinci_mcasp_ruledata *rd = rule->private; + struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + struct snd_mask nfmt; + int i, slot_width; + + snd_mask_none(&nfmt); + slot_width = rd->mcasp->slot_width; + + for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) { + if (snd_mask_test(fmt, i)) { + if (snd_pcm_format_width(i) <= slot_width) { + snd_mask_set(&nfmt, i); + } + } + } + + return snd_mask_refine(fmt, &nfmt); +} + static const unsigned int davinci_mcasp_dai_rates[] = { 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000, @@ -1377,7 +1400,7 @@ 
static int davinci_mcasp_startup(struct snd_pcm_substream *substream, struct davinci_mcasp_ruledata *ruledata = &mcasp->ruledata[substream->stream]; u32 max_channels = 0; - int i, dir; + int i, dir, ret; int tdm_slots = mcasp->tdm_slots; /* Do not allow more then one stream per direction */ @@ -1406,6 +1429,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, max_channels++; } ruledata->serializers = max_channels; + ruledata->mcasp = mcasp; max_channels *= tdm_slots; /* * If the already active stream has less channels than the calculated @@ -1431,20 +1455,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &mcasp->chconstr[substream->stream]); - if (mcasp->slot_width) - snd_pcm_hw_constraint_minmax(substream->runtime, - SNDRV_PCM_HW_PARAM_SAMPLE_BITS, - 8, mcasp->slot_width); + if (mcasp->slot_width) { + /* Only allow formats require <= slot_width bits on the bus */ + ret = snd_pcm_hw_rule_add(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_FORMAT, + davinci_mcasp_hw_rule_slot_width, + ruledata, + SNDRV_PCM_HW_PARAM_FORMAT, -1); + if (ret) + return ret; + } /* * If we rely on implicit BCLK divider setting we should * set constraints based on what we can provide. */ if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) { - int ret; - - ruledata->mcasp = mcasp; - ret = snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, davinci_mcasp_hw_rule_rate, diff --git a/sound/sound_core.c b/sound/sound_core.c index b730d97c4de6..90d118cd9164 100644 --- a/sound/sound_core.c +++ b/sound/sound_core.c @@ -275,7 +275,8 @@ retry: goto retry; } spin_unlock(&sound_loader_lock); - return -EBUSY; + r = -EBUSY; + goto fail; } } diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c index 14fc1e1d5d13..c406497c5919 100644 --- a/sound/usb/hiface/pcm.c +++ b/sound/usb/hiface/pcm.c @@ -600,14 +600,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq) ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP, hiface_pcm_out_urb_handler); if (ret < 0) - return ret; + goto error; } ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm); if (ret < 0) { - kfree(rt); dev_err(&chip->dev->dev, "Cannot create pcm instance\n"); - return ret; + goto error; } pcm->private_data = rt; @@ -620,4 +619,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq) chip->pcm = rt; return 0; + +error: + for (i = 0; i < PCM_N_URBS; i++) + kfree(rt->out_urbs[i].buffer); + kfree(rt); + return ret; } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 7498b5191b68..b5927c3d5bc0 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -68,6 +68,7 @@ struct mixer_build { unsigned char *buffer; unsigned int buflen; DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS); + DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS); struct usb_audio_term oterm; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; @@ -744,6 +745,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, return -EINVAL; if (!desc->bNrInPins) return -EINVAL; + if (desc->bLength < sizeof(*desc) + desc->bNrInPins) + return -EINVAL; switch (state->mixer->protocol) { case UAC_VERSION_1: @@ -773,16 +776,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, * parse the source unit recursively until it reaches to a terminal * or a branched unit. 
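The usb-audio mixer change that follows guards this recursive descent with a per-parse bitmap: every unit id is marked with test_and_set_bit() on entry, so a malformed descriptor chain that loops back onto itself is rejected with -EINVAL instead of recursing forever. A minimal sketch of that visited-bitmap idiom on a plain source-id chain; the types, limits and helper names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ID 64

struct unit {
	int id;
	int source_id;	/* id of the unit this one reads from, -1 = terminal */
};

/* returns true if the bit was already set, and sets it otherwise,
 * loosely mirroring test_and_set_bit() */
static bool test_and_set(unsigned long *map, int id)
{
	unsigned long mask = 1UL << (id % (8 * sizeof(*map)));
	unsigned long *word = &map[id / (8 * sizeof(*map))];
	bool was_set = *word & mask;

	*word |= mask;
	return was_set;
}

static int follow_chain(const struct unit *units, int id, unsigned long *seen)
{
	for (;;) {
		if (test_and_set(seen, id))
			return -1;	/* loop in the chain */
		if (units[id].source_id < 0)
			return id;	/* reached a terminal */
		id = units[id].source_id;
	}
}

int main(void)
{
	/* 0 -> 1 -> 2 -> 1: a descriptor loop */
	const struct unit units[] = {
		{ .id = 0, .source_id = 1 },
		{ .id = 1, .source_id = 2 },
		{ .id = 2, .source_id = 1 },
	};
	unsigned long seen[MAX_ID / (8 * sizeof(unsigned long)) + 1] = { 0 };

	printf("chain result: %d\n", follow_chain(units, 0, seen));
	return 0;
}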
*/ -static int check_input_term(struct mixer_build *state, int id, +static int __check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term) { int protocol = state->mixer->protocol; int err; void *p1; + unsigned char *hdr; memset(term, 0, sizeof(*term)); - while ((p1 = find_audio_control_unit(state, id)) != NULL) { - unsigned char *hdr = p1; + for (;;) { + /* a loop in the terminal chain? */ + if (test_and_set_bit(id, state->termbitmap)) + return -EINVAL; + + p1 = find_audio_control_unit(state, id); + if (!p1) + break; + + hdr = p1; term->id = id; if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) { @@ -800,7 +812,7 @@ static int check_input_term(struct mixer_build *state, int id, /* call recursively to verify that the * referenced clock entity is valid */ - err = check_input_term(state, d->bCSourceID, term); + err = __check_input_term(state, d->bCSourceID, term); if (err < 0) return err; @@ -834,7 +846,7 @@ static int check_input_term(struct mixer_build *state, int id, case UAC2_CLOCK_SELECTOR: { struct uac_selector_unit_descriptor *d = p1; /* call recursively to retrieve the channel info */ - err = check_input_term(state, d->baSourceID[0], term); + err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ @@ -897,7 +909,7 @@ static int check_input_term(struct mixer_build *state, int id, /* call recursively to verify that the * referenced clock entity is valid */ - err = check_input_term(state, d->bCSourceID, term); + err = __check_input_term(state, d->bCSourceID, term); if (err < 0) return err; @@ -948,7 +960,7 @@ static int check_input_term(struct mixer_build *state, int id, case UAC3_CLOCK_SELECTOR: { struct uac_selector_unit_descriptor *d = p1; /* call recursively to retrieve the channel info */ - err = check_input_term(state, d->baSourceID[0], term); + err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ @@ -964,7 +976,7 @@ static int check_input_term(struct mixer_build *state, int id, return -EINVAL; /* call recursively to retrieve the channel info */ - err = check_input_term(state, d->baSourceID[0], term); + err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; @@ -982,6 +994,15 @@ static int check_input_term(struct mixer_build *state, int id, return -ENODEV; } + +static int check_input_term(struct mixer_build *state, int id, + struct usb_audio_term *term) +{ + memset(term, 0, sizeof(*term)); + memset(state->termbitmap, 0, sizeof(state->termbitmap)); + return __check_input_term(state, id, term); +} + /* * Feature Unit */ diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 7ee9d17d0143..e852c7fd6109 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -1043,6 +1043,7 @@ found_clock: pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) { + kfree(fp->chmap); kfree(fp->rate_table); kfree(fp); return NULL; diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 5215e0870bcb..6a71324be628 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -204,7 +204,11 @@ int do_pin_fd(int fd, const char *name) if (err) return err; - return bpf_obj_pin(fd, name); + err = bpf_obj_pin(fd, name); + if (err) + p_err("can't pin the object (%s): %s", name, strerror(errno)); + + return err; } int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) @@ -237,7 +241,7 @@ int do_pin_any(int argc, char **argv, int 
(*get_fd_by_id)(__u32)) fd = get_fd_by_id(id); if (fd < 0) { - p_err("can't get prog by id (%u): %s", id, strerror(errno)); + p_err("can't open object by id (%u): %s", id, strerror(errno)); return -1; } diff --git a/tools/hv/hv_get_dhcp_info.sh b/tools/hv/hv_get_dhcp_info.sh index c38686c44656..2f2a3c7df3de 100755 --- a/tools/hv/hv_get_dhcp_info.sh +++ b/tools/hv/hv_get_dhcp_info.sh @@ -13,7 +13,7 @@ # the script prints the string "Disabled" to stdout. # # Each Distro is expected to implement this script in a distro specific -# fashion. For instance on Distros that ship with Network Manager enabled, +# fashion. For instance, on Distros that ship with Network Manager enabled, # this script can be based on the Network Manager APIs for retrieving DHCP # information. diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index d7e06fe0270e..f5597503c771 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -700,7 +700,7 @@ static void kvp_get_ipconfig_info(char *if_name, /* - * Gather the DNS state. + * Gather the DNS state. * Since there is no standard way to get this information * across various distributions of interest; we just invoke * an external script that needs to be ported across distros @@ -1051,7 +1051,7 @@ static int parse_ip_val_buffer(char *in_buf, int *offset, char *start; /* - * in_buf has sequence of characters that are seperated by + * in_buf has sequence of characters that are separated by * the character ';'. The last sequence does not have the * terminating ";" character. */ @@ -1386,6 +1386,8 @@ int main(int argc, char *argv[]) daemonize = 0; break; case 'h': + print_usage(argv); + exit(0); default: print_usage(argv); exit(EXIT_FAILURE); @@ -1490,7 +1492,7 @@ int main(int argc, char *argv[]) case KVP_OP_GET_IP_INFO: kvp_ip_val = &hv_msg->body.kvp_ip_val; - error = kvp_mac_to_ip(kvp_ip_val); + error = kvp_mac_to_ip(kvp_ip_val); if (error) hv_msg->error = error; diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh index 7ed9f85ef908..d10fe35b7f25 100755 --- a/tools/hv/hv_set_ifconfig.sh +++ b/tools/hv/hv_set_ifconfig.sh @@ -12,7 +12,7 @@ # be used to configure the interface. # # Each Distro is expected to implement this script in a distro specific -# fashion. For instance on Distros that ship with Network Manager enabled, +# fashion. For instance, on Distros that ship with Network Manager enabled, # this script can be based on the Network Manager APIs for configuring the # interface. # diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c index efe1e34dd91b..92902a88f671 100644 --- a/tools/hv/hv_vss_daemon.c +++ b/tools/hv/hv_vss_daemon.c @@ -42,7 +42,7 @@ static int vss_do_freeze(char *dir, unsigned int cmd) * If a partition is mounted more than once, only the first * FREEZE/THAW can succeed and the later ones will get * EBUSY/EINVAL respectively: there could be 2 cases: - * 1) a user may mount the same partition to differnt directories + * 1) a user may mount the same partition to different directories * by mistake or on purpose; * 2) The subvolume of btrfs appears to have the same partition * mounted more than once. @@ -218,6 +218,8 @@ int main(int argc, char *argv[]) daemonize = 0; break; case 'h': + print_usage(argv); + exit(0); default: print_usage(argv); exit(EXIT_FAILURE); diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus index 55e7374bade0..099f2c44dbed 100644 --- a/tools/hv/lsvmbus +++ b/tools/hv/lsvmbus @@ -4,10 +4,10 @@ import os from optparse import OptionParser +help_msg = "print verbose messages. 
Try -vv, -vvv for more verbose messages" parser = OptionParser() -parser.add_option("-v", "--verbose", dest="verbose", - help="print verbose messages. Try -vv, -vvv for \ - more verbose messages", action="count") +parser.add_option( + "-v", "--verbose", dest="verbose", help=help_msg, action="count") (options, args) = parser.parse_args() @@ -21,27 +21,28 @@ if not os.path.isdir(vmbus_sys_path): exit(-1) vmbus_dev_dict = { - '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]', - '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]', - '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]', - '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]', - '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]', - '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]', - '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]', - '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse', - '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard', - '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter', - '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter', - '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller', - '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller', - '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter', - '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter', - '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through', - '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]', - '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]', - '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]', + '{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]', + '{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]', + '{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]', + '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]', + '{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]', + '{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]', + '{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]', + '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse', + '{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard', + '{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter', + '{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter', + '{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller', + '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller', + '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter', + '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter', + '{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through', + '{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]', + '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]', + '{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]', } + def get_vmbus_dev_attr(dev_name, attr): try: f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r') @@ -52,6 +53,7 @@ def get_vmbus_dev_attr(dev_name, attr): return lines + class VMBus_Dev: pass @@ -66,12 +68,13 @@ for f in os.listdir(vmbus_sys_path): chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping') chn_vp_mapping = [c.strip() for c in chn_vp_mapping] - chn_vp_mapping = sorted(chn_vp_mapping, - key = lambda c : 
int(c.split(':')[0])) + chn_vp_mapping = sorted( + chn_vp_mapping, key=lambda c: int(c.split(':')[0])) - chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' % - (c.split(':')[0], c.split(':')[1]) - for c in chn_vp_mapping] + chn_vp_mapping = [ + '\tRel_ID=%s, target_cpu=%s' % + (c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping + ] d = VMBus_Dev() d.sysfs_path = '%s/%s' % (vmbus_sys_path, f) d.vmbus_id = vmbus_id @@ -85,7 +88,7 @@ for f in os.listdir(vmbus_sys_path): vmbus_dev_list.append(d) -vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id)) +vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id)) format0 = '%2s: %s' format1 = '%2s: Class_ID = %s - %s\n%s' @@ -95,9 +98,15 @@ for d in vmbus_dev_list: if verbose == 0: print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc)) elif verbose == 1: - print (('VMBUS ID ' + format1) % \ - (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)) + print( + ('VMBUS ID ' + format1) % + (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping) + ) else: - print (('VMBUS ID ' + format2) % \ - (d.vmbus_id, d.class_id, d.dev_desc, \ - d.device_id, d.sysfs_path, d.chn_vp_mapping)) + print( + ('VMBUS ID ' + format2) % + ( + d.vmbus_id, d.class_id, d.dev_desc, + d.device_id, d.sysfs_path, d.chn_vp_mapping + ) + ) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4393bd4b2419..0e66371bea13 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1467,8 +1467,8 @@ union bpf_attr { * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket - * networking traffic statistics as it provides a unique socket - * identifier per namespace. + * networking traffic statistics as it provides a global socket + * identifier that can be assumed unique. * Return * A 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. 
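The libbpf change just below reworks libbpf_num_possible_cpus(): the count is accumulated in a local tmp_cpus while /sys/devices/system/cpu/possible is parsed, and only published to the static cache with WRITE_ONCE() once parsing has fully succeeded. For reference, a standalone sketch of parsing that "0-3,5"-style range syntax into a CPU count, with error handling deliberately simplified:

#include <stdio.h>
#include <stdlib.h>

/* Count CPUs in a "possible"-style mask such as "0-3,5,7-8".
 * Returns -1 on malformed input. */
static int count_cpus(const char *mask)
{
	int total = 0;

	while (*mask) {
		char *end;
		long start = strtol(mask, &end, 10);
		long stop = start;

		if (end == mask)
			return -1;
		if (*end == '-') {
			mask = end + 1;
			stop = strtol(mask, &end, 10);
			if (end == mask || stop < start)
				return -1;
		}
		total += stop - start + 1;
		if (*end == ',' || *end == '\n')
			end++;
		else if (*end != '\0')
			return -1;
		mask = end;
	}
	return total;
}

int main(void)
{
	printf("%d\n", count_cpus("0-3,5,7-8"));	/* expect 7 */
	return 0;
}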
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2233f919dd88..e0276520171b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -187,7 +187,6 @@ struct bpf_program { bpf_program_clear_priv_t clear_priv; enum bpf_attach_type expected_attach_type; - int btf_fd; void *func_info; __u32 func_info_rec_size; __u32 func_info_cnt; @@ -318,7 +317,6 @@ void bpf_program__unload(struct bpf_program *prog) prog->instances.nr = -1; zfree(&prog->instances.fds); - zclose(prog->btf_fd); zfree(&prog->func_info); zfree(&prog->line_info); } @@ -397,7 +395,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx, prog->instances.fds = NULL; prog->instances.nr = -1; prog->type = BPF_PROG_TYPE_UNSPEC; - prog->btf_fd = -1; return 0; errout: @@ -2296,9 +2293,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); } - if (!insn_offset) - prog->btf_fd = btf__fd(obj->btf); - return 0; } @@ -3366,7 +3360,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, char *cp, errmsg[STRERR_BUFSIZE]; int log_buf_size = BPF_LOG_BUF_SIZE; char *log_buf; - int ret; + int btf_fd, ret; if (!insns || !insns_cnt) return -EINVAL; @@ -3381,7 +3375,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.license = license; load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; - load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; + /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ + if (prog->obj->btf_ext) + btf_fd = bpf_object__btf_fd(prog->obj); + else + btf_fd = -1; + load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0; load_attr.func_info = prog->func_info; load_attr.func_info_rec_size = prog->func_info_rec_size; load_attr.func_info_cnt = prog->func_info_cnt; @@ -5903,13 +5902,15 @@ int libbpf_num_possible_cpus(void) static const char *fcpu = "/sys/devices/system/cpu/possible"; int len = 0, n = 0, il = 0, ir = 0; unsigned int start = 0, end = 0; + int tmp_cpus = 0; static int cpus; char buf[128]; int error = 0; int fd = -1; - if (cpus > 0) - return cpus; + tmp_cpus = READ_ONCE(cpus); + if (tmp_cpus > 0) + return tmp_cpus; fd = open(fcpu, O_RDONLY); if (fd < 0) { @@ -5932,7 +5933,7 @@ int libbpf_num_possible_cpus(void) } buf[len] = '\0'; - for (ir = 0, cpus = 0; ir <= len; ir++) { + for (ir = 0, tmp_cpus = 0; ir <= len; ir++) { /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ if (buf[ir] == ',' || buf[ir] == '\0') { buf[ir] = '\0'; @@ -5944,13 +5945,15 @@ int libbpf_num_possible_cpus(void) } else if (n == 1) { end = start; } - cpus += end - start + 1; + tmp_cpus += end - start + 1; il = ir + 1; } } - if (cpus <= 0) { - pr_warning("Invalid #CPUs %d from %s\n", cpus, fcpu); + if (tmp_cpus <= 0) { + pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu); return -EINVAL; } - return cpus; + + WRITE_ONCE(cpus, tmp_cpus); + return tmp_cpus; } diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index 6d148a40551c..adc5a7e44b98 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -242,7 +242,7 @@ $(OUTPUT)doc.dep : $(wildcard *.txt) build-docdep.perl $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \ mv $@+ $@ --include $(OUPTUT)doc.dep +-include $(OUTPUT)doc.dep _cmds_txt = cmds-ancillaryinterrogators.txt \ cmds-ancillarymanipulators.txt \ diff --git a/tools/perf/arch/s390/util/machine.c 
b/tools/perf/arch/s390/util/machine.c index a19690a17291..c8c86a0c9b79 100644 --- a/tools/perf/arch/s390/util/machine.c +++ b/tools/perf/arch/s390/util/machine.c @@ -6,8 +6,9 @@ #include "machine.h" #include "api/fs/fs.h" #include "debug.h" +#include "symbol.h" -int arch__fix_module_text_start(u64 *start, const char *name) +int arch__fix_module_text_start(u64 *start, u64 *size, const char *name) { u64 m_start = *start; char path[PATH_MAX]; @@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name) if (sysfs__read_ull(path, (unsigned long long *)start) < 0) { pr_debug2("Using module %s start:%#lx\n", path, m_start); *start = m_start; + } else { + /* Successful read of the modules segment text start address. + * Calculate difference between module start address + * in memory and module text segment start address. + * For example module load address is 0x3ff8011b000 + * (from /proc/modules) and module text segment start + * address is 0x3ff8011b870 (from file above). + * + * Adjust the module size and subtract the GOT table + * size located at the beginning of the module. + */ + *size -= (*start - m_start); } return 0; } + +/* On s390 kernel text segment start is located at very low memory addresses, + * for example 0x10000. Modules are located at very high memory addresses, + * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment + * and beginning of first module's text segment is very big. + * Therefore do not fill this gap and do not assign it to the kernel dso map. + */ +void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) +{ + if (strchr(p->name, '[') == NULL && strchr(c->name, '[')) + /* Last kernel symbol mapped to end of page */ + p->end = roundup(p->end, page_size); + else + p->end = c->start; + pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end); +} diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index a640ca7aaada..513cb2f2fa32 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -379,8 +379,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags, /* Allocate and initialize all memory on CPU#0: */ if (init_cpu0) { - orig_mask = bind_to_node(0); - bind_to_memnode(0); + int node = numa_node_of_cpu(0); + + orig_mask = bind_to_node(node); + bind_to_memnode(node); } bytes = bytes0 + HPSIZE; diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index 66d5a6658daf..019312810405 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -173,7 +173,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap) int last_cpu; last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1); - mask_size = (last_cpu + 3) / 4 + 1; + mask_size = last_cpu / 4 + 2; /* one more byte for EOS */ mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ cpumask = malloc(mask_size); diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index 1a91a197cafb..d413761621b0 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -453,6 +453,7 @@ static struct fixed { { "inst_retired.any_p", "event=0xc0" }, { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" }, { "cpu_clk_unhalted.thread", "event=0x3c" }, + { "cpu_clk_unhalted.core", "event=0x3c" }, { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" }, { NULL, NULL}, }; diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index f80c51d53565..d227d74b28f8 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 
-#include "../string2.h" -#include "../config.h" -#include "../../perf.h" +#include "../util/util.h" +#include "../util/string2.h" +#include "../util/config.h" +#include "../perf.h" #include "libslang.h" #include "ui.h" #include "util.h" @@ -14,7 +15,7 @@ #include "browser.h" #include "helpline.h" #include "keysyms.h" -#include "../color.h" +#include "../util/color.h" #include <linux/ctype.h> #include <linux/zalloc.h> diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c index bc134b82829d..5a24dd3ce4db 100644 --- a/tools/perf/ui/tui/progress.c +++ b/tools/perf/ui/tui/progress.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> -#include "../cache.h" +#include "../../util/cache.h" #include "../progress.h" #include "../libslang.h" #include "../ui.h" diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index ac9ad2330f93..163536720149 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1122,7 +1122,7 @@ static int disasm_line__parse(char *line, const char **namep, char **rawp) goto out; (*rawp)[0] = tmp; - *rawp = skip_spaces(*rawp); + *rawp = strim(*rawp); return 0; diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 3acfbe34ebaf..39cce66b4ebc 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -751,7 +751,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size) unsigned char *bitmap; int last_cpu = cpu_map__cpu(map, map->nr - 1); - bitmap = zalloc((last_cpu + 7) / 8); + if (buf == NULL) + return 0; + + bitmap = zalloc(last_cpu / 8 + 1); if (bitmap == NULL) { buf[0] = '\0'; return 0; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index cf826eca3aaf..83b2fbbeeb90 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1378,6 +1378,7 @@ static int machine__set_modules_path(struct machine *machine) return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); } int __weak arch__fix_module_text_start(u64 *start __maybe_unused, + u64 *size __maybe_unused, const char *name __maybe_unused) { return 0; @@ -1389,7 +1390,7 @@ static int machine__create_module(void *arg, const char *name, u64 start, struct machine *machine = arg; struct map *map; - if (arch__fix_module_text_start(&start, name) < 0) + if (arch__fix_module_text_start(&start, &size, name) < 0) return -1; map = machine__findnew_module_map(machine, start, name); diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index f70ab98a7bde..7aa38da26427 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -222,7 +222,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine, struct map *machine__findnew_module_map(struct machine *machine, u64 start, const char *filename); -int arch__fix_module_text_start(u64 *start, const char *name); +int arch__fix_module_text_start(u64 *start, u64 *size, const char *name); int machine__load_kallsyms(struct machine *machine, const char *filename); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 173f3378aaa0..4efde7879474 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -92,6 +92,11 @@ static int prefix_underscores_count(const char *str) return tail - str; } +void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c) +{ + p->end = c->start; +} + const char * __weak arch__normalize_symbol_name(const char *name) { return name; @@ -218,7 +223,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols) 
curr = rb_entry(nd, struct symbol, rb_node); if (prev->end == prev->start && prev->end != curr->start) - prev->end = curr->start; + arch__symbols__fixup_end(prev, curr); } /* Last entry */ diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 12755b42ea93..183f630cb5f1 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -288,6 +288,7 @@ const char *arch__normalize_symbol_name(const char *name); #define SYMBOL_A 0 #define SYMBOL_B 1 +void arch__symbols__fixup_end(struct symbol *p, struct symbol *c); int arch__compare_symbol_names(const char *namea, const char *nameb); int arch__compare_symbol_names_n(const char *namea, const char *nameb, unsigned int n); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 873ab505ca80..590793cc5142 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -214,14 +214,24 @@ struct comm *thread__comm(const struct thread *thread) struct comm *thread__exec_comm(const struct thread *thread) { - struct comm *comm, *last = NULL; + struct comm *comm, *last = NULL, *second_last = NULL; list_for_each_entry(comm, &thread->comm_list, list) { if (comm->exec) return comm; + second_last = last; last = comm; } + /* + * 'last' with no start time might be the parent's comm of a synthesized + * thread (created by processing a synthesized fork event). For a main + * thread, that is very probably wrong. Prefer a later comm to avoid + * that case. + */ + if (second_last && !last->start && thread->pid_ == thread->tid) + return second_last; + return last; } diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c index 5e980a5ab69d..1fc4e61e9f9f 100644 --- a/tools/testing/selftests/bpf/verifier/loops1.c +++ b/tools/testing/selftests/bpf/verifier/loops1.c @@ -159,3 +159,31 @@ .errstr = "loop detected", .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, +{ + "not-taken loop with back jump to 1st insn", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 123), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .retval = 123, +}, +{ + "taken loop with back jump to 1st insn", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .retval = 55, +}, diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 41266af0d3dc..b35da375530a 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,7 +1,7 @@ +/s390x/sync_regs_test /x86_64/cr4_cpuid_sync_test /x86_64/evmcs_test /x86_64/hyperv_cpuid -/x86_64/kvm_create_max_vcpus /x86_64/mmio_warning_test /x86_64/platform_info_test /x86_64/set_sregs_test @@ -13,3 +13,4 @@ /x86_64/vmx_tsc_adjust_test /clear_dirty_log_test /dirty_log_test +/kvm_create_max_vcpus diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config new file mode 100644 index 000000000000..63ed533f73d6 --- /dev/null +++ b/tools/testing/selftests/kvm/config @@ -0,0 +1,3 @@ +CONFIG_KVM=y +CONFIG_KVM_INTEL=y +CONFIG_KVM_AMD=y diff --git a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh index 
41476399e184..f6e65674b83c 100755 --- a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh +++ b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh @@ -30,7 +30,7 @@ do_test() { ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1" val=$(ip netns exec "${NETNS}" nstat -az | \ grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}') - if [ $val -ne 0 ]; then + if [ "$val" != 0 ]; then echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero" return 1 fi diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh index fe52488a6f72..16571ac1dab4 100755 --- a/tools/testing/selftests/netfilter/nft_flowtable.sh +++ b/tools/testing/selftests/netfilter/nft_flowtable.sh @@ -321,4 +321,52 @@ else ip netns exec nsr1 nft list ruleset fi +KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1) +KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1) +SPI1=$RANDOM +SPI2=$RANDOM + +if [ $SPI1 -eq $SPI2 ]; then + SPI2=$((SPI2+1)) +fi + +do_esp() { + local ns=$1 + local me=$2 + local remote=$3 + local lnet=$4 + local rnet=$5 + local spi_out=$6 + local spi_in=$7 + + ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet + ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet + + # to encrypt packets as they go out (includes forwarded packets that need encapsulation) + ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 1 action allow + # to fwd decrypted packets after esp processing: + ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 1 action allow + +} + +do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2 + +do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1 + +ip netns exec nsr1 nft delete table ip nat + +# restore default routes +ip -net ns2 route del 192.168.10.1 via 10.0.2.1 +ip -net ns2 route add default via 10.0.2.1 +ip -net ns2 route add default via dead:2::1 + +test_tcp_forwarding ns1 ns2 +if [ $? 
-eq 0 ] ;then + echo "PASS: ipsec tunnel mode for ns1/ns2" +else + echo "FAIL: ipsec tunnel mode for ns1/ns2" + ip netns exec nsr1 nft list ruleset 1>&2 + ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2 +fi + exit $ret diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json index bf5ebf59c2d4..9cdd2e31ac2c 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json @@ -670,5 +670,52 @@ "teardown": [ "$TC actions flush action skbedit" ] + }, + { + "id": "630c", + "name": "Add batch of 32 skbedit actions with all parameters and cookie", + "category": [ + "actions", + "skbedit" + ], + "setup": [ + [ + "$TC actions flush action skbedit", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"", + "expExitCode": "0", + "verifyCmd": "$TC actions list action skbedit", + "matchPattern": "^[ \t]+index [0-9]+ ref", + "matchCount": "32", + "teardown": [ + "$TC actions flush action skbedit" + ] + }, + { + "id": "706d", + "name": "Delete batch of 32 skbedit actions with all parameters", + "category": [ + "actions", + "skbedit" + ], + "setup": [ + [ + "$TC actions flush action skbedit", + 0, + 1, + 255 + ], + "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\"" + ], + "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"", + "expExitCode": "0", + "verifyCmd": "$TC actions list action skbedit", + "matchPattern": "^[ \t]+index [0-9]+ ref", + "matchCount": "0", + "teardown": [] } ] diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index acc43242a310..35a069815baf 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -144,11 +144,6 @@ out_fail_alloc: return ret; } -bool kvm_arch_has_vcpu_debugfs(void) -{ - return false; -} - int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) { return 0; @@ -323,6 +318,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) { + /* + * If we're about to block (most likely because we've just hit a + * WFI), we need to sync back the state of the GIC CPU interface + * so that we have the lastest PMR and group enables. This ensures + * that kvm_arch_vcpu_runnable has up-to-date data to decide + * whether we have pending interrupts. 
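The point of the comment above is that the runnable check must not look at a stale software copy of the interrupt controller state. A deliberately simplified model of that situation, with the register layout, values and priority rule made up for illustration (lower numeric priority wins against the mask, loosely like a GIC PMR):

#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
	unsigned int hw_priority_mask;		/* lives in the (emulated) device */
	unsigned int cached_priority_mask;	/* what the runnable check reads */
	unsigned int pending_priority;
};

/* stand-in for the vmcr sync: pull the live register value back into
 * the software copy before anyone evaluates the wait predicate */
static void vmcr_sync(struct vcpu_state *v)
{
	v->cached_priority_mask = v->hw_priority_mask;
}

static bool vcpu_runnable(const struct vcpu_state *v)
{
	/* an interrupt is deliverable only if it beats the priority mask */
	return v->pending_priority < v->cached_priority_mask;
}

int main(void)
{
	struct vcpu_state v = {
		.hw_priority_mask = 0xf0,	/* guest just raised the mask */
		.cached_priority_mask = 0x00,	/* stale software copy */
		.pending_priority = 0x80,
	};

	printf("stale check:  runnable=%d\n", vcpu_runnable(&v));	/* misses the IRQ */
	vmcr_sync(&v);				/* what the hunk adds before blocking */
	printf("synced check: runnable=%d\n", vcpu_runnable(&v));	/* now sees it */
	return 0;
}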
+ */ + preempt_disable(); + kvm_vgic_vmcr_sync(vcpu); + preempt_enable(); + kvm_vgic_v4_enable_doorbell(vcpu); } @@ -340,6 +346,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) /* Set up the timer */ kvm_timer_vcpu_init(vcpu); + kvm_pmu_vcpu_init(vcpu); + kvm_arm_reset_debug_ptr(vcpu); return kvm_vgic_vcpu_init(vcpu); diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c index 254c5f190a3d..ccf1fde9836c 100644 --- a/virt/kvm/arm/hyp/vgic-v3-sr.c +++ b/virt/kvm/arm/hyp/vgic-v3-sr.c @@ -349,8 +349,10 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu) case 7: cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3); cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2); + /* Fall through */ case 6: cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1); + /* Fall through */ default: cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0); } @@ -359,8 +361,10 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu) case 7: cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3); cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2); + /* Fall through */ case 6: cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1); + /* Fall through */ default: cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0); } @@ -382,8 +386,10 @@ void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu) case 7: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3); __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2); + /* Fall through */ case 6: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1); + /* Fall through */ default: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0); } @@ -392,8 +398,10 @@ void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu) case 7: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3); __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2); + /* Fall through */ case 6: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1); + /* Fall through */ default: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0); } diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 3dd8238ed246..362a01886bab 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -215,6 +215,20 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) } /** + * kvm_pmu_vcpu_init - assign pmu counter idx for cpu + * @vcpu: The vcpu pointer + * + */ +void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) + pmu->pmc[i].idx = i; +} + +/** * kvm_pmu_vcpu_reset - reset pmu state for cpu * @vcpu: The vcpu pointer * @@ -224,10 +238,8 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) int i; struct kvm_pmu *pmu = &vcpu->arch.pmu; - for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); - pmu->pmc[i].idx = i; - } bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); } diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 3ba7278fb533..44efc2ff863f 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -113,6 +113,22 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); raw_spin_lock_irqsave(&irq->irq_lock, flags); + if (vgic_irq_is_mapped_level(irq)) { + bool was_high = irq->line_level; + + /* + * We need to update the state of the interrupt because + * the guest might have changed the state of the device + * while the interrupt was disabled at the VGIC level. 
+ */ + irq->line_level = vgic_get_phys_line_level(irq); + /* + * Deactivate the physical interrupt so the GIC will let + * us know when it is asserted again. + */ + if (!irq->active && was_high && !irq->line_level) + vgic_irq_set_phys_active(irq, false); + } irq->enabled = true; vgic_queue_irq_unlock(vcpu->kvm, irq, flags); diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 6dd5ad706c92..96aab77d0471 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu) kvm_vgic_global_state.vctrl_base + GICH_APR); } -void vgic_v2_put(struct kvm_vcpu *vcpu) +void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu) { struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR); +} + +void vgic_v2_put(struct kvm_vcpu *vcpu) +{ + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + + vgic_v2_vmcr_sync(vcpu); cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR); } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index c2c9ce009f63..0c653a1e5215 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) __vgic_v3_activate_traps(vcpu); } -void vgic_v3_put(struct kvm_vcpu *vcpu) +void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; if (likely(cpu_if->vgic_sre)) cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); +} + +void vgic_v3_put(struct kvm_vcpu *vcpu) +{ + vgic_v3_vmcr_sync(vcpu); kvm_call_hyp(__vgic_v3_save_aprs, vcpu); diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 04786c8ec77e..13d4b38a94ec 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu) vgic_v3_put(vcpu); } +void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu) +{ + if (unlikely(!irqchip_in_kernel(vcpu->kvm))) + return; + + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_vmcr_sync(vcpu); + else + vgic_v3_vmcr_sync(vcpu); +} + int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 3b7525deec80..797e05004d80 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, void vgic_v2_init_lrs(void); void vgic_v2_load(struct kvm_vcpu *vcpu); void vgic_v2_put(struct kvm_vcpu *vcpu); +void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu); void vgic_v2_save_state(struct kvm_vcpu *vcpu); void vgic_v2_restore_state(struct kvm_vcpu *vcpu); @@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm); void vgic_v3_load(struct kvm_vcpu *vcpu); void vgic_v3_put(struct kvm_vcpu *vcpu); +void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu); bool vgic_has_its(struct kvm *kvm); int kvm_vgic_register_its_device(void); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 887f3b0c2b60..c6a91b044d8d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1855,8 +1855,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn) if (!kvm_is_reserved_pfn(pfn)) { struct page *page = pfn_to_page(pfn); - if (!PageReserved(page)) - SetPageDirty(page); + SetPageDirty(page); } } EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); @@ -2477,6 +2476,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 
#endif } +/* + * Unlike kvm_arch_vcpu_runnable, this function is called outside + * a vcpu_load/vcpu_put pair. However, for most architectures + * kvm_arch_vcpu_runnable does not require vcpu_load. + */ +bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) +{ + return kvm_arch_vcpu_runnable(vcpu); +} + +static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) +{ + if (kvm_arch_dy_runnable(vcpu)) + return true; + +#ifdef CONFIG_KVM_ASYNC_PF + if (!list_empty_careful(&vcpu->async_pf.done)) + return true; +#endif + + return false; +} + void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) { struct kvm *kvm = me->kvm; @@ -2506,9 +2528,10 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) continue; if (vcpu == me) continue; - if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) + if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu)) continue; - if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu)) + if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && + !kvm_arch_vcpu_in_kernel(vcpu)) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; @@ -2591,30 +2614,20 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu) return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); } -static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) { +#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS char dir_name[ITOA_MAX_LEN * 2]; - int ret; - - if (!kvm_arch_has_vcpu_debugfs()) - return 0; if (!debugfs_initialized()) - return 0; + return; snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); vcpu->debugfs_dentry = debugfs_create_dir(dir_name, - vcpu->kvm->debugfs_dentry); - if (!vcpu->debugfs_dentry) - return -ENOMEM; - - ret = kvm_arch_create_vcpu_debugfs(vcpu); - if (ret < 0) { - debugfs_remove_recursive(vcpu->debugfs_dentry); - return ret; - } + vcpu->kvm->debugfs_dentry); - return 0; + kvm_arch_create_vcpu_debugfs(vcpu); +#endif } /* @@ -2649,9 +2662,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) if (r) goto vcpu_destroy; - r = kvm_create_vcpu_debugfs(vcpu); - if (r) - goto vcpu_destroy; + kvm_create_vcpu_debugfs(vcpu); mutex_lock(&kvm->lock); if (kvm_get_vcpu_by_id(kvm, id)) { @@ -4205,7 +4216,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu) { struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); - vcpu->preempted = false; + WRITE_ONCE(vcpu->preempted, false); WRITE_ONCE(vcpu->ready, false); kvm_arch_sched_in(vcpu, cpu); @@ -4219,7 +4230,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); if (current->state == TASK_RUNNING) { - vcpu->preempted = true; + WRITE_ONCE(vcpu->preempted, true); WRITE_ONCE(vcpu->ready, true); } kvm_arch_vcpu_put(vcpu); |
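The final kvm_main.c hunks switch vcpu->preempted over to READ_ONCE()/WRITE_ONCE(), since the spin-yield path reads a flag that another CPU's sched-in/sched-out hooks may be flipping concurrently. A userspace sketch of the same publish/consume pattern, using C11 relaxed atomics in place of the kernel macros (names and structure are illustrative, not KVM's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	int id;
	atomic_bool preempted;	/* flipped by scheduler hooks, read lock-free elsewhere */
};

/* analogues of the sched-out/sched-in hooks: single-copy-atomic stores,
 * which is roughly the guarantee WRITE_ONCE() gives the kernel code */
static void sched_out(struct vcpu *v)
{
	atomic_store_explicit(&v->preempted, true, memory_order_relaxed);
}

static void sched_in(struct vcpu *v)
{
	atomic_store_explicit(&v->preempted, false, memory_order_relaxed);
}

/* the reader side: a tear-free snapshot, analogous to READ_ONCE() */
static bool was_preempted(struct vcpu *v)
{
	return atomic_load_explicit(&v->preempted, memory_order_relaxed);
}

int main(void)
{
	struct vcpu v = { .id = 0 };

	sched_out(&v);
	printf("vcpu %d preempted: %d\n", v.id, was_preempted(&v));
	sched_in(&v);
	printf("vcpu %d preempted: %d\n", v.id, was_preempted(&v));
	return 0;
}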