diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt deleted file mode 100644 index 3a50a7862cf3..000000000000 --- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt +++ /dev/null @@ -1,119 +0,0 @@ -Amlogic specific extensions to the Synopsys Designware HDMI Controller -====================================================================== - -The Amlogic Meson Synopsys Designware Integration is composed of : -- A Synopsys DesignWare HDMI Controller IP -- A TOP control block controlling the Clocks and PHY -- A custom HDMI PHY in order to convert video to TMDS signal - ___________________________________ -| HDMI TOP |<= HPD -|___________________________________| -| | | -| Synopsys HDMI | HDMI PHY |=> TMDS -| Controller |________________| -|___________________________________|<=> DDC - -The HDMI TOP block only supports HPD sensing. -The Synopsys HDMI Controller interrupt is routed through the -TOP Block interrupt. -Communication to the TOP Block and the Synopsys HDMI Controller is done -via a pair of dedicated addr+read/write registers. -The HDMI PHY is configured by registers in the HHI register block. - -Pixel data arrives in 4:4:4 format from the VENC block and the VPU HDMI mux -selects either the ENCI encoder for the 576i or 480i formats or the ENCP -encoder for all the other formats including interlaced HD formats. - -The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate -DVI timings for the HDMI controller. - -Amlogic Meson GXBB, GXL and GXM SoCs families embeds the Synopsys DesignWare -HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF -audio source interfaces. - -Required properties: -- compatible: value should be different for each SoC family as : - - GXBB (S905) : "amlogic,meson-gxbb-dw-hdmi" - - GXL (S905X, S905D) : "amlogic,meson-gxl-dw-hdmi" - - GXM (S912) : "amlogic,meson-gxm-dw-hdmi" - followed by the common "amlogic,meson-gx-dw-hdmi" - - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-dw-hdmi" -- reg: Physical base address and length of the controller's registers. -- interrupts: The HDMI interrupt number -- clocks, clock-names : must have the phandles to the HDMI iahb and isfr clocks, - and the Amlogic Meson venci clocks as described in - Documentation/devicetree/bindings/clock/clock-bindings.txt, - the clocks are soc specific, the clock-names should be "iahb", "isfr", "venci" -- resets, resets-names: must have the phandles to the HDMI apb, glue and phy - resets as described in : - Documentation/devicetree/bindings/reset/reset.txt, - the reset-names should be "hdmitx_apb", "hdmitx", "hdmitx_phy" - -Optional properties: -- hdmi-supply: Optional phandle to an external 5V regulator to power the HDMI - logic, as described in the file ../regulator/regulator.txt - -Required nodes: - -The connections to the HDMI ports are modeled using the OF graph -bindings specified in Documentation/devicetree/bindings/graph.txt. - -The following table lists for each supported model the port number -corresponding to each HDMI output and input. 
- - Port 0 Port 1 ------------------------------------------ - S905 (GXBB) VENC Input TMDS Output - S905X (GXL) VENC Input TMDS Output - S905D (GXL) VENC Input TMDS Output - S912 (GXM) VENC Input TMDS Output - S905X2 (G12A) VENC Input TMDS Output - S905Y2 (G12A) VENC Input TMDS Output - S905D2 (G12A) VENC Input TMDS Output - -Example: - -hdmi-connector { - compatible = "hdmi-connector"; - type = "a"; - - port { - hdmi_connector_in: endpoint { - remote-endpoint = <&hdmi_tx_tmds_out>; - }; - }; -}; - -hdmi_tx: hdmi-tx@c883a000 { - compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; - reg = <0x0 0xc883a000 0x0 0x1c>; - interrupts = ; - resets = <&reset RESET_HDMITX_CAPB3>, - <&reset RESET_HDMI_SYSTEM_RESET>, - <&reset RESET_HDMI_TX>; - reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy"; - clocks = <&clkc CLKID_HDMI_PCLK>, - <&clkc CLKID_CLK81>, - <&clkc CLKID_GCLK_VENCI_INT0>; - clock-names = "isfr", "iahb", "venci"; - #address-cells = <1>; - #size-cells = <0>; - - /* VPU VENC Input */ - hdmi_tx_venc_port: port@0 { - reg = <0>; - - hdmi_tx_in: endpoint { - remote-endpoint = <&hdmi_tx_out>; - }; - }; - - /* TMDS Output */ - hdmi_tx_tmds_port: port@1 { - reg = <1>; - - hdmi_tx_tmds_out: endpoint { - remote-endpoint = <&hdmi_connector_in>; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml new file mode 100644 index 000000000000..fb747682006d --- /dev/null +++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2019 BayLibre, SAS +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/display/amlogic,meson-dw-hdmi.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Amlogic specific extensions to the Synopsys Designware HDMI Controller + +maintainers: + - Neil Armstrong + +description: | + The Amlogic Meson Synopsys Designware Integration is composed of + - A Synopsys DesignWare HDMI Controller IP + - A TOP control block controlling the Clocks and PHY + - A custom HDMI PHY in order to convert video to TMDS signal + ___________________________________ + | HDMI TOP |<= HPD + |___________________________________| + | | | + | Synopsys HDMI | HDMI PHY |=> TMDS + | Controller |________________| + |___________________________________|<=> DDC + + The HDMI TOP block only supports HPD sensing. + The Synopsys HDMI Controller interrupt is routed through the + TOP Block interrupt. + Communication to the TOP Block and the Synopsys HDMI Controller is done + via a pair of dedicated addr+read/write registers. + The HDMI PHY is configured by registers in the HHI register block. + + Pixel data arrives in "4:4:4" format from the VENC block and the VPU HDMI mux + selects either the ENCI encoder for the 576i or 480i formats or the ENCP + encoder for all the other formats including interlaced HD formats. + + The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate + DVI timings for the HDMI controller. + + Amlogic Meson GXBB, GXL and GXM SoCs families embeds the Synopsys DesignWare + HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF + audio source interfaces. 
+ +properties: + compatible: + oneOf: + - items: + - enum: + - amlogic,meson-gxbb-dw-hdmi # GXBB (S905) + - amlogic,meson-gxl-dw-hdmi # GXL (S905X, S905D) + - amlogic,meson-gxm-dw-hdmi # GXM (S912) + - const: amlogic,meson-gx-dw-hdmi + - enum: + - amlogic,meson-g12a-dw-hdmi # G12A (S905X2, S905Y2, S905D2) + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + minItems: 3 + + clock-names: + items: + - const: isfr + - const: iahb + - const: venci + + resets: + minItems: 3 + + reset-names: + items: + - const: hdmitx_apb + - const: hdmitx + - const: hdmitx_phy + + hdmi-supply: + description: phandle to an external 5V regulator to power the HDMI logic + allOf: + - $ref: /schemas/types.yaml#/definitions/phandle + + port@0: + type: object + description: + A port node pointing to the VENC Input port node. + + port@1: + type: object + description: + A port node pointing to the TMDS Output port node. + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + "#sound-dai-cells": + const: 0 + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + - reset-names + - port@0 + - port@1 + - "#address-cells" + - "#size-cells" + +additionalProperties: false + +examples: + - | + hdmi_tx: hdmi-tx@c883a000 { + compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; + reg = <0xc883a000 0x1c>; + interrupts = <57>; + resets = <&reset_apb>, <&reset_hdmitx>, <&reset_hdmitx_phy>; + reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy"; + clocks = <&clk_isfr>, <&clk_iahb>, <&clk_venci>; + clock-names = "isfr", "iahb", "venci"; + #address-cells = <1>; + #size-cells = <0>; + + /* VPU VENC Input */ + hdmi_tx_venc_port: port@0 { + reg = <0>; + + hdmi_tx_in: endpoint { + remote-endpoint = <&hdmi_tx_out>; + }; + }; + + /* TMDS Output */ + hdmi_tx_tmds_port: port@1 { + reg = <1>; + + hdmi_tx_tmds_out: endpoint { + remote-endpoint = <&hdmi_connector_in>; + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt deleted file mode 100644 index be40a780501c..000000000000 --- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt +++ /dev/null @@ -1,121 +0,0 @@ -Amlogic Meson Display Controller -================================ - -The Amlogic Meson Display controller is composed of several components -that are going to be documented below: - -DMC|---------------VPU (Video Processing Unit)----------------|------HHI------| - | vd1 _______ _____________ _________________ | | -D |-------| |----| | | | | HDMI PLL | -D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK | -R |-------| |----| Processing | | | | | - | osd2 | | | |---| Enci ----------|----|-----VDAC------| -R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----| -A | osd1 | | | Blenders | | Encl ----------|----|---------------| -M |-------|______|----|____________| |________________| | | -___|__________________________________________________________|_______________| - - -VIU: Video Input Unit ---------------------- - -The Video Input Unit is in charge of the pixel scanout from the DDR memory. -It fetches the frames addresses, stride and parameters from the "Canvas" memory. -This part is also in charge of the CSC (Colorspace Conversion). -It can handle 2 OSD Planes and 2 Video Planes. - -VPP: Video Post Processing --------------------------- - -The Video Post Processing is in charge of the scaling and blending of the -various planes into a single pixel stream. 
-There is a special "pre-blending" used by the video planes with a dedicated -scaler and a "post-blending" to merge with the OSD Planes. -The OSD planes also have a dedicated scaler for one of the OSD. - -VENC: Video Encoders --------------------- - -The VENC is composed of the multiple pixel encoders : - - ENCI : Interlace Video encoder for CVBS and Interlace HDMI - - ENCP : Progressive Video Encoder for HDMI - - ENCL : LCD LVDS Encoder -The VENC Unit gets a Pixel Clocks (VCLK) from a dedicated HDMI PLL and clock -tree and provides the scanout clock to the VPP and VIU. -The ENCI is connected to a single VDAC for Composite Output. -The ENCI and ENCP are connected to an on-chip HDMI Transceiver. - -Device Tree Bindings: ---------------------- - -VPU: Video Processing Unit --------------------------- - -Required properties: -- compatible: value should be different for each SoC family as : - - GXBB (S905) : "amlogic,meson-gxbb-vpu" - - GXL (S905X, S905D) : "amlogic,meson-gxl-vpu" - - GXM (S912) : "amlogic,meson-gxm-vpu" - followed by the common "amlogic,meson-gx-vpu" - - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-vpu" -- reg: base address and size of he following memory-mapped regions : - - vpu - - hhi -- reg-names: should contain the names of the previous memory regions -- interrupts: should contain the VENC Vsync interrupt number -- amlogic,canvas: phandle to canvas provider node as described in the file - ../soc/amlogic/amlogic,canvas.txt - -Optional properties: -- power-domains: Optional phandle to associated power domain as described in - the file ../power/power_domain.txt - -Required nodes: - -The connections to the VPU output video ports are modeled using the OF graph -bindings specified in Documentation/devicetree/bindings/graph.txt. - -The following table lists for each supported model the port number -corresponding to each VPU output. 
- - Port 0 Port 1 ------------------------------------------ - S905 (GXBB) CVBS VDAC HDMI-TX - S905X (GXL) CVBS VDAC HDMI-TX - S905D (GXL) CVBS VDAC HDMI-TX - S912 (GXM) CVBS VDAC HDMI-TX - S905X2 (G12A) CVBS VDAC HDMI-TX - S905Y2 (G12A) CVBS VDAC HDMI-TX - S905D2 (G12A) CVBS VDAC HDMI-TX - -Example: - -tv-connector { - compatible = "composite-video-connector"; - - port { - tv_connector_in: endpoint { - remote-endpoint = <&cvbs_vdac_out>; - }; - }; -}; - -vpu: vpu@d0100000 { - compatible = "amlogic,meson-gxbb-vpu"; - reg = <0x0 0xd0100000 0x0 0x100000>, - <0x0 0xc883c000 0x0 0x1000>, - <0x0 0xc8838000 0x0 0x1000>; - reg-names = "vpu", "hhi", "dmc"; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - - /* CVBS VDAC output port */ - port@0 { - reg = <0>; - - cvbs_vdac_out: endpoint { - remote-endpoint = <&tv_connector_in>; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml new file mode 100644 index 000000000000..d1205a6697a0 --- /dev/null +++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2019 BayLibre, SAS +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/display/amlogic,meson-vpu.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Amlogic Meson Display Controller + +maintainers: + - Neil Armstrong + +description: | + The Amlogic Meson Display controller is composed of several components + that are going to be documented below + + DMC|---------------VPU (Video Processing Unit)----------------|------HHI------| + | vd1 _______ _____________ _________________ | | + D |-------| |----| | | | | HDMI PLL | + D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK | + R |-------| |----| Processing | | | | | + | osd2 | | | |---| Enci ----------|----|-----VDAC------| + R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----| + A | osd1 | | | Blenders | | Encl ----------|----|---------------| + M |-------|______|----|____________| |________________| | | + ___|__________________________________________________________|_______________| + + + VIU: Video Input Unit + --------------------- + + The Video Input Unit is in charge of the pixel scanout from the DDR memory. + It fetches the frames addresses, stride and parameters from the "Canvas" memory. + This part is also in charge of the CSC (Colorspace Conversion). + It can handle 2 OSD Planes and 2 Video Planes. + + VPP: Video Post Processing + -------------------------- + + The Video Post Processing is in charge of the scaling and blending of the + various planes into a single pixel stream. + There is a special "pre-blending" used by the video planes with a dedicated + scaler and a "post-blending" to merge with the OSD Planes. + The OSD planes also have a dedicated scaler for one of the OSD. + + VENC: Video Encoders + -------------------- + + The VENC is composed of the multiple pixel encoders + - ENCI : Interlace Video encoder for CVBS and Interlace HDMI + - ENCP : Progressive Video Encoder for HDMI + - ENCL : LCD LVDS Encoder + The VENC Unit gets a Pixel Clocks (VCLK) from a dedicated HDMI PLL and clock + tree and provides the scanout clock to the VPP and VIU. + The ENCI is connected to a single VDAC for Composite Output. + The ENCI and ENCP are connected to an on-chip HDMI Transceiver. 
+ +properties: + compatible: + oneOf: + - items: + - enum: + - amlogic,meson-gxbb-vpu # GXBB (S905) + - amlogic,meson-gxl-vpu # GXL (S905X, S905D) + - amlogic,meson-gxm-vpu # GXM (S912) + - const: amlogic,meson-gx-vpu + - enum: + - amlogic,meson-g12a-vpu # G12A (S905X2, S905Y2, S905D2) + + reg: + maxItems: 2 + + reg-names: + items: + - const: vpu + - const: hhi + + interrupts: + maxItems: 1 + + power-domains: + maxItems: 1 + description: phandle to the associated power domain + + port@0: + type: object + description: + A port node pointing to the CVBS VDAC port node. + + port@1: + type: object + description: + A port node pointing to the HDMI-TX port node. + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + +required: + - compatible + - reg + - interrupts + - port@0 + - port@1 + - "#address-cells" + - "#size-cells" + +examples: + - | + vpu: vpu@d0100000 { + compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu"; + reg = <0xd0100000 0x100000>, <0xc883c000 0x1000>; + reg-names = "vpu", "hhi"; + interrupts = <3>; + #address-cells = <1>; + #size-cells = <0>; + + /* CVBS VDAC output port */ + port@0 { + reg = <0>; + + cvbs_vdac_out: endpoint { + remote-endpoint = <&tv_connector_in>; + }; + }; + + /* HDMI TX output port */ + port@1 { + reg = <1>; + + hdmi_tx_out: endpoint { + remote-endpoint = <&hdmi_tx_in>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt index 508aee461e0d..aeb07c4bd703 100644 --- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt +++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt @@ -9,6 +9,7 @@ Optional properties: - label: a symbolic name for the connector - hpd-gpios: HPD GPIO number - ddc-i2c-bus: phandle link to the I2C controller used for DDC EDID probing +- ddc-en-gpios: signal to enable DDC bus Required nodes: - Video port for HDMI input diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml new file mode 100644 index 000000000000..aa788eaa2f71 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/nec,nl8048hl11.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NEC NL8048HL11 4.1" WVGA TFT LCD panel + +description: + The NEC NL8048HL11 is a 4.1" WVGA TFT LCD panel with a 24-bit RGB parallel + data interface and an SPI control interface. + +maintainers: + - Laurent Pinchart + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: nec,nl8048hl11 + + label: true + port: true + reg: true + reset-gpios: true + + spi-max-frequency: + maximum: 10000000 + +required: + - compatible + - reg + - reset-gpios + - port + +additionalProperties: false + +examples: + - | + #include + + spi0 { + #address-cells = <1>; + #size-cells = <0>; + + lcd_panel: panel@0 { + compatible = "nec,nl8048hl11"; + reg = <0>; + spi-max-frequency = <10000000>; + + reset-gpios = <&gpio7 7 GPIO_ACTIVE_LOW>; + + port { + lcd_in: endpoint { + remote-endpoint = <&dpi_out>; + }; + }; + }; + }; + +... 
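
For reference, the hdmi-connector.txt hunk above only documents the new ddc-en-gpios property; a minimal board-level sketch of how it could be wired up follows. This is an illustrative assumption, not part of the patch: the &gpio1 phandle, GPIO line number, &i2c2 bus and endpoint labels are all hypothetical placeholders.

    #include <dt-bindings/gpio/gpio.h>

    hdmi-connector {
            compatible = "hdmi-connector";
            type = "a";

            /* GPIO gating the external DDC level shifter (assumed board wiring) */
            ddc-en-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
            /* EDID is still probed over this I2C bus once DDC is enabled */
            ddc-i2c-bus = <&i2c2>;

            port {
                    hdmi_con_in: endpoint {
                            remote-endpoint = <&board_hdmi_tx_out>;
                    };
            };
    };
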
diff --git a/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml b/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml new file mode 100644 index 000000000000..5c5a3b519e31 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/ti,nspire.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments NSPIRE Display Panels + +maintainers: + - Linus Walleij + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + enum: + - ti,nspire-cx-lcd-panel + - ti,nspire-classic-lcd-panel + port: true + +required: + - compatible + +additionalProperties: false + +examples: + - | + panel { + compatible = "ti,nspire-cx-lcd-panel"; + port { + panel_in: endpoint { + remote-endpoint = <&pads>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 6992bbbbffab..29dcc6f8a64a 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -511,6 +511,8 @@ patternProperties: description: Lenovo Group Ltd. "^lg,.*": description: LG Corporation + "^lgphilips,.*": + description: LG Display "^libretech,.*": description: Shenzhen Libre Technology Co., Ltd "^licheepi,.*": @@ -933,6 +935,9 @@ patternProperties: description: Tecon Microprocessor Technologies, LLC. "^topeet,.*": description: Topeet + "^toppoly,.*": + description: TPO (deprecated, use tpo) + deprecated: true "^toradex,.*": description: Toradex AG "^toshiba,.*": diff --git a/MAINTAINERS b/MAINTAINERS index 1bd7b9c2d146..c2d975da561f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5334,8 +5334,8 @@ L: linux-amlogic@lists.infradead.org W: http://linux-meson.com/ S: Supported F: drivers/gpu/drm/meson/ -F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt -F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt +F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml +F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml F: Documentation/gpu/meson.rst T: git git://anongit.freedesktop.org/drm/drm-misc diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index e8c7310cb800..dcfb01e7c6f4 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ - reservation.o seqno-fence.o + dma-resv.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index f45bfb29ef96..433d91d710e4 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file) list_del(&dmabuf->list_node); mutex_unlock(&db_list.lock); - if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) - reservation_object_fini(dmabuf->resv); + if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) + dma_resv_fini(dmabuf->resv); module_put(dmabuf->owner); kfree(dmabuf); @@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) * To support cross-device and 
cross-driver synchronization of buffer access * implicit fences (represented internally in the kernel with &struct fence) can * be attached to a &dma_buf. The glue for that and a few related things are - * provided in the &reservation_object structure. + * provided in the &dma_resv structure. * * Userspace can query the state of these implicitly tracked fences using poll() * and related system calls: @@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) static __poll_t dma_buf_poll(struct file *file, poll_table *poll) { struct dma_buf *dmabuf; - struct reservation_object *resv; - struct reservation_object_list *fobj; + struct dma_resv *resv; + struct dma_resv_list *fobj; struct dma_fence *fence_excl; __poll_t events; unsigned shared_count, seq; @@ -506,13 +506,13 @@ err_alloc_file: struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) { struct dma_buf *dmabuf; - struct reservation_object *resv = exp_info->resv; + struct dma_resv *resv = exp_info->resv; struct file *file; size_t alloc_size = sizeof(struct dma_buf); int ret; if (!exp_info->resv) - alloc_size += sizeof(struct reservation_object); + alloc_size += sizeof(struct dma_resv); else /* prevent &dma_buf[1] == dma_buf->resv */ alloc_size += 1; @@ -544,8 +544,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; if (!resv) { - resv = (struct reservation_object *)&dmabuf[1]; - reservation_object_init(resv); + resv = (struct dma_resv *)&dmabuf[1]; + dma_resv_init(resv); } dmabuf->resv = resv; @@ -909,11 +909,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, { bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); - struct reservation_object *resv = dmabuf->resv; + struct dma_resv *resv = dmabuf->resv; long ret; /* Wait on any implicit rendering fences */ - ret = reservation_object_wait_timeout_rcu(resv, write, true, + ret = dma_resv_wait_timeout_rcu(resv, write, true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; @@ -1154,8 +1154,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) int ret; struct dma_buf *buf_obj; struct dma_buf_attachment *attach_obj; - struct reservation_object *robj; - struct reservation_object_list *fobj; + struct dma_resv *robj; + struct dma_resv_list *fobj; struct dma_fence *fence; unsigned seq; int count = 0, attach_count, shared_count, i; diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 12c6f64c0bc2..d3fbd950be94 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -13,6 +13,8 @@ #include #include +#define PENDING_ERROR 1 + static const char *dma_fence_array_get_driver_name(struct dma_fence *fence) { return "dma_fence_array"; @@ -23,10 +25,29 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence) return "unbound"; } +static void dma_fence_array_set_pending_error(struct dma_fence_array *array, + int error) +{ + /* + * Propagate the first error reported by any of our fences, but only + * before we ourselves are signaled. + */ + if (error) + cmpxchg(&array->base.error, PENDING_ERROR, error); +} + +static void dma_fence_array_clear_pending_error(struct dma_fence_array *array) +{ + /* Clear the error flag if not actually set. 
*/ + cmpxchg(&array->base.error, PENDING_ERROR, 0); +} + static void irq_dma_fence_array_work(struct irq_work *wrk) { struct dma_fence_array *array = container_of(wrk, typeof(*array), work); + dma_fence_array_clear_pending_error(array); + dma_fence_signal(&array->base); dma_fence_put(&array->base); } @@ -38,6 +59,8 @@ static void dma_fence_array_cb_func(struct dma_fence *f, container_of(cb, struct dma_fence_array_cb, cb); struct dma_fence_array *array = array_cb->array; + dma_fence_array_set_pending_error(array, f->error); + if (atomic_dec_and_test(&array->num_pending)) irq_work_queue(&array->work); else @@ -63,9 +86,14 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence) dma_fence_get(&array->base); if (dma_fence_add_callback(array->fences[i], &cb[i].cb, dma_fence_array_cb_func)) { + int error = array->fences[i]->error; + + dma_fence_array_set_pending_error(array, error); dma_fence_put(&array->base); - if (atomic_dec_and_test(&array->num_pending)) + if (atomic_dec_and_test(&array->num_pending)) { + dma_fence_array_clear_pending_error(array); return false; + } } } @@ -142,6 +170,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); array->fences = fences; + array->base.error = PENDING_ERROR; + return array; } EXPORT_SYMBOL(dma_fence_array_create); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 59ac96ec7ba8..2c136aee3e79 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1); * * - Then there's also implicit fencing, where the synchronization points are * implicitly passed around as part of shared &dma_buf instances. Such - * implicit fences are stored in &struct reservation_object through the + * implicit fences are stored in &struct dma_resv through the * &dma_buf.resv pointer. 
*/ @@ -129,31 +129,27 @@ EXPORT_SYMBOL(dma_fence_context_alloc); int dma_fence_signal_locked(struct dma_fence *fence) { struct dma_fence_cb *cur, *tmp; - int ret = 0; + struct list_head cb_list; lockdep_assert_held(fence->lock); - if (WARN_ON(!fence)) + if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags))) return -EINVAL; - if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - ret = -EINVAL; + /* Stash the cb_list before replacing it with the timestamp */ + list_replace(&fence->cb_list, &cb_list); - /* - * we might have raced with the unlocked dma_fence_signal, - * still run through all callbacks - */ - } else { - fence->timestamp = ktime_get(); - set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); - trace_dma_fence_signaled(fence); - } + fence->timestamp = ktime_get(); + set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); + trace_dma_fence_signaled(fence); - list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { - list_del_init(&cur->node); + list_for_each_entry_safe(cur, tmp, &cb_list, node) { + INIT_LIST_HEAD(&cur->node); cur->func(fence, cur); } - return ret; + + return 0; } EXPORT_SYMBOL(dma_fence_signal_locked); @@ -173,28 +169,16 @@ EXPORT_SYMBOL(dma_fence_signal_locked); int dma_fence_signal(struct dma_fence *fence) { unsigned long flags; + int ret; if (!fence) return -EINVAL; - if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return -EINVAL; + spin_lock_irqsave(fence->lock, flags); + ret = dma_fence_signal_locked(fence); + spin_unlock_irqrestore(fence->lock, flags); - fence->timestamp = ktime_get(); - set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); - trace_dma_fence_signaled(fence); - - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { - struct dma_fence_cb *cur, *tmp; - - spin_lock_irqsave(fence->lock, flags); - list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { - list_del_init(&cur->node); - cur->func(fence, cur); - } - spin_unlock_irqrestore(fence->lock, flags); - } - return 0; + return ret; } EXPORT_SYMBOL(dma_fence_signal); @@ -248,7 +232,8 @@ void dma_fence_release(struct kref *kref) trace_dma_fence_destroy(fence); - if (WARN(!list_empty(&fence->cb_list), + if (WARN(!list_empty(&fence->cb_list) && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags), "Fence %s:%s:%llx:%llx released with pending signals!\n", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/dma-resv.c similarity index 75% rename from drivers/dma-buf/reservation.c rename to drivers/dma-buf/dma-resv.c index ad6775b32a73..42a8f3f11681 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/dma-resv.c @@ -32,7 +32,7 @@ * Authors: Thomas Hellstrom */ -#include +#include #include /** @@ -56,16 +56,15 @@ const char reservation_seqcount_string[] = "reservation_seqcount"; EXPORT_SYMBOL(reservation_seqcount_string); /** - * reservation_object_list_alloc - allocate fence list + * dma_resv_list_alloc - allocate fence list * @shared_max: number of fences we need space for * - * Allocate a new reservation_object_list and make sure to correctly initialize + * Allocate a new dma_resv_list and make sure to correctly initialize * shared_max. 
*/ -static struct reservation_object_list * -reservation_object_list_alloc(unsigned int shared_max) +static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) { - struct reservation_object_list *list; + struct dma_resv_list *list; list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); if (!list) @@ -78,12 +77,12 @@ reservation_object_list_alloc(unsigned int shared_max) } /** - * reservation_object_list_free - free fence list + * dma_resv_list_free - free fence list * @list: list to free * - * Free a reservation_object_list and make sure to drop all references. + * Free a dma_resv_list and make sure to drop all references. */ -static void reservation_object_list_free(struct reservation_object_list *list) +static void dma_resv_list_free(struct dma_resv_list *list) { unsigned int i; @@ -97,10 +96,10 @@ static void reservation_object_list_free(struct reservation_object_list *list) } /** - * reservation_object_init - initialize a reservation object + * dma_resv_init - initialize a reservation object * @obj: the reservation object */ -void reservation_object_init(struct reservation_object *obj) +void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); @@ -109,15 +108,15 @@ void reservation_object_init(struct reservation_object *obj) RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); } -EXPORT_SYMBOL(reservation_object_init); +EXPORT_SYMBOL(dma_resv_init); /** - * reservation_object_fini - destroys a reservation object + * dma_resv_fini - destroys a reservation object * @obj: the reservation object */ -void reservation_object_fini(struct reservation_object *obj) +void dma_resv_fini(struct dma_resv *obj) { - struct reservation_object_list *fobj; + struct dma_resv_list *fobj; struct dma_fence *excl; /* @@ -129,32 +128,31 @@ void reservation_object_fini(struct reservation_object *obj) dma_fence_put(excl); fobj = rcu_dereference_protected(obj->fence, 1); - reservation_object_list_free(fobj); + dma_resv_list_free(fobj); ww_mutex_destroy(&obj->lock); } -EXPORT_SYMBOL(reservation_object_fini); +EXPORT_SYMBOL(dma_resv_fini); /** - * reservation_object_reserve_shared - Reserve space to add shared fences to - * a reservation_object. + * dma_resv_reserve_shared - Reserve space to add shared fences to + * a dma_resv. * @obj: reservation object * @num_fences: number of fences we want to add * - * Should be called before reservation_object_add_shared_fence(). Must + * Should be called before dma_resv_add_shared_fence(). Must * be called with obj->lock held. 
* * RETURNS * Zero for success, or -errno */ -int reservation_object_reserve_shared(struct reservation_object *obj, - unsigned int num_fences) +int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) { - struct reservation_object_list *old, *new; + struct dma_resv_list *old, *new; unsigned int i, j, k, max; - reservation_object_assert_held(obj); + dma_resv_assert_held(obj); - old = reservation_object_get_list(obj); + old = dma_resv_get_list(obj); if (old && old->shared_max) { if ((old->shared_count + num_fences) <= old->shared_max) @@ -166,7 +164,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj, max = 4; } - new = reservation_object_list_alloc(max); + new = dma_resv_list_alloc(max); if (!new) return -ENOMEM; @@ -180,7 +178,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj, struct dma_fence *fence; fence = rcu_dereference_protected(old->shared[i], - reservation_object_held(obj)); + dma_resv_held(obj)); if (dma_fence_is_signaled(fence)) RCU_INIT_POINTER(new->shared[--k], fence); else @@ -206,35 +204,34 @@ int reservation_object_reserve_shared(struct reservation_object *obj, struct dma_fence *fence; fence = rcu_dereference_protected(new->shared[i], - reservation_object_held(obj)); + dma_resv_held(obj)); dma_fence_put(fence); } kfree_rcu(old, rcu); return 0; } -EXPORT_SYMBOL(reservation_object_reserve_shared); +EXPORT_SYMBOL(dma_resv_reserve_shared); /** - * reservation_object_add_shared_fence - Add a fence to a shared slot + * dma_resv_add_shared_fence - Add a fence to a shared slot * @obj: the reservation object * @fence: the shared fence to add * * Add a fence to a shared slot, obj->lock must be held, and - * reservation_object_reserve_shared() has been called. + * dma_resv_reserve_shared() has been called. */ -void reservation_object_add_shared_fence(struct reservation_object *obj, - struct dma_fence *fence) +void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) { - struct reservation_object_list *fobj; + struct dma_resv_list *fobj; struct dma_fence *old; unsigned int i, count; dma_fence_get(fence); - reservation_object_assert_held(obj); + dma_resv_assert_held(obj); - fobj = reservation_object_get_list(obj); + fobj = dma_resv_get_list(obj); count = fobj->shared_count; preempt_disable(); @@ -243,7 +240,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj, for (i = 0; i < count; ++i) { old = rcu_dereference_protected(fobj->shared[i], - reservation_object_held(obj)); + dma_resv_held(obj)); if (old->context == fence->context || dma_fence_is_signaled(old)) goto replace; @@ -262,25 +259,24 @@ replace: preempt_enable(); dma_fence_put(old); } -EXPORT_SYMBOL(reservation_object_add_shared_fence); +EXPORT_SYMBOL(dma_resv_add_shared_fence); /** - * reservation_object_add_excl_fence - Add an exclusive fence. + * dma_resv_add_excl_fence - Add an exclusive fence. * @obj: the reservation object * @fence: the shared fence to add * * Add a fence to the exclusive slot. The obj->lock must be held. 
*/ -void reservation_object_add_excl_fence(struct reservation_object *obj, - struct dma_fence *fence) +void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) { - struct dma_fence *old_fence = reservation_object_get_excl(obj); - struct reservation_object_list *old; + struct dma_fence *old_fence = dma_resv_get_excl(obj); + struct dma_resv_list *old; u32 i = 0; - reservation_object_assert_held(obj); + dma_resv_assert_held(obj); - old = reservation_object_get_list(obj); + old = dma_resv_get_list(obj); if (old) i = old->shared_count; @@ -299,27 +295,26 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, /* inplace update, no shared fences */ while (i--) dma_fence_put(rcu_dereference_protected(old->shared[i], - reservation_object_held(obj))); + dma_resv_held(obj))); dma_fence_put(old_fence); } -EXPORT_SYMBOL(reservation_object_add_excl_fence); +EXPORT_SYMBOL(dma_resv_add_excl_fence); /** -* reservation_object_copy_fences - Copy all fences from src to dst. +* dma_resv_copy_fences - Copy all fences from src to dst. * @dst: the destination reservation object * @src: the source reservation object * * Copy all fences from src to dst. dst-lock must be held. */ -int reservation_object_copy_fences(struct reservation_object *dst, - struct reservation_object *src) +int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) { - struct reservation_object_list *src_list, *dst_list; + struct dma_resv_list *src_list, *dst_list; struct dma_fence *old, *new; unsigned i; - reservation_object_assert_held(dst); + dma_resv_assert_held(dst); rcu_read_lock(); src_list = rcu_dereference(src->fence); @@ -330,7 +325,7 @@ retry: rcu_read_unlock(); - dst_list = reservation_object_list_alloc(shared_count); + dst_list = dma_resv_list_alloc(shared_count); if (!dst_list) return -ENOMEM; @@ -351,7 +346,7 @@ retry: continue; if (!dma_fence_get_rcu(fence)) { - reservation_object_list_free(dst_list); + dma_resv_list_free(dst_list); src_list = rcu_dereference(src->fence); goto retry; } @@ -370,8 +365,8 @@ retry: new = dma_fence_get_rcu_safe(&src->fence_excl); rcu_read_unlock(); - src_list = reservation_object_get_list(dst); - old = reservation_object_get_excl(dst); + src_list = dma_resv_get_list(dst); + old = dma_resv_get_excl(dst); preempt_disable(); write_seqcount_begin(&dst->seq); @@ -381,15 +376,15 @@ retry: write_seqcount_end(&dst->seq); preempt_enable(); - reservation_object_list_free(src_list); + dma_resv_list_free(src_list); dma_fence_put(old); return 0; } -EXPORT_SYMBOL(reservation_object_copy_fences); +EXPORT_SYMBOL(dma_resv_copy_fences); /** - * reservation_object_get_fences_rcu - Get an object's shared and exclusive + * dma_resv_get_fences_rcu - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object * @pfence_excl: the returned exclusive fence (or NULL) @@ -401,10 +396,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences); * exclusive fence is not specified the fence is put into the array of the * shared fences as well. Returns either zero or -ENOMEM. 
*/ -int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared) +int dma_resv_get_fences_rcu(struct dma_resv *obj, + struct dma_fence **pfence_excl, + unsigned *pshared_count, + struct dma_fence ***pshared) { struct dma_fence **shared = NULL; struct dma_fence *fence_excl; @@ -412,7 +407,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, int ret = 1; do { - struct reservation_object_list *fobj; + struct dma_resv_list *fobj; unsigned int i, seq; size_t sz = 0; @@ -487,10 +482,10 @@ unlock: *pshared = shared; return ret; } -EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); +EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); /** - * reservation_object_wait_timeout_rcu - Wait on reservation's objects + * dma_resv_wait_timeout_rcu - Wait on reservation's objects * shared and/or exclusive fences. * @obj: the reservation object * @wait_all: if true, wait on all fences, else wait on just exclusive fence @@ -501,9 +496,9 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than zer on success. */ -long reservation_object_wait_timeout_rcu(struct reservation_object *obj, - bool wait_all, bool intr, - unsigned long timeout) +long dma_resv_wait_timeout_rcu(struct dma_resv *obj, + bool wait_all, bool intr, + unsigned long timeout) { struct dma_fence *fence; unsigned seq, shared_count; @@ -531,8 +526,7 @@ retry: } if (wait_all) { - struct reservation_object_list *fobj = - rcu_dereference(obj->fence); + struct dma_resv_list *fobj = rcu_dereference(obj->fence); if (fobj) shared_count = fobj->shared_count; @@ -575,11 +569,10 @@ unlock_retry: rcu_read_unlock(); goto retry; } -EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); -static inline int -reservation_object_test_signaled_single(struct dma_fence *passed_fence) +static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) { struct dma_fence *fence, *lfence = passed_fence; int ret = 1; @@ -596,7 +589,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence) } /** - * reservation_object_test_signaled_rcu - Test if a reservation object's + * dma_resv_test_signaled_rcu - Test if a reservation object's * fences have been signaled. 
* @obj: the reservation object * @test_all: if true, test all fences, otherwise only test the exclusive @@ -605,8 +598,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence) * RETURNS * true if all fences signaled, else false */ -bool reservation_object_test_signaled_rcu(struct reservation_object *obj, - bool test_all) +bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) { unsigned seq, shared_count; int ret; @@ -620,8 +612,7 @@ retry: if (test_all) { unsigned i; - struct reservation_object_list *fobj = - rcu_dereference(obj->fence); + struct dma_resv_list *fobj = rcu_dereference(obj->fence); if (fobj) shared_count = fobj->shared_count; @@ -629,7 +620,7 @@ retry: for (i = 0; i < shared_count; ++i) { struct dma_fence *fence = rcu_dereference(fobj->shared[i]); - ret = reservation_object_test_signaled_single(fence); + ret = dma_resv_test_signaled_single(fence); if (ret < 0) goto retry; else if (!ret) @@ -644,8 +635,7 @@ retry: struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); if (fence_excl) { - ret = reservation_object_test_signaled_single( - fence_excl); + ret = dma_resv_test_signaled_single(fence_excl); if (ret < 0) goto retry; @@ -657,4 +647,4 @@ retry: rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); +EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 051f6c2873c7..6713cfb1995c 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence) { struct sync_pt *pt = dma_fence_to_sync_pt(fence); struct sync_timeline *parent = dma_fence_parent(fence); + unsigned long flags; + spin_lock_irqsave(fence->lock, flags); if (!list_empty(&pt->link)) { - unsigned long flags; - - spin_lock_irqsave(fence->lock, flags); - if (!list_empty(&pt->link)) { - list_del(&pt->link); - rb_erase(&pt->node, &parent->pt_tree); - } - spin_unlock_irqrestore(fence->lock, flags); + list_del(&pt->link); + rb_erase(&pt->node, &parent->pt_tree); } + spin_unlock_irqrestore(fence->lock, flags); sync_timeline_put(parent); dma_fence_free(fence); @@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, p = &parent->rb_left; } else { if (dma_fence_get_rcu(&other->base)) { - dma_fence_put(&pt->base); + sync_timeline_put(obj); + kfree(pt); pt = other; goto unlock; } diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index ee4d1a96d779..25c5c071645b 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -419,7 +419,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, * info->num_fences. 
*/ if (!info.num_fences) { - info.status = dma_fence_is_signaled(sync_file->fence); + info.status = dma_fence_get_status(sync_file->fence); goto no_fences; } else { info.status = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index e0c47ae52fc1..42b936b6bbf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, struct amdgpu_amdkfd_fence *ef) { - struct reservation_object *resv = bo->tbo.base.resv; - struct reservation_object_list *old, *new; + struct dma_resv *resv = bo->tbo.base.resv; + struct dma_resv_list *old, *new; unsigned int i, j, k; if (!ef) return -EINVAL; - old = reservation_object_get_list(resv); + old = dma_resv_get_list(resv); if (!old) return 0; @@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, struct dma_fence *f; f = rcu_dereference_protected(old->shared[i], - reservation_object_held(resv)); + dma_resv_held(resv)); if (f->context == ef->base.context) RCU_INIT_POINTER(new->shared[--j], f); @@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, struct dma_fence *f; f = rcu_dereference_protected(new->shared[i], - reservation_object_held(resv)); + dma_resv_held(resv)); dma_fence_put(f); } kfree_rcu(old, rcu); @@ -887,7 +887,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, AMDGPU_FENCE_OWNER_KFD, false); if (ret) goto wait_pd_fail; - ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); + ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; amdgpu_bo_fence(vm->root.base.bo, @@ -2133,7 +2133,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem * Add process eviction fence to bo so they can * evict each other. 
*/ - ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1); + ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9ccf32c5456a..8c50be56f458 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) list_for_each_entry(e, &p->validated, tv.head) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - struct reservation_object *resv = bo->tbo.base.resv; + struct dma_resv *resv = bo->tbo.base.resv; r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, amdgpu_bo_explicit_sync(bo)); @@ -1727,7 +1727,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, *map = mapping; /* Double check that the BO is reserved by this CS */ - if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) + if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) return -EINVAL; if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index f453e277ed24..1d4aaa9580f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -205,7 +205,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, + r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, &work->shared_count, &work->shared); if (unlikely(r != 0)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index f0db7ddcb61b..61f108ec2b5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -137,23 +137,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, } static int -__reservation_object_make_exclusive(struct reservation_object *obj) +__dma_resv_make_exclusive(struct dma_resv *obj) { struct dma_fence **fences; unsigned int count; int r; - if (!reservation_object_get_list(obj)) /* no shared fences to convert */ + if (!dma_resv_get_list(obj)) /* no shared fences to convert */ return 0; - r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); + r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); if (r) return r; if (count == 0) { /* Now that was unexpected. */ } else if (count == 1) { - reservation_object_add_excl_fence(obj, fences[0]); + dma_resv_add_excl_fence(obj, fences[0]); dma_fence_put(fences[0]); kfree(fences); } else { @@ -165,7 +165,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj) if (!array) goto err_fences_put; - reservation_object_add_excl_fence(obj, &array->base); + dma_resv_add_excl_fence(obj, &array->base); dma_fence_put(&array->base); } @@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf, * fences on the reservation object into a single exclusive * fence. 
*/ - r = __reservation_object_make_exclusive(bo->tbo.base.resv); + r = __dma_resv_make_exclusive(bo->tbo.base.resv); if (r) goto error_unreserve; } @@ -367,7 +367,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { - struct reservation_object *resv = attach->dmabuf->resv; + struct dma_resv *resv = attach->dmabuf->resv; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_bo *bo; struct amdgpu_bo_param bp; @@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, bp.flags = 0; bp.type = ttm_bo_type_sg; bp.resv = resv; - reservation_object_lock(resv, NULL); + dma_resv_lock(resv, NULL); ret = amdgpu_bo_create(adev, &bp, &bo); if (ret) goto error; @@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) bo->prime_shared_count = 1; - reservation_object_unlock(resv); + dma_resv_unlock(resv); return &bo->tbo.base; error: - reservation_object_unlock(resv); + dma_resv_unlock(resv); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d532e3d647ca..b174bd5eb38e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, - struct reservation_object *resv, + struct dma_resv *resv, struct drm_gem_object **obj) { struct amdgpu_bo *bo; @@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, union drm_amdgpu_gem_create *args = data; uint64_t flags = args->in.domain_flags; uint64_t size = args->in.bo_size; - struct reservation_object *resv = NULL; + struct dma_resv *resv = NULL; struct drm_gem_object *gobj; uint32_t handle; int r; @@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, + ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, timeout); /* ret == 0 means not signaled, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 2f17150e26e1..0b66d2e6b5d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev); int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, - struct reservation_object *resv, + struct dma_resv *resv, struct drm_gem_object **obj); int amdgpu_mode_dumb_create(struct drm_file *file_priv, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 529065b83885..53734da1c2df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence, * * Free the pasid only after all the fences in resv are signaled. 
*/ -void amdgpu_pasid_free_delayed(struct reservation_object *resv, +void amdgpu_pasid_free_delayed(struct dma_resv *resv, unsigned int pasid) { struct dma_fence *fence, **fences; @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv, unsigned count; int r; - r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences); + r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); if (r) goto fallback; @@ -156,7 +156,7 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. */ - reservation_object_wait_timeout_rcu(resv, true, false, + dma_resv_wait_timeout_rcu(resv, true, false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index 7625419f0fc2..8e58325bbca2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr { int amdgpu_pasid_alloc(unsigned int bits); void amdgpu_pasid_free(unsigned int pasid); -void amdgpu_pasid_free_delayed(struct reservation_object *resv, +void amdgpu_pasid_free_delayed(struct dma_resv *resv, unsigned int pasid); bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 50022acc8a81..f1f8cdd695d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) continue; - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 2d07f16f1789..6ebe61e14f29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -550,7 +550,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, fail_unreserve: if (!bp->resv) - reservation_object_unlock(bo->tbo.base.resv); + dma_resv_unlock(bo->tbo.base.resv); amdgpu_bo_unref(&bo); return r; } @@ -612,13 +612,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { if (!bp->resv) - WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv, + WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv, NULL)); r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); if (!bp->resv) - reservation_object_unlock((*bo_ptr)->tbo.base.resv); + dma_resv_unlock((*bo_ptr)->tbo.base.resv); if (r) amdgpu_bo_unref(bo_ptr); @@ -715,7 +715,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false, + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, MAX_SCHEDULE_TIMEOUT); if (r < 0) return r; @@ -1093,7 +1093,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags) */ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) { - reservation_object_assert_held(bo->tbo.base.resv); + dma_resv_assert_held(bo->tbo.base.resv); if (tiling_flags) *tiling_flags = bo->tiling_flags; @@ -1242,7 +1242,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) 
return; - reservation_object_lock(bo->base.resv, NULL); + dma_resv_lock(bo->base.resv, NULL); r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); if (!WARN_ON(r)) { @@ -1250,7 +1250,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) dma_fence_put(fence); } - reservation_object_unlock(bo->base.resv); + dma_resv_unlock(bo->base.resv); } /** @@ -1325,12 +1325,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared) { - struct reservation_object *resv = bo->tbo.base.resv; + struct dma_resv *resv = bo->tbo.base.resv; if (shared) - reservation_object_add_shared_fence(resv, fence); + dma_resv_add_shared_fence(resv, fence); else - reservation_object_add_excl_fence(resv, fence); + dma_resv_add_excl_fence(resv, fence); } /** @@ -1370,7 +1370,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) { WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); - WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) && + WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) && !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 05dde0dd04ff..658f4c9779b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -41,7 +41,7 @@ struct amdgpu_bo_param { u32 preferred_domain; u64 flags; enum ttm_bo_type type; - struct reservation_object *resv; + struct dma_resv *resv; }; /* bo virtual addresses in a vm */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 9828f3c7c655..95e5e93edd18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -190,10 +190,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, */ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct reservation_object *resv, + struct dma_resv *resv, void *owner, bool explicit_sync) { - struct reservation_object_list *flist; + struct dma_resv_list *flist; struct dma_fence *f; void *fence_owner; unsigned i; @@ -203,16 +203,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, return -EINVAL; /* always sync to the exclusive fence */ - f = reservation_object_get_excl(resv); + f = dma_resv_get_excl(resv); r = amdgpu_sync_fence(adev, sync, f, false); - flist = reservation_object_get_list(resv); + flist = dma_resv_get_list(resv); if (!flist || r) return r; for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], - reservation_object_held(resv)); + dma_resv_held(resv)); /* We only want to trigger KFD eviction fences on * evict or move jobs. Skip KFD fences otherwise. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 10cf23a57f17..b5f1778a2319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -27,7 +27,7 @@
 #include
 
 struct dma_fence;
-struct reservation_object;
+struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
 
@@ -44,7 +44,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		      struct dma_fence *f, bool explicit);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     void *owner, bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
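
Editor's note, not part of the patch: the header change above completes the amdgpu_sync rename; the producer side of the same API appears in amdgpu_bo_fence() in the amdgpu_object.c hunks earlier. For completeness, a sketch of a minimal fence publisher under the assumption that the signatures are exactly the ones introduced by this series; example_publish_fence() is not a real function.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical helper, not part of the patch: attach @fence to @resv. */
static int example_publish_fence(struct dma_resv *resv, struct dma_fence *fence,
				 bool shared)
{
	int r;

	r = dma_resv_lock(resv, NULL);
	if (r)
		return r;

	if (shared) {
		/* A shared slot must be reserved before the fence is added. */
		r = dma_resv_reserve_shared(resv, 1);
		if (!r)
			dma_resv_add_shared_fence(resv, fence);
	} else {
		/* The exclusive slot needs no reservation. */
		dma_resv_add_excl_fence(resv, fence);
	}

	dma_resv_unlock(resv);
	return r;
}
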
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8f8b7a350b8b..3e8f9072561e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -303,7 +303,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f)
 {
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1486,7 +1486,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 {
 	unsigned long num_pages = bo->mem.num_pages;
 	struct drm_mm_node *node = bo->mem.mm_node;
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	int i;
 
@@ -1494,18 +1494,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * cleanly handle page faults.
 	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !reservation_object_test_signaled_rcu(bo->base.resv, true))
+	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = reservation_object_get_list(bo->base.resv);
+	flist = dma_resv_get_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
-				reservation_object_held(bo->base.resv));
+				dma_resv_held(bo->base.resv));
 			if (amdkfd_fence_check_mm(f, current->mm))
 				return false;
 		}
@@ -2009,7 +2009,7 @@ error_free:
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush)
 {
@@ -2083,7 +2083,7 @@ error_free:
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bccb8c49e597..0dddedc06ae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -85,18 +85,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush);
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f858607b17a5..b2c364b8695f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 							true, false,
 							msecs_to_jiffies(10));
 		if (r == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c8244ce184e8..b7665b31a2ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo->tbo.base.resv);
+		exclusive = dma_resv_get_excl(bo->tbo.base.resv);
 	}
 
 	if (bo) {
@@ -1879,18 +1879,18 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
+	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
 	struct dma_fence *excl, **shared;
 	unsigned i, shared_count;
 	int r;
 
-	r = reservation_object_get_fences_rcu(resv, &excl,
+	r = dma_resv_get_fences_rcu(resv, &excl,
 					      &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		reservation_object_wait_timeout_rcu(resv, true, false,
+		dma_resv_wait_timeout_rcu(resv, true, false,
 					    MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -1978,7 +1978,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			   struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va *bo_va, *tmp;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	bool clear;
 	int r;
 
@@ -1997,7 +1997,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		spin_unlock(&vm->invalidated_lock);
 
 		/* Try to reserve the BO to avoid clearing its ptes */
-		if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
 			clear = false;
 		/* Somebody else is using the BO right now */
 		else
@@ -2008,7 +2008,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			return r;
 
 		if (!clear)
-			reservation_object_unlock(resv);
+			dma_resv_unlock(resv);
 		spin_lock(&vm->invalidated_lock);
 	}
 	spin_unlock(&vm->invalidated_lock);
@@ -2416,7 +2416,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
 			struct amdgpu_bo *bo;
 
 			bo = mapping->bo_va->base.bo;
-			if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
+			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
 			    ticket)
 				continue;
 		}
@@ -2649,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
 						   true, true, timeout);
 }
 
@@ -2724,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_free_root;
 
-	r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
+	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
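
Editor's note, not part of the patch: amdgpu_vm_handle_moved() above takes the reservation lock opportunistically and treats the BO as busy when the trylock fails, while amdgpu_vm_wait_idle() shows the bounded wait. A combined sketch with the renamed calls, illustrative only; example_try_access() is an invented name and the 5 second bound is arbitrary.

#include <linux/dma-resv.h>
#include <linux/jiffies.h>

/* Hypothetical helper, not part of the patch: touch @resv without sleeping on its lock. */
static bool example_try_access(struct dma_resv *resv)
{
	long t;

	if (dma_resv_trylock(resv)) {
		/* The object is ours; real code would update it here. */
		dma_resv_unlock(resv);
		return true;
	}

	/* Somebody else holds the lock: wait up to 5s for all fences instead. */
	t = dma_resv_wait_timeout_rcu(resv, true, false,
				      msecs_to_jiffies(5000));
	return t > 0;
}

The remaining hunks apply the same rename to the display code and are followed by unrelated arm/komeda and arm/hdlcd changes from the same series.
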
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 722c70d40d3b..67f8aee4cd1b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5695,7 +5695,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
+		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
 							false,
 							msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index fa9a4593bb37..624d257da20f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -27,7 +27,7 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
 		return;
 	}
 
-	pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000;
+	pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000ULL;
 	aclk = komeda_crtc_get_aclk(kcrtc_st);
 
 	kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index a3efa28436ea..af67fefed38d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -9,7 +9,12 @@
  * Implementation of a CRTC class for the HDLCD driver.
  */
 
-#include
+#include
+#include
+#include
+
+#include